// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max rate available to be configured
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}
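
/* Worked example (editorial illustration derived from the code above,
 * not from the hardware spec): requesting ir = 10000 Mbps at port level
 * (tick = 48).
 *
 * With the default ir_b = 126, ir_u = 0, ir_s = 0:
 *   ir_calc = 126 * 8 * 1000 / 48 = 21000 Mbps > 10000 Mbps
 * so the denominator is scaled up until ir_calc drops below ir:
 *   ir_s = 1: 1008000 / (48 * 2) = 10500 >= 10000, keep going
 *   ir_s = 2: 1008000 / (48 * 4) =  5250 <  10000, stop
 * then ir_b is recomputed with rounding:
 *   ir_b = (10000 * 48 * 4 + 4000) / 8000 = 240
 * Check: 240 * (2 ^ 0) * 8 / (48 * (2 ^ 2)) * 1000 = 10000 Mbps exactly.
 */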

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			/* the stride is the per-descriptor counter count */
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}
	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}
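
/* Layout assumed by hclge_pfc_stats_get() (derived from the constants
 * above, not from firmware documentation): each of the 3 chained
 * descriptors carries 3 per-TC 64-bit counters, giving 9 slots for up
 * to HCLGE_MAX_TC_NUM TCs:
 *
 *   desc[0].pkt_num[0..2] -> stats[0..2]
 *   desc[1].pkt_num[0..2] -> stats[3..5]
 *   desc[2].pkt_num[0..2] -> stats[6..8]
 *
 * The index check drops the surplus slot when HCLGE_MAX_TC_NUM is 8.
 */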

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The priority mapping register is four bytes wide and each byte
	 * holds two priorities: the lower 4 bits stand for the even
	 * priority and the higher 4 bits for the odd one, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
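
/* Example of the resulting register image (illustrative values only):
 * with prio_tc = {0, 0, 1, 1, 2, 2, 3, 3}, hclge_up_to_tc_map() builds
 *
 *   pri[0] = 0x00	(pri1 = TC0, pri0 = TC0)
 *   pri[1] = 0x11	(pri3 = TC1, pri2 = TC1)
 *   pri[2] = 0x22	(pri5 = TC2, pri4 = TC2)
 *   pri[3] = 0x33	(pri7 = TC3, pri6 = TC3)
 */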

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         / \         \
	 *           /         /   \         \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *          | qs_id_h | vld | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
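
/* Worked example of the conversion above (illustrative, following the
 * diagram rather than the field macros, which live in the header): for
 * qs_id = 1034 = 0b100_0000_1010,
 *   qs_id_l = bits 9 ~ 0   = 10
 *   qs_id_h = bits 14 ~ 10 = 1
 * After the swap the high part moves to bits 15 ~ 11 and bit 10 becomes
 * the vld flag, so the register value is
 *   (1 << 11) | (1 << 10) | 10 = 3082.
 */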

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			/* print the host-order id, not the __le16 field */
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, vport->qs_offset + i,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}
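
/* Usage note (editorial): callers pass max_tx_rate = 0 to remove a
 * previously configured qset limit; the function then programs the
 * device-wide max_tm_rate, which is effectively "no extra limit".
 * Rates are in Mbps, per hclge_shaper_para_calc(). Sketch:
 *
 *   hclge_tm_qs_shaper_cfg(vport, 5000); // cap the vport's qsets at 5 Gbps
 *   hclge_tm_qs_shaper_cfg(vport, 0);    // restore to max_tm_rate
 */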

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}
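
/* Illustrative numbers (not from the source): with mqprio inactive,
 * num_tc = 4 and alloc_tqps = 16, hclge_vport_get_max_rss_size()
 * returns 16 / 4 = 4 and hclge_vport_get_tqp_num() returns
 * rss_size * 4. With mqprio active the per-TC queue counts come from
 * the user's configuration, so the max and the sum are taken over
 * tqp_count[] instead.
 */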

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	/* When mqprio is enabled, tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
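
/* Example of the default mapping built above (illustrative): with
 * num_tc = 4, prio_tc = {0, 1, 2, 3, 0, 0, 0, 0} - priorities beyond
 * the last active TC fall back to TC0.
 */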

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last fc_mode is PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode while DCB is
		 * enabled, so that fc_mode can be restored to the correct
		 * value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
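
/* Mapping example for the TC-based branch (illustrative): with two
 * vports and num_tc = 2,
 *   vport0: qs_offset + 0 -> pri 0, qs_offset + 1 -> pri 1
 *   vport1: qs_offset + 0 -> pri 0, qs_offset + 1 -> pri 1
 * i.e. the qsets of all vports that belong to the same TC share one
 * priority, whereas the vnet-based branch maps all HNAE3_MAX_TC qsets
 * of vport k to priority k.
 */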

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	/* Nothing is written to hardware here; the loop only validates
	 * that each TC's bw_limit can be expressed as qset level shaper
	 * parameters.
	 */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduling mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* For back pressure, the queue sets are divided into several groups,
 * each containing 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
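
/* Worked example (illustrative, assuming the non-extended layout of
 * 32 qsets per group): qs_id = 70 decomposes as grp = 70 / 32 = 2 and
 * sub_grp = 70 % 32 = 6, so back pressure for that qset is controlled
 * by bit 6 of group 2's bitmap.
 */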

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
		break;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC, when driver is initializing and MAC
	 * is in GE Mode, ignore the error here, otherwise initialization
	 * will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP) {
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	} else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}
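
/* E.g. num_tc = 4 gives hw_tc_map = 0b1111; num_tc = 0 is normalized
 * above to a single TC with hw_tc_map = 0b0001.
 */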

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *          | qs_id_h | vld | qs_id_l |
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *            \         \   /       /
	 *             \         \ /       /
	 * qset_id:   | 15 | 14 ~ 10 | 9 ~ 0 |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}
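
/* Minimal usage sketch (editorial illustration, compiled out; not part
 * of the driver): dump one qset's scheduling state via the query
 * helpers above, e.g. from a debugfs handler. Error handling is
 * reduced to early returns for brevity.
 */
#if 0
static void hclge_tm_example_dump_qset(struct hclge_dev *hdev, u16 qset_id)
{
	u8 priority, link_vld, mode, weight;

	if (hclge_tm_get_qset_map_pri(hdev, qset_id, &priority, &link_vld))
		return;
	if (hclge_tm_get_qset_sch_mode(hdev, qset_id, &mode))
		return;
	if (hclge_tm_get_qset_weight(hdev, qset_id, &weight))
		return;

	dev_info(&hdev->pdev->dev,
		 "qset %u: pri %u (vld %u), sch_mode %u, dwrr %u\n",
		 qset_id, priority, link_vld, mode, weight);
}
#endif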