// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: max tm rate available for configuration
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculated successfully, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}
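
/* Worked example (editorial, for illustration only; the numbers are not used
 * by the code): requesting ir = 5000 Mbps at priority level
 * (tick = 6 * 256 = 1536) starts from the default
 * ir_calc = 126 * 8000 / 1536 ~= 656 Mbps, which is below the target, so the
 * numerator branch raises ir_u until 126 * (2 ^ 3) * 8000 / 1536 = 5250
 * exceeds 5000. Since the match is not exact, ir_u is stepped back to 2 and
 * ir_b is recomputed as (5000 * 1536 + 16000) / 32000 = 240 (integer
 * division), giving ir_b = 240, ir_u = 2, ir_s = 0, i.e.
 * 240 * 4 * 8 / 1536 * 1000 = 5000 Mbps.
 */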

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}
	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The priority mapping register is four bytes wide; each byte holds
	 * two priorities, the higher 4 bits for the odd priority and the
	 * lower 4 bits for the even one, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
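
/* Illustrative note (assumed mapping, not taken from a real configuration):
 * with prio_tc = {0, 0, 1, 1, 2, 2, 3, 3}, hclge_up_to_tc_map() builds
 * pri[0] = 0x00, pri[1] = 0x11, pri[2] = 0x22 and pri[3] = 0x33, i.e. each
 * nibble carries the TC of one user priority in the layout described above.
 */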

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   |  15  | 14 ~ 10 |  9 ~ 0  |
	 *             /        /    \        \
	 * qset_id: | 15 ~ 11 |  10  |  9 ~ 0  |
	 *          | qs_id_h | vld  | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
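
/* Worked example (purely illustrative, following the layout in the comment
 * above): for a logical qset id of 1026, the low ten bits are 2 and the high
 * bits are 1, so the converted value becomes (1 << 11) | (2 << 0) = 0x0802
 * before the valid bit is OR-ed in at bit 10, giving 0x0C02 in map->qset_id.
 */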

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
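
/* Usage note (editorial; it mirrors the callers below rather than adding new
 * behaviour): shaper configuration typically runs hclge_shaper_para_calc()
 * first and then packs the result together with the default bucket sizes,
 * e.g.:
 *
 *	hclge_shaper_para_calc(rate, level, &ir_para, max_tm_rate);
 *	para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
 *					  ir_para.ir_s, HCLGE_SHAPER_BS_U_DEF,
 *					  HCLGE_SHAPER_BS_S_DEF);
 *
 * which is the pattern used by the port, PG and priority shaper setup
 * functions in this file.
 */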

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* if the user has not set rss, the rss_size should be
		 * compared with the valid msi numbers to ensure a one-to-one
		 * map between tqp and irq as default.
		 */
		if (!kinfo->req_rss_size)
			max_rss_size = min_t(u16, max_rss_size,
					     (hdev->num_nic_msi - 1) /
					     kinfo->tc_info.num_tc);

		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}
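
/* Editorial example (hypothetical numbers): with 4 TCs enabled and
 * num_nic_msi = 9, a vport that has not requested an explicit rss_size is
 * limited to (9 - 1) / 4 = 2 queues per TC, so that every in-use tqp can
 * still be paired with its own interrupt vector.
 */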

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	/* when mqprio is enabled, the tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			set_bit(i, &kinfo->tc_info.tc_en);
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to the default queue if the TC is disabled */
			clear_bit(i, &kinfo->tc_info.tc_en);
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode while
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}
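
/* Editorial note, an interpretation of the pattern above rather than a
 * statement from the hardware documentation: both the PG and priority shaper
 * setup paths program two buckets per entity. The C bucket is written with
 * zeroed IR parameters while the P bucket carries the parameters computed by
 * hclge_shaper_para_calc(), so the configured bw_limit appears to be enforced
 * through the P (peak) bucket.
 */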

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->tc_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}
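
/* Editorial summary of the scheduling hierarchy configured above (derived
 * from this file, not from a datasheet): queues are linked to qsets, qsets
 * to priorities, and priorities to priority groups, with the port at the
 * top. hclge_tm_schd_setup_hw() walks that hierarchy in order: mapping first
 * (hclge_tm_map_cfg), then shapers (hclge_tm_shaper_cfg), then DWRR weights
 * (hclge_tm_dwrr_cfg) and finally the per-level scheduling mode
 * (hclge_tm_schd_mode_hw).
 */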

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* The queue sets used for backpressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
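
/* Worked example (illustrative numbers only, based on the 32-qsets-per-group
 * layout described above): a vport whose qset for this TC has qs_id = 70
 * falls into group 70 / 32 = 2 with sub-group 70 % 32 = 6, so bit 6 is set
 * in the qs_bitmap sent for group 2 and left clear in every other group's
 * bitmap.
 */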

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	hclge_pfc_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_pri_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}