// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of the IR shaper
 * @ir_u: IR_U parameter of the IR shaper
 * @ir_s: IR_S parameter of the IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: failure
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
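
/* Worked example for hclge_shaper_para_calc(), derived from the formula
 * above: for a port-level shaper at HCLGE_ETHER_MAX_RATE (100000 Mbps) the
 * tick is 6 * 8 = 48, so the base rate 1008000 / 48 = 21000 Mbps is below
 * the target and the numerator is scaled up until it overshoots
 * (ir_u_calc = 3, 168000 Mbps). The resulting parameters are
 * ir_b = (100000 * 48 + 16000) / 32000 = 150, ir_u = 2 and ir_s = 0, which
 * plug back into the formula as
 * 150 * 2^2 * 8 * 1000 / (48 * 2^0) = 100000 Mbps.
 */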

static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}
	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
				     trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The priority register has four bytes and each byte holds two
	 * priorities: the lower 4 bits stand for the even priority and
	 * the upper 4 bits for the odd one, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
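
/* Program the user priority to TC mapping: all HNAE3_MAX_USER_PRIO
 * priorities are packed into a single descriptor, two priorities per byte
 * as laid out in hclge_fill_pri_array(), and written with one
 * HCLGE_OPC_PRI_TO_TC_MAPPING command.
 */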

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
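
/* The shaping helpers below all pack the same five shaper fields (IR_B,
 * IR_U, IR_S, BS_B, BS_S) into one 32-bit word via hclge_tm_set_field();
 * the bucket argument picks the C- or P-bucket opcode, and callers program
 * the calculated rate into the P bucket while leaving the C bucket rate at
 * zero.
 */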

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
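
/* Derive the per-vport TC layout from the global TM info: rss_size is the
 * queue count per enabled TC, qs_offset places each vport's queue sets
 * behind those of the previous vports (num_tc queue sets per vport), and a
 * disabled TC falls back to a single queue at offset 0.
 */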

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max,
			kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}
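
/* Pick the flow control mode for the current DCB state: with DCB disabled,
 * fall back to the last remembered mode; with DCB enabled, remember the
 * current mode and switch to PFC.
 */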

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
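
/* Queue set to priority mapping differs per scheduling mode: in TC-based
 * mode each vport owns one queue set per TC and that queue set is linked to
 * the matching TC priority; in VNET-based mode all of a vport's queue sets
 * are linked to the vport's own priority. The queue to queue set mapping is
 * then programmed for every vport.
 */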

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to configure the vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}
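
/* In TC-based mode the priority DWRR weight for each TC comes from its
 * priority group's tc_dwrr table, while every vport's queue set for that TC
 * uses the vport's own weight (initialised to 100 in
 * hclge_tm_vport_tc_info_update()).
 */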

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}
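
/* Levels 3 and 4 of the TM hierarchy here are the priority and queue set
 * schedulers (level 2, configured above, is the priority group). In
 * TC-based mode each TC priority uses its configured scheduling mode and
 * the matching queue set of every vport is set to DWRR; in VNET-based mode
 * the per-vport settings are applied instead.
 */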

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

/* Each TC has 1024 queue sets for back pressure; they are divided into
 * 32 groups of 32 queue sets each, so one group can be represented by a
 * u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i, k, qs_bitmap;
	int ret;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		qs_bitmap = 0;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);

			vport++;
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}
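
/* Top-level pause/PFC setup: program the pause parameters and the MAC pause
 * mode first; on DCB-capable hardware additionally enable PFC and the
 * per-TC queue set back pressure mapping.
 */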

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When MAC is in GE mode, hdev does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
	return 0;
}

int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		if (num_tc > hdev->vport[i].alloc_tqps)
			return -EINVAL;
	}

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);

	return 0;
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}