1 // SPDX-License-Identifier: GPL-2.0+ 2 // Copyright (c) 2016-2017 Hisilicon Limited. 3 4 #include <linux/etherdevice.h> 5 6 #include "hclge_cmd.h" 7 #include "hclge_main.h" 8 #include "hclge_tm.h" 9 10 enum hclge_shaper_level { 11 HCLGE_SHAPER_LVL_PRI = 0, 12 HCLGE_SHAPER_LVL_PG = 1, 13 HCLGE_SHAPER_LVL_PORT = 2, 14 HCLGE_SHAPER_LVL_QSET = 3, 15 HCLGE_SHAPER_LVL_CNT = 4, 16 HCLGE_SHAPER_LVL_VF = 0, 17 HCLGE_SHAPER_LVL_PF = 1, 18 }; 19 20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3 21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD 3 22 23 #define HCLGE_SHAPER_BS_U_DEF 5 24 #define HCLGE_SHAPER_BS_S_DEF 20 25 26 #define HCLGE_ETHER_MAX_RATE 100000 27 28 /* hclge_shaper_para_calc: calculate ir parameter for the shaper 29 * @ir: Rate to be config, its unit is Mbps 30 * @shaper_level: the shaper level. eg: port, pg, priority, queueset 31 * @ir_b: IR_B parameter of IR shaper 32 * @ir_u: IR_U parameter of IR shaper 33 * @ir_s: IR_S parameter of IR shaper 34 * 35 * the formula: 36 * 37 * IR_b * (2 ^ IR_u) * 8 38 * IR(Mbps) = ------------------------- * CLOCK(1000Mbps) 39 * Tick * (2 ^ IR_s) 40 * 41 * @return: 0: calculate sucessful, negative: fail 42 */ 43 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level, 44 u8 *ir_b, u8 *ir_u, u8 *ir_s) 45 { 46 const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = { 47 6 * 256, /* Prioriy level */ 48 6 * 32, /* Prioriy group level */ 49 6 * 8, /* Port level */ 50 6 * 256 /* Qset level */ 51 }; 52 u8 ir_u_calc = 0, ir_s_calc = 0; 53 u32 ir_calc; 54 u32 tick; 55 56 /* Calc tick */ 57 if (shaper_level >= HCLGE_SHAPER_LVL_CNT) 58 return -EINVAL; 59 60 tick = tick_array[shaper_level]; 61 62 /** 63 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0 64 * the formula is changed to: 65 * 126 * 1 * 8 66 * ir_calc = ---------------- * 1000 67 * tick * 1 68 */ 69 ir_calc = (1008000 + (tick >> 1) - 1) / tick; 70 71 if (ir_calc == ir) { 72 *ir_b = 126; 73 *ir_u = 0; 74 *ir_s = 0; 75 76 return 0; 77 } else if (ir_calc > ir) { 78 /* Increasing the denominator to 
select ir_s value */ 79 while (ir_calc > ir) { 80 ir_s_calc++; 81 ir_calc = 1008000 / (tick * (1 << ir_s_calc)); 82 } 83 84 if (ir_calc == ir) 85 *ir_b = 126; 86 else 87 *ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000; 88 } else { 89 /* Increasing the numerator to select ir_u value */ 90 u32 numerator; 91 92 while (ir_calc < ir) { 93 ir_u_calc++; 94 numerator = 1008000 * (1 << ir_u_calc); 95 ir_calc = (numerator + (tick >> 1)) / tick; 96 } 97 98 if (ir_calc == ir) { 99 *ir_b = 126; 100 } else { 101 u32 denominator = (8000 * (1 << --ir_u_calc)); 102 *ir_b = (ir * tick + (denominator >> 1)) / denominator; 103 } 104 } 105 106 *ir_u = ir_u_calc; 107 *ir_s = ir_s_calc; 108 109 return 0; 110 } 111 112 static int hclge_pfc_stats_get(struct hclge_dev *hdev, 113 enum hclge_opcode_type opcode, u64 *stats) 114 { 115 struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM]; 116 int ret, i, j; 117 118 if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT || 119 opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT)) 120 return -EINVAL; 121 122 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { 123 hclge_cmd_setup_basic_desc(&desc[i], opcode, true); 124 if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1)) 125 desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 126 else 127 desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT); 128 } 129 130 ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM); 131 if (ret) 132 return ret; 133 134 for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) { 135 struct hclge_pfc_stats_cmd *pfc_stats = 136 (struct hclge_pfc_stats_cmd *)desc[i].data; 137 138 for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) { 139 u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j; 140 141 if (index < HCLGE_MAX_TC_NUM) 142 stats[index] = 143 le64_to_cpu(pfc_stats->pkt_num[j]); 144 } 145 } 146 return 0; 147 } 148 149 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats) 150 { 151 return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats); 152 } 153 154 int 
/* Fetch PFC transmit packet counters for all TCs into @stats. */
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

/* Enable/disable MAC-level (802.3x) pause in tx and rx direction. */
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Configure PFC: tx/rx direction enable bits plus the per-priority
 * enable bitmap.
 */
static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Write the pause frame parameters (source MAC, transmit gap and
 * transmit time) to the MAC.
 */
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	/* the same address is programmed into both MAC address fields */
	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Update only the pause source MAC address: read back the current
 * gap/time parameters from hardware and rewrite them together with
 * the new @mac_addr.
 */
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	/* query (read=true) the current pause parameters first */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap,
				     trans_time);
}

/* Pack the TC for priority @pri_id into the shared prio->tc register
 * image @pri (4 bits per priority, two priorities per byte).
 * Returns -EINVAL if the mapped TC is out of range.
 */
static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * the register for priority has four bytes, the first bytes includes
	 *  priority0 and priority1, the higher 4bit stands for priority1
	 *  while the lower 4bit stands for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

/* Program the full user-priority -> TC mapping into hardware. */
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Link priority group @pg_id to the priorities in @pri_bit_map. */
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Link queue set @qs_id to priority @pri and mark the link valid. */
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
/* Link NIC queue @q_id to queue set @qs_id (valid bit OR-ed into the
 * qset id field).
 */
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Set the DWRR weight of priority group @pg_id. */
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Set the DWRR weight of priority @pri_id. */
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Set the DWRR weight of queue set @qs_id. */
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Program the shaper (IR_B/IR_U/IR_S rate fields plus BS_B/BS_S bucket
 * fields) of priority group @pg_id, for either the C or P bucket
 * selected by @bucket.
 */
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Program the port-level shaper at the maximum ethernet rate with the
 * default bucket sizes.
 */
static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Program the shaper of priority @pri_id (C or P bucket per @bucket). */
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
/* Select SP or DWRR scheduling for priority group @pg_id based on its
 * configured pg_sch_mode.
 */
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Select SP or DWRR scheduling for priority @pri_id based on its
 * configured tc_sch_mode.
 */
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Select the scheduling mode (@mode) for queue set @qs_id. */
static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Write one 32-qset back-pressure bitmap for TC @tc, group @grp_id. */
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

/* Recompute a vport's TC-related bookkeeping: TC count, rss_size
 * (honouring a valid user-requested size), per-TC queue layout,
 * qset offset and the prio->tc copy for the stack.
 */
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* honour the requested rss size if it is set and fits; otherwise
	 * clamp (or grow, when no request is pending) to max_rss_size
	 */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, max_rss_size);
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
/* Refresh the TC info of every allocated vport. */
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

/* Initialize per-TC scheduling defaults (DWRR mode, pg 0, pg 0's bw
 * limit), the prio->tc identity map for in-range priorities, and the
 * DCB enable flag (set only when more than one TC is in use).
 */
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

/* Initialize priority-group defaults; only pg 0 carries the hardware
 * TC bitmap and the per-TC DWRR weights (100 each), and only pg 0 gets
 * a non-zero group weight.
 */
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		/* only pg 0 maps TCs and carries per-TC weights */
		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

/* Reconcile the flow-control mode with the DCB state: restore the
 * pre-DCB mode when DCB is off, or remember the current mode and
 * switch to PFC when DCB is on.
 */
static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disable, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time record the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

/* Initialize all software TM scheduling state (pg, tc, vport, pfc).
 * Non-TC-based scheduling requires exactly one priority group.
 */
static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

/* Push each priority group's TC bitmap to hardware (TC-based mode
 * only; a no-op otherwise).
 */
static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

/* Configure both shaper buckets of every priority group: the C bucket
 * is zero-rated, the P bucket carries the group's bw_limit (TC-based
 * mode only).
 */
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}
/* Write each priority group's DWRR weight to hardware (TC-based mode
 * only; a no-op otherwise).
 */
static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* Map every queue of @vport to the queue set of its TC
 * (qset = vport->qs_offset + tc index).
 */
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* Establish the qset->priority mapping for all vports — per TC in
 * TC-based mode, per VF in VNET-based mode — then map every queue to
 * its qset.
 */
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

/* Configure both shaper buckets for every TC priority: C bucket is
 * zero-rated, P bucket carries the TC's bw_limit.
 */
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
				hdev, HCLGE_TM_SHAP_C_BUCKET, i,
				0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
				HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
				hdev, HCLGE_TM_SHAP_P_BUCKET, i,
				ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
				HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

/* VNET-based mode: shape one vport's priority (pri id = vport id)
 * with the vport's bw_limit on the P bucket.
 */
static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}
/* VNET-based mode: validate shaper parameters for each of the vport's
 * qsets. NOTE(review): only the calculation/validation is done here —
 * no qset shaper write command is issued; presumably intentional,
 * confirm against hardware spec.
 */
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

/* VNET-based mode: configure pri and qset shapers for every vport. */
static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

/* Dispatch priority shaper configuration per scheduling mode. */
static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

/* TC-based mode: program each TC's priority weight (taken from its
 * priority group's tc_dwrr) and each vport's qset weight for that TC.
 */
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/* VNET-based mode: program one vport's VF weight and its per-TC qset
 * weights (taken from pg 0's tc_dwrr).
 */
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/* VNET-based mode: program DWRR weights for every vport. */
static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

/* Dispatch priority DWRR configuration per scheduling mode. */
static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

/* Program all TM mappings: prio->tc, pg->pri and pri/q/qs links. */
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

/* Program all shapers: port, priority group and priority level. */
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

/* Program all DWRR weights: priority group then priority level. */
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
/* Configure the scheduling mode of each priority group (level 2 of
 * the TM hierarchy). Only applies to the TC-based scheduler.
 */
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only being config on TC-Based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

/* VNET-based mode: set the scheduling mode of one vport's priority
 * and of each of its qsets.
 */
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

/* Configure scheduling modes for levels 3 and 4 (priority and qset)
 * according to the active scheduler mode.
 */
static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

/* Configure scheduling modes at every level of the TM hierarchy. */
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

/* Full TM scheduler hardware setup: mappings, shapers, DWRR weights
 * and scheduling modes, in that order.
 */
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping  */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

/* Program the default pause parameters using the MAC's address. */
static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

/* Enable PFC in both directions when fc_mode is PFC (using the
 * hw_pfc_map priority bitmap), otherwise disable it.
 */
static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}
/* Each Tc has a 1024 queue sets to backpress, it divides to
 * 32 group, each group contains 32 queue sets, which can be
 * represented by u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		/* collect, for this group, the qsets of every vport that
		 * belong to @tc
		 */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

/* Translate the flow-control mode into MAC pause tx/rx enables.
 * PFC mode disables MAC-level pause entirely (per-priority pause is
 * handled by hclge_pfc_setup_hw instead).
 */
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

/* Configure back-pressure qset bitmaps for every TC. */
static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return ret;
}

/* Full pause setup: pause parameters, MAC pause, then (on DCB-capable
 * hardware) PFC and back-pressure. A PFC configuration failure is only
 * warned about, not treated as fatal.
 */
int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When MAC is GE Mode, hdev does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	return hclge_tm_bp_setup(hdev);
}

/* Replace the prio->tc map in the device and in every vport's kinfo. */
void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

/* Change the number of TCs, rebuild the hardware TC bitmap (falling
 * back to a single TC when @num_tc is 0) and re-init scheduling info.
 */
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

/* Program the complete TM configuration (scheduler + pause) into
 * hardware. Only TC-based and VNET-based schedulers are supported.
 */
int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

/* Initialize software TM state and push it to hardware. */
int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}

/* Refresh the PF vport's TC info and queue->qset mapping after a
 * queue-layout change; also redo back-pressure setup when DCB is on.
 * NOTE(review): only hdev->vport[0] (the PF) is updated here — confirm
 * callers do not expect VF vports to be refreshed too.
 */
int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}