// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
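/* Worked example of the calculation above, for illustration only: requesting
 * ir = 100000 Mbps at port level (tick = 6 * 8 = 48) gives an initial
 * ir_calc of 21000 Mbps, below the target, so ir_u is raised until
 * ir_calc >= ir (ir_u = 3, ir_calc = 168000); since that overshoots, ir_b is
 * recomputed against a denominator of 8000 * 2^2, yielding
 * ir_b = 150, ir_u = 2, ir_s = 0:
 *
 *	150 * (2 ^ 2) * 8
 *	----------------- * 1000 = 100000 Mbps
 *	  48 * (2 ^ 0)
 */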
static int hclge_pfc_stats_get(struct hclge_dev *hdev,
			       enum hclge_opcode_type opcode, u64 *stats)
{
	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
	int ret, i, j;

	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
		return -EINVAL;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
		if (i != (HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1))
			desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
		else
			desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
	}

	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
	if (ret)
		return ret;

	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
		struct hclge_pfc_stats_cmd *pfc_stats =
				(struct hclge_pfc_stats_cmd *)desc[i].data;

		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;

			if (index < HCLGE_MAX_TC_NUM)
				stats[index] =
					le64_to_cpu(pfc_stats->pkt_num[j]);
		}
	}
	return 0;
}

int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}

int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes. The first byte covers
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
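/* Illustration of the byte packing above: with prio_tc = {0, 1, 2, 3, 0, 1,
 * 2, 3} (and at least four TCs enabled), the helper fills
 * pri[] = {0x10, 0x32, 0x10, 0x32}.
 */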
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		 HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one TC for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, max_rss_size);
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
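/* For illustration, with the qset layout above and num_tc = 4: the PF
 * (vport 0) owns qsets 0..3 (one per TC), while each VF owns a single
 * qset, VF1 -> qset 4, VF2 -> qset 5, and so on.
 */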
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but the last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be restored to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
			hdev->tm_info.pg_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PG,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
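/* To summarize the mapping set up above: in TC-based mode each vport's
 * qset i is linked to priority i, so with num_tc = 4 the PF's qsets 0..3
 * map to priorities 0..3 and a VF's single qset maps to priority 0.
 * In VNET-based mode every qset of vport k is linked to priority k instead.
 */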
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_PRI,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
			hdev->tm_info.tc_info[i].bw_limit,
			HCLGE_SHAPER_LVL_QSET,
			&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* Each TC has 1024 queue sets for back pressure. They are divided into
 * 32 groups of 32 queue sets each, so every group can be represented
 * by one u32 bitmap.
 */
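/* For example, assuming the GRP/SUB_GRP fields encode grp = qs_id / 32 and
 * sub_grp = qs_id % 32, qset 70 belongs to group 2 and sets bit 6 in that
 * group's bitmap.
 */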
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	int i;

	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
					      HCLGE_BP_GRP_ID_S);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret = 0;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return ret;
}
int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported devices support qset back pressure and the
	 * pfc cmd.
	 */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* The GE MAC does not support PFC. When the driver is initializing
	 * and the MAC is in GE mode, ignore the error here; otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
		return 0;

	return hclge_tm_bp_setup(hdev);
}