// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate the ir parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: maximum tm rate available for configuration
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}

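/* Worked example of the calculation above (illustrative only, not from the
 * original source): requesting ir = 100 Mbps on a priority-level shaper
 * (tick = 6 * 256 = 1536). With the default ir_b = 126, ir_u = 0, ir_s = 0
 * the shaper runs at roughly 126 * 8 * 1000 / 1536 ~= 656 Mbps, which is
 * above the request, so the denominator branch raises ir_s until the rate
 * drops below 100 Mbps: ir_s = 3 gives 1008000 / (1536 * 8) = 82 Mbps.
 * ir_b is then rounded to (100 * 1536 * 8 + 4000) / 8000 = 154, so the
 * programmed rate is 154 * 8 * 1000 / (1536 * 8) ~= 100.3 Mbps.
 */
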
static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
	const u16 *offset;
	int i;

	if (tx)
		offset = hclge_pfc_tx_stats_offset;
	else
		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, true, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
			   u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/**
	 * The register for priority has four bytes; the first byte holds
	 * priority 0 and priority 1: the higher 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

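/* Example of the nibble packing above (illustrative values, not from the
 * original source): if user priority 3 is mapped to TC 2, pri[3 >> 1] = pri[1]
 * gets 2 << 4 in its high nibble; with priority 2 mapped to TC 0 the low
 * nibble of the same byte stays 0, so the second register byte reads 0x20.
 */
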
int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
{
	u8 i;

	hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
	hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
	for (i = 0; i < HNAE3_MAX_DSCP; i++)
		hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
}

int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 pri_id, tc_id, i, j;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);

	/* The low 32 DSCP settings use bd0, the high 32 DSCP settings use bd1 */
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		/* Each DSCP setting has 4 bits, so each byte holds two DSCP
		 * settings
		 */
		req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
	}

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
}

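/* Illustrative example (assuming HCLGE_DSCP_TC_SHIFT(i) selects the low or
 * high nibble depending on whether i is even or odd, as the comment above
 * implies): DSCP 5 mapped to a priority whose TC is 3 sets the nibble for
 * DSCP 5 in req0[5 >> 1] = req0[2], while DSCP 37 (= 5 + 32) lands in the
 * corresponding nibble of req1[2].
 */
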
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
				      bool link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /           /  \        \
	 *           /           /    \        \
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0   |
	 *          | qs_id_h | vld | qs_id_l  |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

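/* Worked example of the conversion above (illustrative, not from the original
 * source): qs_id = 1034 = 0x40A has qs_id_l = 0xA (bits 9 ~ 0) and
 * qs_id_h = 1 (bits 14 ~ 10). After re-packing, qs_id_h moves to bits 15 ~ 11
 * and the vld bit (bit 10) is set, so the descriptor carries 0x0C0A.
 */
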
static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one TC for a VF for simplicity. A VF's vport_id is non-zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;	/* 100 percent as init */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* when mqprio is enabled, tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

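/* Illustrative layout produced by the loop above (example values, not from
 * the original source): with rss_size = 4 and only TC0/TC1 enabled in
 * hw_tc_map, TC0 covers TQPs 0-3 (offset 0, count 4) and TC1 covers TQPs 4-7
 * (offset 4, count 4); every disabled TC falls back to offset 0 with a single
 * queue.
 */
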
static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i, tc_sch_mode;
	u32 bw_limit;

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
		} else {
			tc_sch_mode = HCLGE_SCH_MODE_SP;
			bw_limit = 0;
		}

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 pri, link_vld);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 k, true);
			if (ret)
				return ret;
		}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
	else
		return -EINVAL;

	if (ret)
		return ret;

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		if (rate) {
			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
						     &ir_para, max_tm_rate);
			if (ret)
				return ret;

			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
								   ir_para.ir_u,
								   ir_para.ir_s,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
		} else {
			shaper_para_c = 0;
			shaper_para_p = 0;
		}

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tc_max; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

			if (i >= kinfo->tc_info.max_tc)
				continue;

			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
		ret = hclge_dscp_to_tc_map(hdev);
		if (ret)
			return ret;
	}

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 mode;
	u16 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;

		if (pri_id >= kinfo->tc_info.max_tc)
			continue;

		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
		       HCLGE_SCH_MODE_SP;
		ret = hclge_tm_qs_schd_mode_cfg(hdev,
						vport[i].qs_offset + pri_id,
						mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tc_max; i++) {
			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* The queues used for backpressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}

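/* Illustrative example of the grouping above (example values, not from the
 * original source): with 32 queue sets per group, a vport whose
 * qs_offset + tc equals 70 belongs to group 70 / 32 = 2 with sub-group
 * 70 % 32 = 6, so bit 6 is set in the qs_bitmap sent for group 2.
 */
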
static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC; when the driver is initializing and
	 * the MAC is in GE mode, ignore the error here, otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);
	hclge_dscp_to_prio_map_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *          | qs_id_h | vld | qs_id_l |
	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
	 *           \         \   /         /
	 *            \         \ /         /
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

ret); 2101 return ret; 2102 } 2103 2104 port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; 2105 shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para); 2106 para->ir_b = hclge_tm_get_field(shapping_para, IR_B); 2107 para->ir_u = hclge_tm_get_field(shapping_para, IR_U); 2108 para->ir_s = hclge_tm_get_field(shapping_para, IR_S); 2109 para->bs_b = hclge_tm_get_field(shapping_para, BS_B); 2110 para->bs_s = hclge_tm_get_field(shapping_para, BS_S); 2111 para->flag = port_shap_cfg_cmd->flag; 2112 para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate); 2113 2114 return 0; 2115 } 2116