// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI = 0,
	HCLGE_SHAPER_LVL_PG = 1,
	HCLGE_SHAPER_LVL_PORT = 2,
	HCLGE_SHAPER_LVL_QSET = 3,
	HCLGE_SHAPER_LVL_CNT = 4,
	HCLGE_SHAPER_LVL_VF = 0,
	HCLGE_SHAPER_LVL_PF = 1,
};

#define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
#define HCLGE_TM_PFC_NUM_GET_PER_CMD	3

#define HCLGE_SHAPER_BS_U_DEF	5
#define HCLGE_SHAPER_BS_S_DEF	20

/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_para: parameters of the IR shaper
 * @max_tm_rate: maximum configurable TM rate
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculated successfully, negative: fail
 */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  struct hclge_shaper_ir_para *ir_para,
				  u32 max_tm_rate)
{
#define DEFAULT_SHAPER_IR_B	126
#define DIVISOR_CLK		(1000 * 8)
#define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)

	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,	/* Priority level */
		6 * 32,		/* Priority group level */
		6 * 8,		/* Port level */
		6 * 256		/* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > max_tm_rate)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		ir_para->ir_u = 0;
		ir_para->ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc >= ir && ir) {
			ir_s_calc++;
			ir_calc = DEFAULT_DIVISOR_IR_B /
				  (tick * (1 << ir_s_calc));
		}

		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
		} else {
			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
					denominator;
		}
	}

	ir_para->ir_u = ir_u_calc;
	ir_para->ir_s = ir_s_calc;

	return 0;
}
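
/* For example, for a priority level shaper (tick = 6 * 256 = 1536) and
 * ir = 5000 Mbps, the default setting (ir_b = 126, ir_u = 0, ir_s = 0)
 * only yields about 656 Mbps, so the search above raises ir_u until the
 * rate overshoots the target (ir_u = 3 gives ~5250 Mbps), then steps back
 * to ir_u = 2 and recomputes ir_b:
 *	ir_b = (5000 * 1536 + 16000) / (8000 * 4) = 240
 * which gives 240 * (2 ^ 2) * 8 / 1536 * 1000 = 5000 Mbps exactly,
 * with ir_u = 2 and ir_s = 0.
 */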

static const u16 hclge_pfc_tx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
};

static const u16 hclge_pfc_rx_stats_offset[] = {
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
};

static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
{
	const u16 *offset;
	int i;

	if (tx)
		offset = hclge_pfc_tx_stats_offset;
	else
		offset = hclge_pfc_rx_stats_offset;

	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
}

void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, false, stats);
}

void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	hclge_pfc_stats_get(hdev, true, stats);
}

int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
				   (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
			   u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes, the first byte includes
	 * priority0 and priority1, the higher 4 bits stand for priority1
	 * while the lower 4 bits stand for priority0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}
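
/* For example, when priority 5 is mapped to tc 3, hclge_fill_pri_array()
 * sets the high nibble of the third byte:
 *	pri[5 >> 1] |= 3 << ((5 & 1) * 4), i.e. pri[2] |= 3 << 4.
 */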

int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
{
	u8 i;

	hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
	hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
	for (i = 0; i < HNAE3_MAX_DSCP; i++)
		hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
}

int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
	u8 *req0 = (u8 *)desc[0].data;
	u8 *req1 = (u8 *)desc[1].data;
	u8 pri_id, tc_id, i, j;

	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
	desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);

	/* The low 32 DSCP settings use BD0, the high 32 DSCP settings use BD1 */
	for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		/* Each DSCP setting has 4 bits, so each byte saves two DSCP
		 * settings
		 */
		req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);

		j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
		pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
		pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
		tc_id = hdev->tm_info.prio_tc[pri_id];
		req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
	}

	return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
}
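
/* For example, DSCP 0 and DSCP 1 end up packed as the two nibbles of
 * req0[0] (BD0), while DSCP 32 and DSCP 33 share req1[0] (BD1); the
 * nibble each one lands in is selected by HCLGE_DSCP_TC_SHIFT().
 */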

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
				      bool link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);

	/* convert qs_id to the following format to support qset_id >= 1024
	 * qs_id:   | 15 | 14 ~ 10 |  9 ~ 0   |
	 *            /         /          \
	 *           /         /            \
	 * qset_id: | 15 ~ 11 |  10  |  9 ~ 0  |
	 *          | qs_id_h | vld  | qs_id_l |
	 */
	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
				  HCLGE_TM_QS_ID_H_S);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
			qs_id_h);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
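
/* For example, assuming the L field covers bits 9 ~ 0 and the H field
 * bits 14 ~ 10 as the diagram above shows, qs_id 1034 (0x40a) is
 * rewritten as (1 << 11) | 10 = 0x80a and then ORed with the bit-10
 * link-valid mask before being written to the descriptor.
 */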

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u32 shapping_para, u32 rate)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
			  HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						   ir_para.ir_s,
						   HCLGE_SHAPER_BS_U_DEF,
						   HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para, u32 rate)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
			  HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);

	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_shaper_ir_para ir_para;
	struct hclge_dev *hdev = vport->back;
	struct hclge_desc desc;
	u32 shaper_para;
	int ret, i;

	if (!max_tx_rate)
		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;

	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
					   false);

		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);

		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);

		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
		if (ret) {
			dev_err(&hdev->pdev->dev,
				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
				vport->vport_id, shap_cfg_cmd->qs_id,
				max_tx_rate, ret);
			return ret;
		}
	}

	return 0;
}

static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size = 0;
	int i;

	if (!tc_info->mqprio_active)
		return vport->alloc_tqps / tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
			continue;
		if (max_rss_size < tc_info->tqp_count[i])
			max_rss_size = tc_info->tqp_count[i];
	}

	return max_rss_size;
}

static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hclge_dev *hdev = vport->back;
	int sum = 0;
	int i;

	if (!tc_info->mqprio_active)
		return kinfo->rss_size * tc_info->num_tc;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
			sum += tc_info->tqp_count[i];
	}

	return sum;
}

static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 vport_max_rss_size;
	u16 max_rss_size;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one TC for a VF for simplicity. A VF's vport_id is non-zero.
	 */
	if (vport->vport_id) {
		kinfo->tc_info.max_tc = 1;
		kinfo->tc_info.num_tc = 1;
		vport->qs_offset = HNAE3_MAX_TC +
				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
		vport_max_rss_size = hdev->vf_rss_size_max;
	} else {
		kinfo->tc_info.max_tc = hdev->tc_max;
		kinfo->tc_info.num_tc =
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
		vport->qs_offset = 0;
		vport_max_rss_size = hdev->pf_rss_size_max;
	}

	max_rss_size = min_t(u16, vport_max_rss_size,
			     hclge_vport_get_max_rss_size(vport));

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		kinfo->rss_size = max_rss_size;
	}
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	hclge_tm_update_kinfo_rss_size(vport);
	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
	vport->dwrr = 100;	/* 100 percent as init */
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	if (vport->vport_id == PF_VPORT_ID)
		hdev->rss_cfg.rss_size = kinfo->rss_size;

	/* When mqprio is enabled, tc_info has already been updated. */
	if (kinfo->tc_info.mqprio_active)
		return;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info.tqp_offset[i] = 0;
			kinfo->tc_info.tqp_count[i] = 1;
		}
	}

	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i, tc_sch_mode;
	u32 bw_limit;

	for (i = 0; i < hdev->tc_max; i++) {
		if (i < hdev->tm_info.num_tc) {
			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
		} else {
			tc_sch_mode = HCLGE_SCH_MODE_SP;
			bw_limit = 0;
		}

		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;
}
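
/* For example, with num_tc = 4 the loop above maps user priorities 0 ~ 3
 * to TC 0 ~ 3 respectively and maps priorities 4 ~ 7 back to TC 0.
 */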

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100
#define DEFAULT_BW_WEIGHT	1

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit =
					hdev->ae_dev->dev_specs.max_tm_rate;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
		for (; k < HNAE3_MAX_TC; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
	}
}

static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
{
	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "Only 1 tc used, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static void hclge_update_fc_mode(struct hclge_dev *hdev)
{
	if (!hdev->tm_info.pfc_en) {
		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
		return;
	}

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
{
	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
		hclge_update_fc_mode(hdev);
	else
		hclge_update_fc_mode_by_dcb_flag(hdev);
}

static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_tm_pfc_info_update(hdev);
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		u32 rate = hdev->tm_info.pg_info[i].bw_limit;

		/* Calc shaper para */
		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
							 ir_para.ir_u,
							 ir_para.ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* pg to prio */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
	struct hnae3_queue **tqp = kinfo->tqp;
	u32 i, j;
	int ret;

	for (i = 0; i < tc_info->num_tc; i++) {
		for (j = 0; j < tc_info->tqp_count[i]; j++) {
			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, one by one mapping */
	for (k = 0; k < hdev->num_alloc_vport; k++) {
		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
			bool link_vld = i < kinfo->tc_info.num_tc;

			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 pri, link_vld);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u16 i, k;
	int ret;

	/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
	for (k = 0; k < hdev->num_alloc_vport; k++)
		for (i = 0; i < HNAE3_MAX_TC; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
							 vport[k].qs_offset + i,
							 k, true);
			if (ret)
				return ret;
		}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
	else
		return -EINVAL;

	if (ret)
		return ret;

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para_c, shaper_para_p;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tc_max; i++) {
		u32 rate = hdev->tm_info.tc_info[i].bw_limit;

		if (rate) {
			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
						     &ir_para, max_tm_rate);
			if (ret)
				return ret;

			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
								   ir_para.ir_u,
								   ir_para.ir_s,
								   HCLGE_SHAPER_BS_U_DEF,
								   HCLGE_SHAPER_BS_S_DEF);
		} else {
			shaper_para_c = 0;
			shaper_para_p = 0;
		}

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para_c, rate);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para_p, rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	struct hclge_shaper_ir_para ir_para;
	u32 shaper_para;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_para,
				     hdev->ae_dev->dev_specs.max_tm_rate);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
						 ir_para.ir_s,
						 HCLGE_SHAPER_BS_U_DEF,
						 HCLGE_SHAPER_BS_S_DEF);
	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id, shaper_para,
					vport->bw_limit);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
	struct hclge_shaper_ir_para ir_para;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
					     HCLGE_SHAPER_LVL_QSET,
					     &ir_para, max_tm_rate);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tc_max; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;

			if (i >= kinfo->tc_info.max_tc)
				continue;

			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;

		if (!hnae3_dev_dcb_supported(hdev))
			return 0;

		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
		if (ret == -EOPNOTSUPP) {
			dev_warn(&hdev->pdev->dev,
				 "fw %08x doesn't support ets tc weight cmd\n",
				 hdev->fw_version);
			ret = 0;
		}

		return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
		ret = hclge_dscp_to_tc_map(hdev);
		if (ret)
			return ret;
	}

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 mode;
	u16 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
	if (ret)
		return ret;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;

		if (pri_id >= kinfo->tc_info.max_tc)
			continue;

		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
		       HCLGE_SCH_MODE_SP;
		ret = hclge_tm_qs_schd_mode_cfg(hdev,
						vport[i].qs_offset + pri_id,
						mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tc_max; i++) {
			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	ret = hclge_tm_schd_mode_hw(hdev);
	if (ret)
		return ret;

	return hclge_tm_flush_cfg(hdev, false);
}

static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
{
	struct hclge_mac *mac = &hdev->hw.mac;

	return hclge_pause_param_cfg(hdev, mac->mac_addr,
				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.pfc_en);
}

/* The queue sets used for backpressure are divided into several groups;
 * each group contains 32 queue sets, which can be represented by a u32 bitmap.
 */
static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
{
	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
	u8 grp_num = HCLGE_BP_GRP_NUM;
	int i;

	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
		grp_num = HCLGE_BP_EXT_GRP_NUM;
		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
	}

	for (i = 0; i < grp_num; i++) {
		u32 qs_bitmap = 0;
		int k, ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hclge_vport *vport = &hdev->vport[k];
			u16 qs_id = vport->qs_offset + tc;
			u8 grp, sub_grp;

			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
						  HCLGE_BP_SUB_GRP_ID_S);
			if (i == grp)
				qs_bitmap |= (1 << sub_grp);
		}

		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
		if (ret)
			return ret;
	}

	return 0;
}
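
/* For example, the qset for this TC on a vport with qs_id 40 falls into
 * group 1 (40 / 32) and sets bit 8 (40 % 32) in that group's bitmap.
 */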

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	case HCLGE_FC_PFC:
		tx_en = false;
		rx_en = false;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

static int hclge_tm_bp_setup(struct hclge_dev *hdev)
{
	int ret;
	int i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_bp_setup_hw(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	ret = hclge_pause_param_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_mac_pause_setup_hw(hdev);
	if (ret)
		return ret;

	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* GE MAC does not support PFC. When the driver is initializing and
	 * the MAC is in GE mode, ignore the error here; otherwise
	 * initialization will fail.
	 */
	ret = hclge_pfc_setup_hw(hdev);
	if (init && ret == -EOPNOTSUPP)
		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
	else if (ret) {
		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
			ret);
		return ret;
	}

	return hclge_tm_bp_setup(hdev);
}

void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->tc_info.prio_tc[i] = prio_tc[i];
		}
	}
}

void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 bit_map = 0;
	u8 i;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, init);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
	    hdev->tm_info.num_pg != 1)
		return -EINVAL;

	hclge_tm_schd_info_init(hdev);
	hclge_dscp_to_prio_map_init(hdev);

	return hclge_tm_init_hw(hdev, true);
}

int hclge_tm_vport_map_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;

	hclge_tm_vport_tc_info_update(vport);

	ret = hclge_vport_q_to_qs_map(hdev, vport);
	if (ret)
		return ret;

	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
		return 0;

	return hclge_tm_bp_setup(hdev);
}

int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		/* Each PF has 8 qsets and each VF has 1 qset */
		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*qset_num = le16_to_cpu(nodes->qset_num);
	return 0;
}

int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
{
	struct hclge_tm_nodes_cmd *nodes;
	struct hclge_desc desc;
	int ret;

	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
		return 0;
	}

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pri num, ret = %d\n", ret);
		return ret;
	}

	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
	*pri_num = nodes->pri_num;
	return 0;
}

int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
			      u8 *link_vld)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
	map->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset map priority, ret = %d\n", ret);
		return ret;
	}

	*priority = map->priority;
	*link_vld = map->link_vld;
	return 0;
}

int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
{
	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = qs_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
{
	struct hclge_qs_weight_cmd *qs_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
	qs_weight->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset weight, ret = %d\n", ret);
		return ret;
	}

	*weight = qs_weight->dwrr;
	return 0;
}

int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get qset %u shaper, ret = %d\n", qset_id,
			ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
	return 0;
}

int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
{
	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
	pri_sch_mode->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = pri_sch_mode->sch_mode;
	return 0;
}

int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
{
	struct hclge_priority_weight_cmd *priority_weight;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
	priority_weight->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority weight, ret = %d\n", ret);
		return ret;
	}

	*weight = priority_weight->dwrr;
	return 0;
}

int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
			    enum hclge_opcode_type cmd,
			    struct hclge_tm_shaper_para *para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
	shap_cfg_cmd->pri_id = pri_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get priority shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
	return 0;
}

int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;
	u16 qs_id_l;
	u16 qs_id_h;
	int ret;

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
	map->nq_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to qset map, ret = %d\n", ret);
		return ret;
	}
	*qset_id = le16_to_cpu(map->qset_id);

	/* convert qset_id to the following format, drop the vld bit
	 *          | qs_id_h | vld  | qs_id_l |
	 * qset_id: | 15 ~ 11 |  10  |  9 ~ 0  |
	 *             \          \        \
	 *              \          \        \
	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
	 */
	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
				  HCLGE_TM_QS_ID_L_S);
	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
				  HCLGE_TM_QS_ID_H_EXT_S);
	*qset_id = 0;
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
			qs_id_l);
	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
			qs_id_h);
	return 0;
}

int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
{
#define HCLGE_TM_TC_MASK		0x7

	struct hclge_tqp_tx_queue_tc_cmd *tc;
	struct hclge_desc desc;
	int ret;

	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
	tc->queue_id = cpu_to_le16(q_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get queue to tc map, ret = %d\n", ret);
		return ret;
	}

	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
	return 0;
}

int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
			       u8 *pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
	map->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg to pri map, ret = %d\n", ret);
		return ret;
	}

	*pri_bit_map = map->pri_bit_map;
	return 0;
}

int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
{
	struct hclge_pg_weight_cmd *pg_weight_cmd;
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
	pg_weight_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg weight, ret = %d\n", ret);
		return ret;
	}

	*weight = pg_weight_cmd->dwrr;
	return 0;
}

int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
{
	struct hclge_desc desc;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
	desc.data[0] = cpu_to_le32(pg_id);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg sch mode, ret = %d\n", ret);
		return ret;
	}

	*mode = (u8)le32_to_cpu(desc.data[1]);
	return 0;
}

int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
			   enum hclge_opcode_type cmd,
			   struct hclge_tm_shaper_para *para)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
		return -EINVAL;

	hclge_cmd_setup_basic_desc(&desc, cmd, true);
	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
	shap_cfg_cmd->pg_id = pg_id;
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get pg shaper(%#x), ret = %d\n",
			cmd, ret);
		return ret;
	}

	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
	return 0;
}

int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
			     struct hclge_tm_shaper_para *para)
{
	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to get port shaper, ret = %d\n", ret);
		return ret;
	}

	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
	para->flag = port_shap_cfg_cmd->flag;
	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);

	return 0;
}

int hclge_tm_flush_cfg(struct hclge_dev *hdev, bool enable)
{
	struct hclge_desc desc;
	int ret;

	if (!hnae3_ae_dev_tm_flush_supported(hdev))
		return 0;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_FLUSH, false);

	desc.data[0] = cpu_to_le32(enable ? HCLGE_TM_FLUSH_EN_MSK : 0);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to config tm flush, ret = %d\n", ret);
		return ret;
	}

	if (enable)
		msleep(HCLGE_TM_FLUSH_TIME_MS);

	return ret;
}