1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9 
10 enum hclge_shaper_level {
11 	HCLGE_SHAPER_LVL_PRI	= 0,
12 	HCLGE_SHAPER_LVL_PG	= 1,
13 	HCLGE_SHAPER_LVL_PORT	= 2,
14 	HCLGE_SHAPER_LVL_QSET	= 3,
15 	HCLGE_SHAPER_LVL_CNT	= 4,
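	/* VF/PF levels alias PRI/PG, so they reuse the same tick values
	 * in hclge_shaper_para_calc()
	 */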
16 	HCLGE_SHAPER_LVL_VF	= 0,
17 	HCLGE_SHAPER_LVL_PF	= 1,
18 };
19 
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
22 
23 #define HCLGE_SHAPER_BS_U_DEF	5
24 #define HCLGE_SHAPER_BS_S_DEF	20
25 
26 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
27  * @ir: rate to be configured, in Mbps
28  * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
29  * @ir_para: parameters of IR shaper
30  * @max_tm_rate: max tm rate available to configure
31  *
32  * the formula:
33  *
34  *		IR_b * (2 ^ IR_u) * 8
35  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
36  *		Tick * (2 ^ IR_s)
37  *
38  * @return: 0: calculation successful, negative: fail
39  */
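/* Worked example: at priority level the tick is 6 * 256 = 1536, so the
 * default ir_b = 126, ir_u = 0, ir_s = 0 gives roughly 656 Mbps. For a
 * 5000 Mbps target the ir_u branch settles on ir_u = 2 and ir_b = 240:
 * 240 * (2 ^ 2) * 8 / 1536 * 1000 = 5000 Mbps.
 */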
40 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
41 				  struct hclge_shaper_ir_para *ir_para,
42 				  u32 max_tm_rate)
43 {
44 #define DEFAULT_SHAPER_IR_B	126
45 #define DIVISOR_CLK		(1000 * 8)
46 #define DEFAULT_DIVISOR_IR_B	(DEFAULT_SHAPER_IR_B * DIVISOR_CLK)
47 
48 	static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
49 		6 * 256,        /* Priority level */
50 		6 * 32,         /* Priority group level */
51 		6 * 8,          /* Port level */
52 		6 * 256         /* Qset level */
53 	};
54 	u8 ir_u_calc = 0;
55 	u8 ir_s_calc = 0;
56 	u32 ir_calc;
57 	u32 tick;
58 
59 	/* Calc tick */
60 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
61 	    ir > max_tm_rate)
62 		return -EINVAL;
63 
64 	tick = tick_array[shaper_level];
65 
66 	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0,
67 	 * the formula is changed to:
68 	 *
69 	 *		126 * 1 * 8
70 	 * ir_calc = ---------------- * 1000
71 	 *		tick * 1
72 	 */
73 	ir_calc = (DEFAULT_DIVISOR_IR_B + (tick >> 1) - 1) / tick;
74 
75 	if (ir_calc == ir) {
76 		ir_para->ir_b = DEFAULT_SHAPER_IR_B;
77 		ir_para->ir_u = 0;
78 		ir_para->ir_s = 0;
79 
80 		return 0;
81 	} else if (ir_calc > ir) {
82 		/* Increasing the denominator to select ir_s value */
83 		while (ir_calc >= ir && ir) {
84 			ir_s_calc++;
85 			ir_calc = DEFAULT_DIVISOR_IR_B /
86 				  (tick * (1 << ir_s_calc));
87 		}
88 
89 		ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
90 				(DIVISOR_CLK >> 1)) / DIVISOR_CLK;
91 	} else {
92 		/* Increasing the numerator to select ir_u value */
93 		u32 numerator;
94 
95 		while (ir_calc < ir) {
96 			ir_u_calc++;
97 			numerator = DEFAULT_DIVISOR_IR_B * (1 << ir_u_calc);
98 			ir_calc = (numerator + (tick >> 1)) / tick;
99 		}
100 
101 		if (ir_calc == ir) {
102 			ir_para->ir_b = DEFAULT_SHAPER_IR_B;
103 		} else {
104 			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
105 			ir_para->ir_b = (ir * tick + (denominator >> 1)) /
106 					denominator;
107 		}
108 	}
109 
110 	ir_para->ir_u = ir_u_calc;
111 	ir_para->ir_s = ir_s_calc;
112 
113 	return 0;
114 }
115 
116 static const u16 hclge_pfc_tx_stats_offset[] = {
117 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num),
118 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num),
119 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num),
120 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num),
121 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num),
122 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num),
123 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num),
124 	HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)
125 };
126 
127 static const u16 hclge_pfc_rx_stats_offset[] = {
128 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num),
129 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num),
130 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num),
131 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num),
132 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num),
133 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num),
134 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num),
135 	HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)
136 };
137 
138 static void hclge_pfc_stats_get(struct hclge_dev *hdev, bool tx, u64 *stats)
139 {
140 	const u16 *offset;
141 	int i;
142 
143 	if (tx)
144 		offset = hclge_pfc_tx_stats_offset;
145 	else
146 		offset = hclge_pfc_rx_stats_offset;
147 
148 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
149 		stats[i] = HCLGE_STATS_READ(&hdev->mac_stats, offset[i]);
150 }
151 
152 void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
153 {
154 	hclge_pfc_stats_get(hdev, false, stats);
155 }
156 
157 void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
158 {
159 	hclge_pfc_stats_get(hdev, true, stats);
160 }
161 
162 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
163 {
164 	struct hclge_desc desc;
165 
166 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
167 
168 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
169 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
170 
171 	return hclge_cmd_send(&hdev->hw, &desc, 1);
172 }
173 
174 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
175 				  u8 pfc_bitmap)
176 {
177 	struct hclge_desc desc;
178 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
179 
180 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
181 
182 	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
183 	pfc->pri_en_bitmap = pfc_bitmap;
184 
185 	return hclge_cmd_send(&hdev->hw, &desc, 1);
186 }
187 
188 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
189 				 u8 pause_trans_gap, u16 pause_trans_time)
190 {
191 	struct hclge_cfg_pause_param_cmd *pause_param;
192 	struct hclge_desc desc;
193 
194 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
195 
196 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
197 
198 	ether_addr_copy(pause_param->mac_addr, addr);
199 	ether_addr_copy(pause_param->mac_addr_extra, addr);
200 	pause_param->pause_trans_gap = pause_trans_gap;
201 	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
202 
203 	return hclge_cmd_send(&hdev->hw, &desc, 1);
204 }
205 
206 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
207 {
208 	struct hclge_cfg_pause_param_cmd *pause_param;
209 	struct hclge_desc desc;
210 	u16 trans_time;
211 	u8 trans_gap;
212 	int ret;
213 
214 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
215 
216 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
217 
218 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
219 	if (ret)
220 		return ret;
221 
222 	trans_gap = pause_param->pause_trans_gap;
223 	trans_time = le16_to_cpu(pause_param->pause_trans_time);
224 
225 	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
226 }
227 
228 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
229 {
230 	u8 tc;
231 
232 	tc = hdev->tm_info.prio_tc[pri_id];
233 
234 	if (tc >= hdev->tm_info.num_tc)
235 		return -EINVAL;
236 
237 	/* the register for priority has four bytes, the first byte includes
238 	 * priority0 and priority1: the higher 4 bits stand for priority1
239 	 * while the lower 4 bits stand for priority0, as below:
240 	 *
241 	 * first byte:	| pri_1 | pri_0 |
242 	 * second byte:	| pri_3 | pri_2 |
243 	 * third byte:	| pri_5 | pri_4 |
244 	 * fourth byte:	| pri_7 | pri_6 |
245 	 */
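	/* e.g. if priority 5 maps to tc 3, the high nibble of the third
	 * byte (pri[2]) is set to 3
	 */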
246 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
247 
248 	return 0;
249 }
250 
251 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
252 {
253 	struct hclge_desc desc;
254 	u8 *pri = (u8 *)desc.data;
255 	u8 pri_id;
256 	int ret;
257 
258 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
259 
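	/* two priorities are packed per byte, see hclge_fill_pri_array() */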
260 	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
261 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
262 		if (ret)
263 			return ret;
264 	}
265 
266 	return hclge_cmd_send(&hdev->hw, &desc, 1);
267 }
268 
269 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
270 				      u8 pg_id, u8 pri_bit_map)
271 {
272 	struct hclge_pg_to_pri_link_cmd *map;
273 	struct hclge_desc desc;
274 
275 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
276 
277 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
278 
279 	map->pg_id = pg_id;
280 	map->pri_bit_map = pri_bit_map;
281 
282 	return hclge_cmd_send(&hdev->hw, &desc, 1);
283 }
284 
285 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri,
286 				      bool link_vld)
287 {
288 	struct hclge_qs_to_pri_link_cmd *map;
289 	struct hclge_desc desc;
290 
291 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
292 
293 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
294 
295 	map->qs_id = cpu_to_le16(qs_id);
296 	map->priority = pri;
297 	map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0;
298 
299 	return hclge_cmd_send(&hdev->hw, &desc, 1);
300 }
301 
302 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
303 				    u16 q_id, u16 qs_id)
304 {
305 	struct hclge_nq_to_qs_link_cmd *map;
306 	struct hclge_desc desc;
307 	u16 qs_id_l;
308 	u16 qs_id_h;
309 
310 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
311 
312 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
313 
314 	map->nq_id = cpu_to_le16(q_id);
315 
316 	/* convert qs_id to the following format to support qset_id >= 1024
317 	 * qs_id: | 15 | 14 ~ 10 |  9 ~ 0   |
318 	 *            /         / \         \
319 	 *           /         /   \         \
320 	 * qset_id: | 15 ~ 11 |  10 |  9 ~ 0  |
321 	 *          | qs_id_h | vld | qs_id_l |
322 	 */
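	/* e.g. qset_id 1100 has qs_id_l = 76 and qs_id_h = 1, so it is
	 * rewritten as (1 << 11) | 76 = 0x84c before the vld bit is OR-ed in
	 */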
323 	qs_id_l = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_L_MSK,
324 				  HCLGE_TM_QS_ID_L_S);
325 	qs_id_h = hnae3_get_field(qs_id, HCLGE_TM_QS_ID_H_MSK,
326 				  HCLGE_TM_QS_ID_H_S);
327 	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
328 			qs_id_l);
329 	hnae3_set_field(qs_id, HCLGE_TM_QS_ID_H_EXT_MSK, HCLGE_TM_QS_ID_H_EXT_S,
330 			qs_id_h);
331 	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
332 
333 	return hclge_cmd_send(&hdev->hw, &desc, 1);
334 }
335 
336 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
337 				  u8 dwrr)
338 {
339 	struct hclge_pg_weight_cmd *weight;
340 	struct hclge_desc desc;
341 
342 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
343 
344 	weight = (struct hclge_pg_weight_cmd *)desc.data;
345 
346 	weight->pg_id = pg_id;
347 	weight->dwrr = dwrr;
348 
349 	return hclge_cmd_send(&hdev->hw, &desc, 1);
350 }
351 
352 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
353 				   u8 dwrr)
354 {
355 	struct hclge_priority_weight_cmd *weight;
356 	struct hclge_desc desc;
357 
358 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
359 
360 	weight = (struct hclge_priority_weight_cmd *)desc.data;
361 
362 	weight->pri_id = pri_id;
363 	weight->dwrr = dwrr;
364 
365 	return hclge_cmd_send(&hdev->hw, &desc, 1);
366 }
367 
368 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
369 				  u8 dwrr)
370 {
371 	struct hclge_qs_weight_cmd *weight;
372 	struct hclge_desc desc;
373 
374 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
375 
376 	weight = (struct hclge_qs_weight_cmd *)desc.data;
377 
378 	weight->qs_id = cpu_to_le16(qs_id);
379 	weight->dwrr = dwrr;
380 
381 	return hclge_cmd_send(&hdev->hw, &desc, 1);
382 }
383 
384 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
385 				      u8 bs_b, u8 bs_s)
386 {
387 	u32 shapping_para = 0;
388 
389 	hclge_tm_set_field(shapping_para, IR_B, ir_b);
390 	hclge_tm_set_field(shapping_para, IR_U, ir_u);
391 	hclge_tm_set_field(shapping_para, IR_S, ir_s);
392 	hclge_tm_set_field(shapping_para, BS_B, bs_b);
393 	hclge_tm_set_field(shapping_para, BS_S, bs_s);
394 
395 	return shapping_para;
396 }
397 
398 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
399 				    enum hclge_shap_bucket bucket, u8 pg_id,
400 				    u32 shapping_para, u32 rate)
401 {
402 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
403 	enum hclge_opcode_type opcode;
404 	struct hclge_desc desc;
405 
406 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
407 		 HCLGE_OPC_TM_PG_C_SHAPPING;
408 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
409 
410 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
411 
412 	shap_cfg_cmd->pg_id = pg_id;
413 
414 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
415 
416 	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
417 
418 	shap_cfg_cmd->pg_rate = cpu_to_le32(rate);
419 
420 	return hclge_cmd_send(&hdev->hw, &desc, 1);
421 }
422 
423 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
424 {
425 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
426 	struct hclge_shaper_ir_para ir_para;
427 	struct hclge_desc desc;
428 	u32 shapping_para;
429 	int ret;
430 
431 	ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
432 				     &ir_para,
433 				     hdev->ae_dev->dev_specs.max_tm_rate);
434 	if (ret)
435 		return ret;
436 
437 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
438 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
439 
440 	shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
441 						   ir_para.ir_s,
442 						   HCLGE_SHAPER_BS_U_DEF,
443 						   HCLGE_SHAPER_BS_S_DEF);
444 
445 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
446 
447 	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
448 
449 	shap_cfg_cmd->port_rate = cpu_to_le32(hdev->hw.mac.speed);
450 
451 	return hclge_cmd_send(&hdev->hw, &desc, 1);
452 }
453 
454 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
455 				     enum hclge_shap_bucket bucket, u8 pri_id,
456 				     u32 shapping_para, u32 rate)
457 {
458 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
459 	enum hclge_opcode_type opcode;
460 	struct hclge_desc desc;
461 
462 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
463 		 HCLGE_OPC_TM_PRI_C_SHAPPING;
464 
465 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
466 
467 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
468 
469 	shap_cfg_cmd->pri_id = pri_id;
470 
471 	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
472 
473 	hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
474 
475 	shap_cfg_cmd->pri_rate = cpu_to_le32(rate);
476 
477 	return hclge_cmd_send(&hdev->hw, &desc, 1);
478 }
479 
480 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
481 {
482 	struct hclge_desc desc;
483 
484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
485 
486 	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
487 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
488 	else
489 		desc.data[1] = 0;
490 
491 	desc.data[0] = cpu_to_le32(pg_id);
492 
493 	return hclge_cmd_send(&hdev->hw, &desc, 1);
494 }
495 
496 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
497 {
498 	struct hclge_desc desc;
499 
500 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
501 
502 	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
503 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
504 	else
505 		desc.data[1] = 0;
506 
507 	desc.data[0] = cpu_to_le32(pri_id);
508 
509 	return hclge_cmd_send(&hdev->hw, &desc, 1);
510 }
511 
512 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
513 {
514 	struct hclge_desc desc;
515 
516 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
517 
518 	if (mode == HCLGE_SCH_MODE_DWRR)
519 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
520 	else
521 		desc.data[1] = 0;
522 
523 	desc.data[0] = cpu_to_le32(qs_id);
524 
525 	return hclge_cmd_send(&hdev->hw, &desc, 1);
526 }
527 
528 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
529 			      u32 bit_map)
530 {
531 	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
532 	struct hclge_desc desc;
533 
534 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
535 				   false);
536 
537 	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
538 
539 	bp_to_qs_map_cmd->tc_id = tc;
540 	bp_to_qs_map_cmd->qs_group_id = grp_id;
541 	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
542 
543 	return hclge_cmd_send(&hdev->hw, &desc, 1);
544 }
545 
546 int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
547 {
548 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
549 	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
550 	struct hclge_shaper_ir_para ir_para;
551 	struct hclge_dev *hdev = vport->back;
552 	struct hclge_desc desc;
553 	u32 shaper_para;
554 	int ret, i;
555 
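	/* a max_tx_rate of 0 is treated as unlimited, so fall back to the
	 * device's max TM rate
	 */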
556 	if (!max_tx_rate)
557 		max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
558 
559 	ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
560 				     &ir_para,
561 				     hdev->ae_dev->dev_specs.max_tm_rate);
562 	if (ret)
563 		return ret;
564 
565 	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
566 						 ir_para.ir_s,
567 						 HCLGE_SHAPER_BS_U_DEF,
568 						 HCLGE_SHAPER_BS_S_DEF);
569 
570 	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
571 		hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
572 					   false);
573 
574 		shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
575 		shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
576 		shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
577 
578 		hnae3_set_bit(shap_cfg_cmd->flag, HCLGE_TM_RATE_VLD, 1);
579 		shap_cfg_cmd->qs_rate = cpu_to_le32(max_tx_rate);
580 
581 		ret = hclge_cmd_send(&hdev->hw, &desc, 1);
582 		if (ret) {
583 			dev_err(&hdev->pdev->dev,
584 				"vport%u, qs%u failed to set tx_rate:%d, ret=%d\n",
585 				vport->vport_id, vport->qs_offset + i,
586 				max_tx_rate, ret);
587 			return ret;
588 		}
589 	}
590 
591 	return 0;
592 }
593 
594 static u16 hclge_vport_get_max_rss_size(struct hclge_vport *vport)
595 {
596 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
597 	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
598 	struct hclge_dev *hdev = vport->back;
599 	u16 max_rss_size = 0;
600 	int i;
601 
602 	if (!tc_info->mqprio_active)
603 		return vport->alloc_tqps / tc_info->num_tc;
604 
605 	for (i = 0; i < HNAE3_MAX_TC; i++) {
606 		if (!(hdev->hw_tc_map & BIT(i)) || i >= tc_info->num_tc)
607 			continue;
608 		if (max_rss_size < tc_info->tqp_count[i])
609 			max_rss_size = tc_info->tqp_count[i];
610 	}
611 
612 	return max_rss_size;
613 }
614 
615 static u16 hclge_vport_get_tqp_num(struct hclge_vport *vport)
616 {
617 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
618 	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
619 	struct hclge_dev *hdev = vport->back;
620 	int sum = 0;
621 	int i;
622 
623 	if (!tc_info->mqprio_active)
624 		return kinfo->rss_size * tc_info->num_tc;
625 
626 	for (i = 0; i < HNAE3_MAX_TC; i++) {
627 		if (hdev->hw_tc_map & BIT(i) && i < tc_info->num_tc)
628 			sum += tc_info->tqp_count[i];
629 	}
630 
631 	return sum;
632 }
633 
634 static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport)
635 {
636 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
637 	struct hclge_dev *hdev = vport->back;
638 	u16 vport_max_rss_size;
639 	u16 max_rss_size;
640 
641 	/* TC configuration is shared by the PF/VFs on one port; only allow
642 	 * one tc for a VF for simplicity. A VF's vport_id is non-zero.
643 	 */
644 	if (vport->vport_id) {
645 		kinfo->tc_info.max_tc = 1;
646 		kinfo->tc_info.num_tc = 1;
647 		vport->qs_offset = HNAE3_MAX_TC +
648 				   vport->vport_id - HCLGE_VF_VPORT_START_NUM;
649 		vport_max_rss_size = hdev->vf_rss_size_max;
650 	} else {
651 		kinfo->tc_info.max_tc = hdev->tc_max;
652 		kinfo->tc_info.num_tc =
653 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
654 		vport->qs_offset = 0;
655 		vport_max_rss_size = hdev->pf_rss_size_max;
656 	}
657 
658 	max_rss_size = min_t(u16, vport_max_rss_size,
659 			     hclge_vport_get_max_rss_size(vport));
660 
661 	/* Set to user value, no larger than max_rss_size. */
662 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
663 	    kinfo->req_rss_size <= max_rss_size) {
664 		dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
665 			 kinfo->rss_size, kinfo->req_rss_size);
666 		kinfo->rss_size = kinfo->req_rss_size;
667 	} else if (kinfo->rss_size > max_rss_size ||
668 		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
669 		/* Set to the maximum specification value (max_rss_size). */
670 		kinfo->rss_size = max_rss_size;
671 	}
672 }
673 
674 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
675 {
676 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
677 	struct hclge_dev *hdev = vport->back;
678 	u8 i;
679 
680 	hclge_tm_update_kinfo_rss_size(vport);
681 	kinfo->num_tqps = hclge_vport_get_tqp_num(vport);
682 	vport->dwrr = 100;  /* 100 percent as init */
683 	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
684 	hdev->rss_cfg.rss_size = kinfo->rss_size;
685 
686 	/* when mqprio is enabled, tc_info has already been updated. */
687 	if (kinfo->tc_info.mqprio_active)
688 		return;
689 
690 	for (i = 0; i < HNAE3_MAX_TC; i++) {
691 		if (hdev->hw_tc_map & BIT(i) && i < kinfo->tc_info.num_tc) {
692 			kinfo->tc_info.tqp_offset[i] = i * kinfo->rss_size;
693 			kinfo->tc_info.tqp_count[i] = kinfo->rss_size;
694 		} else {
695 			/* Set to default queue if TC is disabled */
696 			kinfo->tc_info.tqp_offset[i] = 0;
697 			kinfo->tc_info.tqp_count[i] = 1;
698 		}
699 	}
700 
701 	memcpy(kinfo->tc_info.prio_tc, hdev->tm_info.prio_tc,
702 	       sizeof_field(struct hnae3_tc_info, prio_tc));
703 }
704 
705 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
706 {
707 	struct hclge_vport *vport = hdev->vport;
708 	u32 i;
709 
710 	for (i = 0; i < hdev->num_alloc_vport; i++) {
711 		hclge_tm_vport_tc_info_update(vport);
712 
713 		vport++;
714 	}
715 }
716 
717 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
718 {
719 	u8 i, tc_sch_mode;
720 	u32 bw_limit;
721 
722 	for (i = 0; i < hdev->tc_max; i++) {
723 		if (i < hdev->tm_info.num_tc) {
724 			tc_sch_mode = HCLGE_SCH_MODE_DWRR;
725 			bw_limit = hdev->tm_info.pg_info[0].bw_limit;
726 		} else {
727 			tc_sch_mode = HCLGE_SCH_MODE_SP;
728 			bw_limit = 0;
729 		}
730 
731 		hdev->tm_info.tc_info[i].tc_id = i;
732 		hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode;
733 		hdev->tm_info.tc_info[i].pgid = 0;
734 		hdev->tm_info.tc_info[i].bw_limit = bw_limit;
735 	}
736 
737 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
738 		hdev->tm_info.prio_tc[i] =
739 			(i >= hdev->tm_info.num_tc) ? 0 : i;
740 }
741 
742 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
743 {
744 #define BW_PERCENT	100
745 
746 	u8 i;
747 
748 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
749 		int k;
750 
751 		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
752 
753 		hdev->tm_info.pg_info[i].pg_id = i;
754 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
755 
756 		hdev->tm_info.pg_info[i].bw_limit =
757 					hdev->ae_dev->dev_specs.max_tm_rate;
758 
759 		if (i != 0)
760 			continue;
761 
762 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
763 		for (k = 0; k < hdev->tm_info.num_tc; k++)
764 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
765 		for (; k < HNAE3_MAX_TC; k++)
766 			hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
767 	}
768 }
769 
770 static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
771 {
772 	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
773 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
774 			dev_warn(&hdev->pdev->dev,
775 				 "Only 1 tc used, but last mode is FC_PFC\n");
776 
777 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
778 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
779 		/* fc_mode_last_time records the last fc_mode when
780 		 * DCB is enabled, so that fc_mode can be set to
781 		 * the correct value when DCB is disabled.
782 		 */
783 		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
784 		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
785 	}
786 }
787 
788 static void hclge_update_fc_mode(struct hclge_dev *hdev)
789 {
790 	if (!hdev->tm_info.pfc_en) {
791 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
792 		return;
793 	}
794 
795 	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
796 		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
797 		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
798 	}
799 }
800 
801 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
802 {
803 	if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
804 		hclge_update_fc_mode(hdev);
805 	else
806 		hclge_update_fc_mode_by_dcb_flag(hdev);
807 }
808 
809 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
810 {
811 	hclge_tm_pg_info_init(hdev);
812 
813 	hclge_tm_tc_info_init(hdev);
814 
815 	hclge_tm_vport_info_update(hdev);
816 
817 	hclge_tm_pfc_info_update(hdev);
818 }
819 
820 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
821 {
822 	int ret;
823 	u32 i;
824 
825 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
826 		return 0;
827 
828 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
829 		/* Cfg mapping */
830 		ret = hclge_tm_pg_to_pri_map_cfg(
831 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
832 		if (ret)
833 			return ret;
834 	}
835 
836 	return 0;
837 }
838 
839 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
840 {
841 	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
842 	struct hclge_shaper_ir_para ir_para;
843 	u32 shaper_para;
844 	int ret;
845 	u32 i;
846 
847 	/* Cfg pg schd */
848 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
849 		return 0;
850 
851 	/* Cfg shaping for each pg */
852 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
853 		u32 rate = hdev->tm_info.pg_info[i].bw_limit;
854 
855 		/* Calc shaper para */
856 		ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PG,
857 					     &ir_para, max_tm_rate);
858 		if (ret)
859 			return ret;
860 
861 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
862 							 HCLGE_SHAPER_BS_U_DEF,
863 							 HCLGE_SHAPER_BS_S_DEF);
864 		ret = hclge_tm_pg_shapping_cfg(hdev,
865 					       HCLGE_TM_SHAP_C_BUCKET, i,
866 					       shaper_para, rate);
867 		if (ret)
868 			return ret;
869 
870 		shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
871 							 ir_para.ir_u,
872 							 ir_para.ir_s,
873 							 HCLGE_SHAPER_BS_U_DEF,
874 							 HCLGE_SHAPER_BS_S_DEF);
875 		ret = hclge_tm_pg_shapping_cfg(hdev,
876 					       HCLGE_TM_SHAP_P_BUCKET, i,
877 					       shaper_para, rate);
878 		if (ret)
879 			return ret;
880 	}
881 
882 	return 0;
883 }
884 
885 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
886 {
887 	int ret;
888 	u32 i;
889 
890 	/* cfg pg schd */
891 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
892 		return 0;
893 
894 	/* Cfg dwrr for each pg */
895 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
896 		/* Cfg dwrr */
897 		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
898 		if (ret)
899 			return ret;
900 	}
901 
902 	return 0;
903 }
904 
905 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
906 				   struct hclge_vport *vport)
907 {
908 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
909 	struct hnae3_tc_info *tc_info = &kinfo->tc_info;
910 	struct hnae3_queue **tqp = kinfo->tqp;
911 	u32 i, j;
912 	int ret;
913 
914 	for (i = 0; i < tc_info->num_tc; i++) {
915 		for (j = 0; j < tc_info->tqp_count[i]; j++) {
916 			struct hnae3_queue *q = tqp[tc_info->tqp_offset[i] + j];
917 
918 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
919 						       hclge_get_queue_id(q),
920 						       vport->qs_offset + i);
921 			if (ret)
922 				return ret;
923 		}
924 	}
925 
926 	return 0;
927 }
928 
929 static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev)
930 {
931 	struct hclge_vport *vport = hdev->vport;
932 	u16 i, k;
933 	int ret;
934 
935 	/* Cfg qs -> pri mapping, one by one mapping */
936 	for (k = 0; k < hdev->num_alloc_vport; k++) {
937 		struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
938 
939 		for (i = 0; i < kinfo->tc_info.max_tc; i++) {
940 			u8 pri = i < kinfo->tc_info.num_tc ? i : 0;
941 			bool link_vld = i < kinfo->tc_info.num_tc;
942 
943 			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
944 							 vport[k].qs_offset + i,
945 							 pri, link_vld);
946 			if (ret)
947 				return ret;
948 		}
949 	}
950 
951 	return 0;
952 }
953 
954 static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev)
955 {
956 	struct hclge_vport *vport = hdev->vport;
957 	u16 i, k;
958 	int ret;
959 
960 	/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
961 	for (k = 0; k < hdev->num_alloc_vport; k++)
962 		for (i = 0; i < HNAE3_MAX_TC; i++) {
963 			ret = hclge_tm_qs_to_pri_map_cfg(hdev,
964 							 vport[k].qs_offset + i,
965 							 k, true);
966 			if (ret)
967 				return ret;
968 		}
969 
970 	return 0;
971 }
972 
973 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
974 {
975 	struct hclge_vport *vport = hdev->vport;
976 	int ret;
977 	u32 i;
978 
979 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE)
980 		ret = hclge_tm_pri_q_qs_cfg_tc_base(hdev);
981 	else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
982 		ret = hclge_tm_pri_q_qs_cfg_vnet_base(hdev);
983 	else
984 		return -EINVAL;
985 
986 	if (ret)
987 		return ret;
988 
989 	/* Cfg q -> qs mapping */
990 	for (i = 0; i < hdev->num_alloc_vport; i++) {
991 		ret = hclge_vport_q_to_qs_map(hdev, vport);
992 		if (ret)
993 			return ret;
994 
995 		vport++;
996 	}
997 
998 	return 0;
999 }
1000 
1001 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
1002 {
1003 	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
1004 	struct hclge_shaper_ir_para ir_para;
1005 	u32 shaper_para_c, shaper_para_p;
1006 	int ret;
1007 	u32 i;
1008 
1009 	for (i = 0; i < hdev->tc_max; i++) {
1010 		u32 rate = hdev->tm_info.tc_info[i].bw_limit;
1011 
1012 		if (rate) {
1013 			ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI,
1014 						     &ir_para, max_tm_rate);
1015 			if (ret)
1016 				return ret;
1017 
1018 			shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0,
1019 								   HCLGE_SHAPER_BS_U_DEF,
1020 								   HCLGE_SHAPER_BS_S_DEF);
1021 			shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b,
1022 								   ir_para.ir_u,
1023 								   ir_para.ir_s,
1024 								   HCLGE_SHAPER_BS_U_DEF,
1025 								   HCLGE_SHAPER_BS_S_DEF);
1026 		} else {
1027 			shaper_para_c = 0;
1028 			shaper_para_p = 0;
1029 		}
1030 
1031 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
1032 						shaper_para_c, rate);
1033 		if (ret)
1034 			return ret;
1035 
1036 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
1037 						shaper_para_p, rate);
1038 		if (ret)
1039 			return ret;
1040 	}
1041 
1042 	return 0;
1043 }
1044 
1045 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
1046 {
1047 	struct hclge_dev *hdev = vport->back;
1048 	struct hclge_shaper_ir_para ir_para;
1049 	u32 shaper_para;
1050 	int ret;
1051 
1052 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
1053 				     &ir_para,
1054 				     hdev->ae_dev->dev_specs.max_tm_rate);
1055 	if (ret)
1056 		return ret;
1057 
1058 	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
1059 						 HCLGE_SHAPER_BS_U_DEF,
1060 						 HCLGE_SHAPER_BS_S_DEF);
1061 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
1062 					vport->vport_id, shaper_para,
1063 					vport->bw_limit);
1064 	if (ret)
1065 		return ret;
1066 
1067 	shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
1068 						 ir_para.ir_s,
1069 						 HCLGE_SHAPER_BS_U_DEF,
1070 						 HCLGE_SHAPER_BS_S_DEF);
1071 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
1072 					vport->vport_id, shaper_para,
1073 					vport->bw_limit);
1074 	if (ret)
1075 		return ret;
1076 
1077 	return 0;
1078 }
1079 
1080 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
1081 {
1082 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1083 	struct hclge_dev *hdev = vport->back;
1084 	u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
1085 	struct hclge_shaper_ir_para ir_para;
1086 	u32 i;
1087 	int ret;
1088 
1089 	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1090 		ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
1091 					     HCLGE_SHAPER_LVL_QSET,
1092 					     &ir_para, max_tm_rate);
1093 		if (ret)
1094 			return ret;
1095 	}
1096 
1097 	return 0;
1098 }
1099 
1100 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
1101 {
1102 	struct hclge_vport *vport = hdev->vport;
1103 	int ret;
1104 	u32 i;
1105 
1106 	/* Need to configure the vport shaper */
1107 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1108 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
1109 		if (ret)
1110 			return ret;
1111 
1112 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
1113 		if (ret)
1114 			return ret;
1115 
1116 		vport++;
1117 	}
1118 
1119 	return 0;
1120 }
1121 
1122 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
1123 {
1124 	int ret;
1125 
1126 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1127 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
1128 		if (ret)
1129 			return ret;
1130 	} else {
1131 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
1132 		if (ret)
1133 			return ret;
1134 	}
1135 
1136 	return 0;
1137 }
1138 
1139 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
1140 {
1141 	struct hclge_vport *vport = hdev->vport;
1142 	struct hclge_pg_info *pg_info;
1143 	u8 dwrr;
1144 	int ret;
1145 	u32 i, k;
1146 
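	/* set each priority's dwrr from its PG's per-tc weight, then the
	 * qset dwrr for every vport under that priority
	 */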
1147 	for (i = 0; i < hdev->tc_max; i++) {
1148 		pg_info =
1149 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1150 		dwrr = pg_info->tc_dwrr[i];
1151 
1152 		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
1153 		if (ret)
1154 			return ret;
1155 
1156 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1157 			struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo;
1158 
1159 			if (i >= kinfo->tc_info.max_tc)
1160 				continue;
1161 
1162 			dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0;
1163 			ret = hclge_tm_qs_weight_cfg(
1164 				hdev, vport[k].qs_offset + i,
1165 				dwrr);
1166 			if (ret)
1167 				return ret;
1168 		}
1169 	}
1170 
1171 	return 0;
1172 }
1173 
1174 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
1175 {
1176 #define DEFAULT_TC_OFFSET	14
1177 
1178 	struct hclge_ets_tc_weight_cmd *ets_weight;
1179 	struct hclge_desc desc;
1180 	unsigned int i;
1181 
1182 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
1183 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
1184 
1185 	for (i = 0; i < HNAE3_MAX_TC; i++) {
1186 		struct hclge_pg_info *pg_info;
1187 
1188 		pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
1189 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
1190 	}
1191 
1192 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
1193 
1194 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1195 }
1196 
1197 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1198 {
1199 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1200 	struct hclge_dev *hdev = vport->back;
1201 	int ret;
1202 	u8 i;
1203 
1204 	/* Vf dwrr */
1205 	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1206 	if (ret)
1207 		return ret;
1208 
1209 	/* Qset dwrr */
1210 	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1211 		ret = hclge_tm_qs_weight_cfg(
1212 			hdev, vport->qs_offset + i,
1213 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
1214 		if (ret)
1215 			return ret;
1216 	}
1217 
1218 	return 0;
1219 }
1220 
1221 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1222 {
1223 	struct hclge_vport *vport = hdev->vport;
1224 	int ret;
1225 	u32 i;
1226 
1227 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1228 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1229 		if (ret)
1230 			return ret;
1231 
1232 		vport++;
1233 	}
1234 
1235 	return 0;
1236 }
1237 
1238 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1239 {
1240 	int ret;
1241 
1242 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1243 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1244 		if (ret)
1245 			return ret;
1246 
1247 		if (!hnae3_dev_dcb_supported(hdev))
1248 			return 0;
1249 
1250 		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1251 		if (ret == -EOPNOTSUPP) {
1252 			dev_warn(&hdev->pdev->dev,
1253 				 "fw %08x doesn't support ets tc weight cmd\n",
1254 				 hdev->fw_version);
1255 			ret = 0;
1256 		}
1257 
1258 		return ret;
1259 	} else {
1260 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1261 		if (ret)
1262 			return ret;
1263 	}
1264 
1265 	return 0;
1266 }
1267 
1268 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1269 {
1270 	int ret;
1271 
1272 	ret = hclge_up_to_tc_map(hdev);
1273 	if (ret)
1274 		return ret;
1275 
1276 	ret = hclge_tm_pg_to_pri_map(hdev);
1277 	if (ret)
1278 		return ret;
1279 
1280 	return hclge_tm_pri_q_qs_cfg(hdev);
1281 }
1282 
1283 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1284 {
1285 	int ret;
1286 
1287 	ret = hclge_tm_port_shaper_cfg(hdev);
1288 	if (ret)
1289 		return ret;
1290 
1291 	ret = hclge_tm_pg_shaper_cfg(hdev);
1292 	if (ret)
1293 		return ret;
1294 
1295 	return hclge_tm_pri_shaper_cfg(hdev);
1296 }
1297 
1298 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1299 {
1300 	int ret;
1301 
1302 	ret = hclge_tm_pg_dwrr_cfg(hdev);
1303 	if (ret)
1304 		return ret;
1305 
1306 	return hclge_tm_pri_dwrr_cfg(hdev);
1307 }
1308 
1309 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1310 {
1311 	int ret;
1312 	u8 i;
1313 
1314 	/* Only configured in TC-based scheduler mode */
1315 	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1316 		return 0;
1317 
1318 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
1319 		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1320 		if (ret)
1321 			return ret;
1322 	}
1323 
1324 	return 0;
1325 }
1326 
1327 static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id)
1328 {
1329 	struct hclge_vport *vport = hdev->vport;
1330 	int ret;
1331 	u8 mode;
1332 	u16 i;
1333 
1334 	ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id);
1335 	if (ret)
1336 		return ret;
1337 
1338 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1339 		struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo;
1340 
1341 		if (pri_id >= kinfo->tc_info.max_tc)
1342 			continue;
1343 
1344 		mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR :
1345 		       HCLGE_SCH_MODE_SP;
1346 		ret = hclge_tm_qs_schd_mode_cfg(hdev,
1347 						vport[i].qs_offset + pri_id,
1348 						mode);
1349 		if (ret)
1350 			return ret;
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1357 {
1358 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1359 	struct hclge_dev *hdev = vport->back;
1360 	int ret;
1361 	u8 i;
1362 
1363 	if (vport->vport_id >= HNAE3_MAX_TC)
1364 		return -EINVAL;
1365 
1366 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1367 	if (ret)
1368 		return ret;
1369 
1370 	for (i = 0; i < kinfo->tc_info.num_tc; i++) {
1371 		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1372 
1373 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1374 						sch_mode);
1375 		if (ret)
1376 			return ret;
1377 	}
1378 
1379 	return 0;
1380 }
1381 
1382 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1383 {
1384 	struct hclge_vport *vport = hdev->vport;
1385 	int ret;
1386 	u8 i;
1387 
1388 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1389 		for (i = 0; i < hdev->tc_max; i++) {
1390 			ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i);
1391 			if (ret)
1392 				return ret;
1393 		}
1394 	} else {
1395 		for (i = 0; i < hdev->num_alloc_vport; i++) {
1396 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1397 			if (ret)
1398 				return ret;
1399 
1400 			vport++;
1401 		}
1402 	}
1403 
1404 	return 0;
1405 }
1406 
1407 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1408 {
1409 	int ret;
1410 
1411 	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1412 	if (ret)
1413 		return ret;
1414 
1415 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
1416 }
1417 
1418 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1419 {
1420 	int ret;
1421 
1422 	/* Cfg tm mapping  */
1423 	ret = hclge_tm_map_cfg(hdev);
1424 	if (ret)
1425 		return ret;
1426 
1427 	/* Cfg tm shaper */
1428 	ret = hclge_tm_shaper_cfg(hdev);
1429 	if (ret)
1430 		return ret;
1431 
1432 	/* Cfg dwrr */
1433 	ret = hclge_tm_dwrr_cfg(hdev);
1434 	if (ret)
1435 		return ret;
1436 
1437 	/* Cfg schd mode for each level schd */
1438 	return hclge_tm_schd_mode_hw(hdev);
1439 }
1440 
1441 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1442 {
1443 	struct hclge_mac *mac = &hdev->hw.mac;
1444 
1445 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
1446 				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1447 				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1448 }
1449 
1450 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1451 {
1452 	u8 enable_bitmap = 0;
1453 
1454 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1455 		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1456 				HCLGE_RX_MAC_PAUSE_EN_MSK;
1457 
1458 	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1459 				      hdev->tm_info.pfc_en);
1460 }
1461 
1462 /* The queues used for backpressure are divided into several groups; each
1463  * group contains 32 queue sets, which can be represented by a u32 bitmap.
1464  */
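/* For example, qs_id 75 belongs to group 75 / 32 = 2 and sets sub-group
 * bit 75 % 32 = 11 in that group's bitmap.
 */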
1465 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1466 {
1467 	u16 grp_id_shift = HCLGE_BP_GRP_ID_S;
1468 	u16 grp_id_mask = HCLGE_BP_GRP_ID_M;
1469 	u8 grp_num = HCLGE_BP_GRP_NUM;
1470 	int i;
1471 
1472 	if (hdev->num_tqps > HCLGE_TQP_MAX_SIZE_DEV_V2) {
1473 		grp_num = HCLGE_BP_EXT_GRP_NUM;
1474 		grp_id_mask = HCLGE_BP_EXT_GRP_ID_M;
1475 		grp_id_shift = HCLGE_BP_EXT_GRP_ID_S;
1476 	}
1477 
1478 	for (i = 0; i < grp_num; i++) {
1479 		u32 qs_bitmap = 0;
1480 		int k, ret;
1481 
1482 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1483 			struct hclge_vport *vport = &hdev->vport[k];
1484 			u16 qs_id = vport->qs_offset + tc;
1485 			u8 grp, sub_grp;
1486 
1487 			grp = hnae3_get_field(qs_id, grp_id_mask, grp_id_shift);
1488 			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1489 						  HCLGE_BP_SUB_GRP_ID_S);
1490 			if (i == grp)
1491 				qs_bitmap |= (1 << sub_grp);
1492 		}
1493 
1494 		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1495 		if (ret)
1496 			return ret;
1497 	}
1498 
1499 	return 0;
1500 }
1501 
1502 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1503 {
1504 	bool tx_en, rx_en;
1505 
1506 	switch (hdev->tm_info.fc_mode) {
1507 	case HCLGE_FC_NONE:
1508 		tx_en = false;
1509 		rx_en = false;
1510 		break;
1511 	case HCLGE_FC_RX_PAUSE:
1512 		tx_en = false;
1513 		rx_en = true;
1514 		break;
1515 	case HCLGE_FC_TX_PAUSE:
1516 		tx_en = true;
1517 		rx_en = false;
1518 		break;
1519 	case HCLGE_FC_FULL:
1520 		tx_en = true;
1521 		rx_en = true;
1522 		break;
1523 	case HCLGE_FC_PFC:
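		/* per-priority pause is handled by hclge_pfc_setup_hw(),
		 * so global MAC pause stays disabled in PFC mode
		 */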
1524 		tx_en = false;
1525 		rx_en = false;
1526 		break;
1527 	default:
1528 		tx_en = true;
1529 		rx_en = true;
1530 	}
1531 
1532 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1533 }
1534 
1535 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1536 {
1537 	int ret;
1538 	int i;
1539 
1540 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
1541 		ret = hclge_bp_setup_hw(hdev, i);
1542 		if (ret)
1543 			return ret;
1544 	}
1545 
1546 	return 0;
1547 }
1548 
1549 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1550 {
1551 	int ret;
1552 
1553 	ret = hclge_pause_param_setup_hw(hdev);
1554 	if (ret)
1555 		return ret;
1556 
1557 	ret = hclge_mac_pause_setup_hw(hdev);
1558 	if (ret)
1559 		return ret;
1560 
1561 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
1562 	if (!hnae3_dev_dcb_supported(hdev))
1563 		return 0;
1564 
1565 	/* GE MAC does not support PFC. When the driver is initializing and
1566 	 * the MAC is in GE mode, ignore the error here; otherwise
1567 	 * initialization will fail.
1568 	 */
1569 	ret = hclge_pfc_setup_hw(hdev);
1570 	if (init && ret == -EOPNOTSUPP) {
1571 		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1572 	} else if (ret) {
1573 		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1574 			ret);
1575 		return ret;
1576 	}
1577 
1578 	return hclge_tm_bp_setup(hdev);
1579 }
1580 
1581 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1582 {
1583 	struct hclge_vport *vport = hdev->vport;
1584 	struct hnae3_knic_private_info *kinfo;
1585 	u32 i, k;
1586 
1587 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1588 		hdev->tm_info.prio_tc[i] = prio_tc[i];
1589 
1590 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
1591 			kinfo = &vport[k].nic.kinfo;
1592 			kinfo->tc_info.prio_tc[i] = prio_tc[i];
1593 		}
1594 	}
1595 }
1596 
1597 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1598 {
1599 	u8 bit_map = 0;
1600 	u8 i;
1601 
1602 	hdev->tm_info.num_tc = num_tc;
1603 
1604 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1605 		bit_map |= BIT(i);
1606 
1607 	if (!bit_map) {
1608 		bit_map = 1;
1609 		hdev->tm_info.num_tc = 1;
1610 	}
1611 
1612 	hdev->hw_tc_map = bit_map;
1613 
1614 	hclge_tm_schd_info_init(hdev);
1615 }
1616 
1617 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1618 {
1619 	int ret;
1620 
1621 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1622 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1623 		return -ENOTSUPP;
1624 
1625 	ret = hclge_tm_schd_setup_hw(hdev);
1626 	if (ret)
1627 		return ret;
1628 
1629 	ret = hclge_pause_setup_hw(hdev, init);
1630 	if (ret)
1631 		return ret;
1632 
1633 	return 0;
1634 }
1635 
1636 int hclge_tm_schd_init(struct hclge_dev *hdev)
1637 {
1638 	/* fc_mode is HCLGE_FC_FULL on reset */
1639 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1640 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1641 
1642 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
1643 	    hdev->tm_info.num_pg != 1)
1644 		return -EINVAL;
1645 
1646 	hclge_tm_schd_info_init(hdev);
1647 
1648 	return hclge_tm_init_hw(hdev, true);
1649 }
1650 
1651 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1652 {
1653 	struct hclge_vport *vport = hdev->vport;
1654 	int ret;
1655 
1656 	hclge_tm_vport_tc_info_update(vport);
1657 
1658 	ret = hclge_vport_q_to_qs_map(hdev, vport);
1659 	if (ret)
1660 		return ret;
1661 
1662 	if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
1663 		return 0;
1664 
1665 	return hclge_tm_bp_setup(hdev);
1666 }
1667 
1668 int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num)
1669 {
1670 	struct hclge_tm_nodes_cmd *nodes;
1671 	struct hclge_desc desc;
1672 	int ret;
1673 
1674 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1675 		/* Each PF has 8 qsets and each VF has 1 qset */
1676 		*qset_num = HCLGE_TM_PF_MAX_QSET_NUM + pci_num_vf(hdev->pdev);
1677 		return 0;
1678 	}
1679 
1680 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1681 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1682 	if (ret) {
1683 		dev_err(&hdev->pdev->dev,
1684 			"failed to get qset num, ret = %d\n", ret);
1685 		return ret;
1686 	}
1687 
1688 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1689 	*qset_num = le16_to_cpu(nodes->qset_num);
1690 	return 0;
1691 }
1692 
1693 int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num)
1694 {
1695 	struct hclge_tm_nodes_cmd *nodes;
1696 	struct hclge_desc desc;
1697 	int ret;
1698 
1699 	if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) {
1700 		*pri_num = HCLGE_TM_PF_MAX_PRI_NUM;
1701 		return 0;
1702 	}
1703 
1704 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NODES, true);
1705 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1706 	if (ret) {
1707 		dev_err(&hdev->pdev->dev,
1708 			"failed to get pri num, ret = %d\n", ret);
1709 		return ret;
1710 	}
1711 
1712 	nodes = (struct hclge_tm_nodes_cmd *)desc.data;
1713 	*pri_num = nodes->pri_num;
1714 	return 0;
1715 }
1716 
1717 int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority,
1718 			      u8 *link_vld)
1719 {
1720 	struct hclge_qs_to_pri_link_cmd *map;
1721 	struct hclge_desc desc;
1722 	int ret;
1723 
1724 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, true);
1725 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
1726 	map->qs_id = cpu_to_le16(qset_id);
1727 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1728 	if (ret) {
1729 		dev_err(&hdev->pdev->dev,
1730 			"failed to get qset map priority, ret = %d\n", ret);
1731 		return ret;
1732 	}
1733 
1734 	*priority = map->priority;
1735 	*link_vld = map->link_vld;
1736 	return 0;
1737 }
1738 
1739 int hclge_tm_get_qset_sch_mode(struct hclge_dev *hdev, u16 qset_id, u8 *mode)
1740 {
1741 	struct hclge_qs_sch_mode_cfg_cmd *qs_sch_mode;
1742 	struct hclge_desc desc;
1743 	int ret;
1744 
1745 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, true);
1746 	qs_sch_mode = (struct hclge_qs_sch_mode_cfg_cmd *)desc.data;
1747 	qs_sch_mode->qs_id = cpu_to_le16(qset_id);
1748 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1749 	if (ret) {
1750 		dev_err(&hdev->pdev->dev,
1751 			"failed to get qset sch mode, ret = %d\n", ret);
1752 		return ret;
1753 	}
1754 
1755 	*mode = qs_sch_mode->sch_mode;
1756 	return 0;
1757 }
1758 
1759 int hclge_tm_get_qset_weight(struct hclge_dev *hdev, u16 qset_id, u8 *weight)
1760 {
1761 	struct hclge_qs_weight_cmd *qs_weight;
1762 	struct hclge_desc desc;
1763 	int ret;
1764 
1765 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, true);
1766 	qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
1767 	qs_weight->qs_id = cpu_to_le16(qset_id);
1768 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1769 	if (ret) {
1770 		dev_err(&hdev->pdev->dev,
1771 			"failed to get qset weight, ret = %d\n", ret);
1772 		return ret;
1773 	}
1774 
1775 	*weight = qs_weight->dwrr;
1776 	return 0;
1777 }
1778 
1779 int hclge_tm_get_qset_shaper(struct hclge_dev *hdev, u16 qset_id,
1780 			     struct hclge_tm_shaper_para *para)
1781 {
1782 	struct hclge_qs_shapping_cmd *shap_cfg_cmd;
1783 	struct hclge_desc desc;
1784 	u32 shapping_para;
1785 	int ret;
1786 
1787 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
1788 	shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
1789 	shap_cfg_cmd->qs_id = cpu_to_le16(qset_id);
1790 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1791 	if (ret) {
1792 		dev_err(&hdev->pdev->dev,
1793 			"failed to get qset %u shaper, ret = %d\n", qset_id,
1794 			ret);
1795 		return ret;
1796 	}
1797 
1798 	shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
1799 	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1800 	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1801 	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1802 	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1803 	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1804 	para->flag = shap_cfg_cmd->flag;
1805 	para->rate = le32_to_cpu(shap_cfg_cmd->qs_rate);
1806 	return 0;
1807 }
1808 
1809 int hclge_tm_get_pri_sch_mode(struct hclge_dev *hdev, u8 pri_id, u8 *mode)
1810 {
1811 	struct hclge_pri_sch_mode_cfg_cmd *pri_sch_mode;
1812 	struct hclge_desc desc;
1813 	int ret;
1814 
1815 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, true);
1816 	pri_sch_mode = (struct hclge_pri_sch_mode_cfg_cmd *)desc.data;
1817 	pri_sch_mode->pri_id = pri_id;
1818 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1819 	if (ret) {
1820 		dev_err(&hdev->pdev->dev,
1821 			"failed to get priority sch mode, ret = %d\n", ret);
1822 		return ret;
1823 	}
1824 
1825 	*mode = pri_sch_mode->sch_mode;
1826 	return 0;
1827 }
1828 
1829 int hclge_tm_get_pri_weight(struct hclge_dev *hdev, u8 pri_id, u8 *weight)
1830 {
1831 	struct hclge_priority_weight_cmd *priority_weight;
1832 	struct hclge_desc desc;
1833 	int ret;
1834 
1835 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, true);
1836 	priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
1837 	priority_weight->pri_id = pri_id;
1838 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1839 	if (ret) {
1840 		dev_err(&hdev->pdev->dev,
1841 			"failed to get priority weight, ret = %d\n", ret);
1842 		return ret;
1843 	}
1844 
1845 	*weight = priority_weight->dwrr;
1846 	return 0;
1847 }
1848 
1849 int hclge_tm_get_pri_shaper(struct hclge_dev *hdev, u8 pri_id,
1850 			    enum hclge_opcode_type cmd,
1851 			    struct hclge_tm_shaper_para *para)
1852 {
1853 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
1854 	struct hclge_desc desc;
1855 	u32 shapping_para;
1856 	int ret;
1857 
1858 	if (cmd != HCLGE_OPC_TM_PRI_C_SHAPPING &&
1859 	    cmd != HCLGE_OPC_TM_PRI_P_SHAPPING)
1860 		return -EINVAL;
1861 
1862 	hclge_cmd_setup_basic_desc(&desc, cmd, true);
1863 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
1864 	shap_cfg_cmd->pri_id = pri_id;
1865 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1866 	if (ret) {
1867 		dev_err(&hdev->pdev->dev,
1868 			"failed to get priority shaper(%#x), ret = %d\n",
1869 			cmd, ret);
1870 		return ret;
1871 	}
1872 
1873 	shapping_para = le32_to_cpu(shap_cfg_cmd->pri_shapping_para);
1874 	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
1875 	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
1876 	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
1877 	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
1878 	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
1879 	para->flag = shap_cfg_cmd->flag;
1880 	para->rate = le32_to_cpu(shap_cfg_cmd->pri_rate);
1881 	return 0;
1882 }
1883 
1884 int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id)
1885 {
1886 	struct hclge_nq_to_qs_link_cmd *map;
1887 	struct hclge_desc desc;
1888 	u16 qs_id_l;
1889 	u16 qs_id_h;
1890 	int ret;
1891 
1892 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
1893 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, true);
1894 	map->nq_id = cpu_to_le16(q_id);
1895 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1896 	if (ret) {
1897 		dev_err(&hdev->pdev->dev,
1898 			"failed to get queue to qset map, ret = %d\n", ret);
1899 		return ret;
1900 	}
1901 	*qset_id = le16_to_cpu(map->qset_id);
1902 
1903 	/* convert qset_id to the following format, drop the vld bit
1904 	 *            | qs_id_h | vld | qs_id_l |
1905 	 * qset_id:   | 15 ~ 11 |  10 |  9 ~ 0  |
1906 	 *             \         \   /         /
1907 	 *              \         \ /         /
1908 	 * qset_id: | 15 | 14 ~ 10 |  9 ~ 0  |
1909 	 */
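	/* e.g. a hardware value of 0x84c (plus the vld bit) converts back
	 * to qset_id (1 << 10) | 76 = 1100
	 */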
1910 	qs_id_l = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_L_MSK,
1911 				  HCLGE_TM_QS_ID_L_S);
1912 	qs_id_h = hnae3_get_field(*qset_id, HCLGE_TM_QS_ID_H_EXT_MSK,
1913 				  HCLGE_TM_QS_ID_H_EXT_S);
1914 	*qset_id = 0;
1915 	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_L_MSK, HCLGE_TM_QS_ID_L_S,
1916 			qs_id_l);
1917 	hnae3_set_field(*qset_id, HCLGE_TM_QS_ID_H_MSK, HCLGE_TM_QS_ID_H_S,
1918 			qs_id_h);
1919 	return 0;
1920 }
1921 
1922 int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id)
1923 {
1924 #define HCLGE_TM_TC_MASK		0x7
1925 
1926 	struct hclge_tqp_tx_queue_tc_cmd *tc;
1927 	struct hclge_desc desc;
1928 	int ret;
1929 
1930 	tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
1931 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TQP_TX_QUEUE_TC, true);
1932 	tc->queue_id = cpu_to_le16(q_id);
1933 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1934 	if (ret) {
1935 		dev_err(&hdev->pdev->dev,
1936 			"failed to get queue to tc map, ret = %d\n", ret);
1937 		return ret;
1938 	}
1939 
1940 	*tc_id = tc->tc_id & HCLGE_TM_TC_MASK;
1941 	return 0;
1942 }
1943 
1944 int hclge_tm_get_pg_to_pri_map(struct hclge_dev *hdev, u8 pg_id,
1945 			       u8 *pri_bit_map)
1946 {
1947 	struct hclge_pg_to_pri_link_cmd *map;
1948 	struct hclge_desc desc;
1949 	int ret;
1950 
1951 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, true);
1952 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
1953 	map->pg_id = pg_id;
1954 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1955 	if (ret) {
1956 		dev_err(&hdev->pdev->dev,
1957 			"failed to get pg to pri map, ret = %d\n", ret);
1958 		return ret;
1959 	}
1960 
1961 	*pri_bit_map = map->pri_bit_map;
1962 	return 0;
1963 }
1964 
1965 int hclge_tm_get_pg_weight(struct hclge_dev *hdev, u8 pg_id, u8 *weight)
1966 {
1967 	struct hclge_pg_weight_cmd *pg_weight_cmd;
1968 	struct hclge_desc desc;
1969 	int ret;
1970 
1971 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, true);
1972 	pg_weight_cmd = (struct hclge_pg_weight_cmd *)desc.data;
1973 	pg_weight_cmd->pg_id = pg_id;
1974 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1975 	if (ret) {
1976 		dev_err(&hdev->pdev->dev,
1977 			"failed to get pg weight, ret = %d\n", ret);
1978 		return ret;
1979 	}
1980 
1981 	*weight = pg_weight_cmd->dwrr;
1982 	return 0;
1983 }
1984 
1985 int hclge_tm_get_pg_sch_mode(struct hclge_dev *hdev, u8 pg_id, u8 *mode)
1986 {
1987 	struct hclge_desc desc;
1988 	int ret;
1989 
1990 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, true);
1991 	desc.data[0] = cpu_to_le32(pg_id);
1992 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
1993 	if (ret) {
1994 		dev_err(&hdev->pdev->dev,
1995 			"failed to get pg sch mode, ret = %d\n", ret);
1996 		return ret;
1997 	}
1998 
1999 	*mode = (u8)le32_to_cpu(desc.data[1]);
2000 	return 0;
2001 }
2002 
2003 int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
2004 			   enum hclge_opcode_type cmd,
2005 			   struct hclge_tm_shaper_para *para)
2006 {
2007 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
2008 	struct hclge_desc desc;
2009 	u32 shapping_para;
2010 	int ret;
2011 
2012 	if (cmd != HCLGE_OPC_TM_PG_C_SHAPPING &&
2013 	    cmd != HCLGE_OPC_TM_PG_P_SHAPPING)
2014 		return -EINVAL;
2015 
2016 	hclge_cmd_setup_basic_desc(&desc, cmd, true);
2017 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
2018 	shap_cfg_cmd->pg_id = pg_id;
2019 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2020 	if (ret) {
2021 		dev_err(&hdev->pdev->dev,
2022 			"failed to get pg shaper(%#x), ret = %d\n",
2023 			cmd, ret);
2024 		return ret;
2025 	}
2026 
2027 	shapping_para = le32_to_cpu(shap_cfg_cmd->pg_shapping_para);
2028 	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
2029 	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
2030 	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
2031 	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
2032 	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
2033 	para->flag = shap_cfg_cmd->flag;
2034 	para->rate = le32_to_cpu(shap_cfg_cmd->pg_rate);
2035 	return 0;
2036 }
2037 
2038 int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
2039 			     struct hclge_tm_shaper_para *para)
2040 {
2041 	struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
2042 	struct hclge_desc desc;
2043 	u32 shapping_para;
2044 	int ret;
2045 
2046 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, true);
2047 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
2048 	if (ret) {
2049 		dev_err(&hdev->pdev->dev,
2050 			"failed to get port shaper, ret = %d\n", ret);
2051 		return ret;
2052 	}
2053 
2054 	port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
2055 	shapping_para = le32_to_cpu(port_shap_cfg_cmd->port_shapping_para);
2056 	para->ir_b = hclge_tm_get_field(shapping_para, IR_B);
2057 	para->ir_u = hclge_tm_get_field(shapping_para, IR_U);
2058 	para->ir_s = hclge_tm_get_field(shapping_para, IR_S);
2059 	para->bs_b = hclge_tm_get_field(shapping_para, BS_B);
2060 	para->bs_s = hclge_tm_get_field(shapping_para, BS_S);
2061 	para->flag = port_shap_cfg_cmd->flag;
2062 	para->rate = le32_to_cpu(port_shap_cfg_cmd->port_rate);
2063 
2064 	return 0;
2065 }
2066