1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9 
/* Shaper levels used to index the tick table in hclge_shaper_para_calc().
 * HCLGE_SHAPER_LVL_VF/PF intentionally alias the values 0/1: in vnet-based
 * scheduling a VF/PF shaper uses the same tick as the PRI/PG level.
 */
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,	/* number of entries in the tick table */
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};
19 
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
22 
23 #define HCLGE_SHAPER_BS_U_DEF	5
24 #define HCLGE_SHAPER_BS_S_DEF	20
25 
26 #define HCLGE_ETHER_MAX_RATE	100000
27 
28 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
29  * @ir: Rate to be config, its unit is Mbps
30  * @shaper_level: the shaper level. eg: port, pg, priority, queueset
31  * @ir_b: IR_B parameter of IR shaper
32  * @ir_u: IR_U parameter of IR shaper
33  * @ir_s: IR_S parameter of IR shaper
34  *
35  * the formula:
36  *
37  *		IR_b * (2 ^ IR_u) * 8
38  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
39  *		Tick * (2 ^ IR_s)
40  *
 * @return: 0: calculate successful, negative: fail
42  */
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
#define DIVISOR_CLK		(1000 * 8)
#define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)

	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
		6 * 8,          /* Port level */
		6 * 256         /* Qset level */
	};
	u8 ir_u_calc = 0;
	u8 ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
	    ir > HCLGE_ETHER_MAX_RATE)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/**
	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		/* The default parameters already produce the requested rate */
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			/* solve the formula for ir_b, rounding to nearest */
			*ir_b = (ir * tick * (1 << ir_s_calc) +
				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			/* overshot: step ir_u back once and solve for ir_b */
			u32 denominator = (DIVISOR_CLK * (1 << --ir_u_calc));
			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}
117 
118 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
119 			       enum hclge_opcode_type opcode, u64 *stats)
120 {
121 	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
122 	int ret, i, j;
123 
124 	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
125 	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
126 		return -EINVAL;
127 
128 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
129 		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
130 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
131 	}
132 
133 	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
134 
135 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
136 	if (ret)
137 		return ret;
138 
139 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
140 		struct hclge_pfc_stats_cmd *pfc_stats =
141 				(struct hclge_pfc_stats_cmd *)desc[i].data;
142 
143 		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
144 			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
145 
146 			if (index < HCLGE_MAX_TC_NUM)
147 				stats[index] =
148 					le64_to_cpu(pfc_stats->pkt_num[j]);
149 		}
150 	}
151 	return 0;
152 }
153 
/* hclge_pfc_rx_stats_get - read the per-TC PFC Rx packet counters */
int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
}
158 
/* hclge_pfc_tx_stats_get - read the per-TC PFC Tx packet counters */
int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
{
	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
}
163 
164 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
165 {
166 	struct hclge_desc desc;
167 
168 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
169 
170 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
171 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
172 
173 	return hclge_cmd_send(&hdev->hw, &desc, 1);
174 }
175 
176 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
177 				  u8 pfc_bitmap)
178 {
179 	struct hclge_desc desc;
180 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
181 
182 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
183 
184 	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
185 	pfc->pri_en_bitmap = pfc_bitmap;
186 
187 	return hclge_cmd_send(&hdev->hw, &desc, 1);
188 }
189 
/* hclge_pause_param_cfg - write the MAC pause parameters to hardware
 * @hdev: pointer to the hclge device
 * @addr: MAC address written into both address fields of the command
 * @pause_trans_gap: pause transmit gap value programmed to hardware
 * @pause_trans_time: pause transmit time value programmed to hardware
 */
static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
				 u8 pause_trans_gap, u16 pause_trans_time)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);

	ether_addr_copy(pause_param->mac_addr, addr);
	ether_addr_copy(pause_param->mac_addr_extra, addr);
	pause_param->pause_trans_gap = pause_trans_gap;
	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
207 
/* hclge_pause_addr_cfg - update only the MAC address of the pause config
 *
 * Queries the current pause parameters from hardware first, then writes
 * them back unchanged together with the new MAC address.
 */
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
{
	struct hclge_cfg_pause_param_cmd *pause_param;
	struct hclge_desc desc;
	u16 trans_time;
	u8 trans_gap;
	int ret;

	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;

	/* true: read the current parameters instead of writing */
	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);

	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
	if (ret)
		return ret;

	trans_gap = pause_param->pause_trans_gap;
	trans_time = le16_to_cpu(pause_param->pause_trans_time);

	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
}
229 
230 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
231 {
232 	u8 tc;
233 
234 	tc = hdev->tm_info.prio_tc[pri_id];
235 
236 	if (tc >= hdev->tm_info.num_tc)
237 		return -EINVAL;
238 
239 	/**
240 	 * the register for priority has four bytes, the first bytes includes
241 	 *  priority0 and priority1, the higher 4bit stands for priority1
242 	 *  while the lower 4bit stands for priority0, as below:
243 	 * first byte:	| pri_1 | pri_0 |
244 	 * second byte:	| pri_3 | pri_2 |
245 	 * third byte:	| pri_5 | pri_4 |
246 	 * fourth byte:	| pri_7 | pri_6 |
247 	 */
248 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
249 
250 	return 0;
251 }
252 
/* hclge_up_to_tc_map - program the user-priority to TC mapping table
 *
 * Packs all HNAE3_MAX_USER_PRIO priorities (one nibble each, see
 * hclge_fill_pri_array()) into the descriptor data and sends one command.
 */
static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
270 
271 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
272 				      u8 pg_id, u8 pri_bit_map)
273 {
274 	struct hclge_pg_to_pri_link_cmd *map;
275 	struct hclge_desc desc;
276 
277 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
278 
279 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
280 
281 	map->pg_id = pg_id;
282 	map->pri_bit_map = pri_bit_map;
283 
284 	return hclge_cmd_send(&hdev->hw, &desc, 1);
285 }
286 
/* hclge_tm_qs_to_pri_map_cfg - link a qset to a priority
 *
 * Writes the qset id, its target priority and the link-valid flag in a
 * single command.
 */
static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
303 
/* hclge_tm_q_to_qs_map_cfg - map a queue (nq) to a qset
 *
 * The link-valid bit is OR-ed into the qset id field before the endian
 * conversion.
 */
static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u16 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
319 
320 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
321 				  u8 dwrr)
322 {
323 	struct hclge_pg_weight_cmd *weight;
324 	struct hclge_desc desc;
325 
326 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
327 
328 	weight = (struct hclge_pg_weight_cmd *)desc.data;
329 
330 	weight->pg_id = pg_id;
331 	weight->dwrr = dwrr;
332 
333 	return hclge_cmd_send(&hdev->hw, &desc, 1);
334 }
335 
/* hclge_tm_pri_weight_cfg - set the DWRR weight of one priority */
static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
351 
/* hclge_tm_qs_weight_cfg - set the DWRR weight of one qset */
static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
367 
/* hclge_tm_get_shapping_para - pack shaper fields into one register word
 *
 * Combines the rate parameters (IR_B/IR_U/IR_S) and the BS_B/BS_S fields
 * into the 32-bit layout used by the TM shapping commands.
 */
static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
				      u8 bs_b, u8 bs_s)
{
	u32 shapping_para = 0;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	return shapping_para;
}
381 
382 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
383 				    enum hclge_shap_bucket bucket, u8 pg_id,
384 				    u32 shapping_para)
385 {
386 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
387 	enum hclge_opcode_type opcode;
388 	struct hclge_desc desc;
389 
390 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
391 		 HCLGE_OPC_TM_PG_C_SHAPPING;
392 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
393 
394 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
395 
396 	shap_cfg_cmd->pg_id = pg_id;
397 
398 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
399 
400 	return hclge_cmd_send(&hdev->hw, &desc, 1);
401 }
402 
403 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
404 {
405 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
406 	struct hclge_desc desc;
407 	u32 shapping_para = 0;
408 	u8 ir_u, ir_b, ir_s;
409 	int ret;
410 
411 	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
412 				     HCLGE_SHAPER_LVL_PORT,
413 				     &ir_b, &ir_u, &ir_s);
414 	if (ret)
415 		return ret;
416 
417 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
418 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
419 
420 	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
421 						   HCLGE_SHAPER_BS_U_DEF,
422 						   HCLGE_SHAPER_BS_S_DEF);
423 
424 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
425 
426 	return hclge_cmd_send(&hdev->hw, &desc, 1);
427 }
428 
/* hclge_tm_pri_shapping_cfg - write one shaper bucket of a priority
 *
 * A non-zero @bucket selects the P-bucket opcode, zero the C-bucket one.
 */
static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u32 shapping_para)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		 HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
450 
/* hclge_tm_pg_schd_mode_cfg - set the scheduling mode of one PG
 *
 * data[0] carries the PG id; data[1] carries the DWRR mask when the PG is
 * configured for DWRR, 0 otherwise.
 */
static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
466 
/* hclge_tm_pri_schd_mode_cfg - set the scheduling mode of one priority
 *
 * data[0] carries the priority id; data[1] carries the DWRR mask when the
 * corresponding TC uses DWRR, 0 otherwise.
 */
static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
482 
483 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
484 {
485 	struct hclge_desc desc;
486 
487 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
488 
489 	if (mode == HCLGE_SCH_MODE_DWRR)
490 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
491 	else
492 		desc.data[1] = 0;
493 
494 	desc.data[0] = cpu_to_le32(qs_id);
495 
496 	return hclge_cmd_send(&hdev->hw, &desc, 1);
497 }
498 
/* hclge_tm_qs_bp_cfg - map a TC onto a group of qsets for back-pressure
 *
 * @grp_id selects the qset group and @bit_map the qsets inside that group.
 */
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
			      u32 bit_map)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;
	bp_to_qs_map_cmd->qs_group_id = grp_id;
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
516 
/* hclge_tm_vport_tc_info_update - refresh per-vport TC and RSS bookkeeping
 * @vport: the vport whose kinfo (num_tc, rss_size, tqp layout) is rebuilt
 *
 * Derives the vport's TC count, qset offset, rss size and per-TC tqp
 * layout from the device-level TM configuration.
 */
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u16 max_rss_size;
	u8 i;

	/* TC configuration is shared by PF/VF in one port, only allow
	 * one tc for VF for simplicity. VF's vport_id is non zero.
	 */
	kinfo->num_tc = vport->vport_id ? 1 :
			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
	/* PF qsets occupy [0, num_tc); VF n starts right after them at
	 * offset num_tc + (n - 1).
	 */
	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
				(vport->vport_id ? (vport->vport_id - 1) : 0);

	max_rss_size = min_t(u16, hdev->rss_size_max,
			     vport->alloc_tqps / kinfo->num_tc);

	/* Set to user value, no larger than max_rss_size. */
	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
	    kinfo->req_rss_size <= max_rss_size) {
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, kinfo->req_rss_size);
		kinfo->rss_size = kinfo->req_rss_size;
	} else if (kinfo->rss_size > max_rss_size ||
		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
		/* Set to the maximum specification value (max_rss_size). */
		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
			 kinfo->rss_size, max_rss_size);
		kinfo->rss_size = max_rss_size;
	}

	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disable */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}
572 
573 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
574 {
575 	struct hclge_vport *vport = hdev->vport;
576 	u32 i;
577 
578 	for (i = 0; i < hdev->num_alloc_vport; i++) {
579 		hclge_tm_vport_tc_info_update(vport);
580 
581 		vport++;
582 	}
583 }
584 
/* hclge_tm_tc_info_init - default per-TC settings and prio->tc mapping
 *
 * Every TC starts in DWRR mode on PG 0 with PG 0's bw_limit. Priorities
 * beyond num_tc fall back to TC 0.
 */
static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC or pfc_en is
	 * non-zero.
	 */
	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}
609 
/* hclge_tm_pg_info_init - default settings for every priority group
 *
 * Only PG 0 gets the full DWRR budget, the hardware TC bitmap and equal
 * tc_dwrr weights; the other PGs only receive id/mode/bw defaults.
 */
static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
#define BW_PERCENT	100

	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		/* all bandwidth is assigned to the first PG */
		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
	}
}
634 
/* hclge_pfc_info_init - keep fc_mode and fc_mode_last_time consistent
 *
 * With DCB disabled the saved (pre-DCB) flow-control mode is restored;
 * with DCB enabled the current mode is saved and PFC is selected.
 */
static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disable, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time record the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}
652 
/* hclge_tm_schd_info_init - build all software TM state
 *
 * A non-TC-based scheduler mode requires exactly one PG; reject other
 * combinations before initializing PG, TC, vport and PFC info.
 */
static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}
669 
670 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
671 {
672 	int ret;
673 	u32 i;
674 
675 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
676 		return 0;
677 
678 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
679 		/* Cfg mapping */
680 		ret = hclge_tm_pg_to_pri_map_cfg(
681 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
682 		if (ret)
683 			return ret;
684 	}
685 
686 	return 0;
687 }
688 
/* hclge_tm_pg_shaper_cfg - configure both shaper buckets for every PG
 *
 * Only applies in TC-based scheduler mode. For each PG the C bucket is
 * written with zeroed rate fields and the P bucket with the rate derived
 * from the PG's bw_limit.
 */
static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Pg to pri */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
731 
732 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
733 {
734 	int ret;
735 	u32 i;
736 
737 	/* cfg pg schd */
738 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
739 		return 0;
740 
741 	/* pg to prio */
742 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
743 		/* Cfg dwrr */
744 		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
745 		if (ret)
746 			return ret;
747 	}
748 
749 	return 0;
750 }
751 
/* hclge_vport_q_to_qs_map - link every queue of a vport to its qset
 *
 * Walks each enabled TC's tqp range and maps the queue ids onto the
 * vport's qset for that TC.
 */
static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}
776 
/* hclge_tm_pri_q_qs_cfg - program qset-to-priority and queue-to-qset links
 *
 * TC-based mode links each vport's qset i to priority i; vnet-based mode
 * links all HNAE3_MAX_TC qsets of vport k to priority k. In both modes
 * each vport's queues are then mapped onto its qsets.
 */
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one by one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++) {
			struct hnae3_knic_private_info *kinfo =
				&vport[k].nic.kinfo;

			for (i = 0; i < kinfo->num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}
820 
/* hclge_tm_pri_tc_base_shaper_cfg - per-TC priority shapers (TC-based mode)
 *
 * For each TC the C bucket gets zeroed rate fields and the P bucket the
 * rate derived from the TC's bw_limit.
 */
static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	u32 shaper_para;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;

		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
							 HCLGE_SHAPER_BS_U_DEF,
							 HCLGE_SHAPER_BS_S_DEF);
		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
						shaper_para);
		if (ret)
			return ret;
	}

	return 0;
}
855 
856 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
857 {
858 	struct hclge_dev *hdev = vport->back;
859 	u8 ir_u, ir_b, ir_s;
860 	u32 shaper_para;
861 	int ret;
862 
863 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
864 				     &ir_b, &ir_u, &ir_s);
865 	if (ret)
866 		return ret;
867 
868 	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
869 						 HCLGE_SHAPER_BS_U_DEF,
870 						 HCLGE_SHAPER_BS_S_DEF);
871 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
872 					vport->vport_id, shaper_para);
873 	if (ret)
874 		return ret;
875 
876 	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
877 						 HCLGE_SHAPER_BS_U_DEF,
878 						 HCLGE_SHAPER_BS_S_DEF);
879 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
880 					vport->vport_id, shaper_para);
881 	if (ret)
882 		return ret;
883 
884 	return 0;
885 }
886 
/* hclge_tm_pri_vnet_base_shaper_qs_cfg - validate qset-level shaper input
 *
 * NOTE(review): the calculated ir_b/ir_u/ir_s values are never written to
 * hardware here; the loop only fails if a TC's bw_limit cannot be
 * expressed as shaper parameters. Confirm whether a qset shapping command
 * is intentionally omitted.
 */
static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}
906 
907 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
908 {
909 	struct hclge_vport *vport = hdev->vport;
910 	int ret;
911 	u32 i;
912 
913 	/* Need config vport shaper */
914 	for (i = 0; i < hdev->num_alloc_vport; i++) {
915 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
916 		if (ret)
917 			return ret;
918 
919 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
920 		if (ret)
921 			return ret;
922 
923 		vport++;
924 	}
925 
926 	return 0;
927 }
928 
929 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
930 {
931 	int ret;
932 
933 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
934 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
935 		if (ret)
936 			return ret;
937 	} else {
938 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
939 		if (ret)
940 			return ret;
941 	}
942 
943 	return 0;
944 }
945 
/* hclge_tm_pri_tc_base_dwrr_cfg - DWRR weights for TC-based mode
 *
 * Priority i takes the tc_dwrr weight of its PG; every vport's qset for
 * that priority takes the vport's own dwrr weight.
 */
static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}
974 
/* hclge_tm_ets_tc_dwrr_cfg - push the per-TC ETS weights in one command
 *
 * TCs not present in hw_tc_map get DEFAULT_TC_WEIGHT; active TCs use the
 * tc_dwrr weight of their priority group.
 */
static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
{
#define DEFAULT_TC_WEIGHT	1
#define DEFAULT_TC_OFFSET	14

	struct hclge_ets_tc_weight_cmd *ets_weight;
	struct hclge_desc desc;
	unsigned int i;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hclge_pg_info *pg_info;

		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;

		if (!(hdev->hw_tc_map & BIT(i)))
			continue;

		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
	}

	ets_weight->weight_offset = DEFAULT_TC_OFFSET;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}
1004 
/* hclge_tm_pri_vnet_base_dwrr_pri_cfg - DWRR weights for one vport
 *
 * The vport's priority weight comes from vport->dwrr; each of its qsets
 * reuses PG 0's tc_dwrr weight for the matching TC.
 */
static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}
1028 
1029 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1030 {
1031 	struct hclge_vport *vport = hdev->vport;
1032 	int ret;
1033 	u32 i;
1034 
1035 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1036 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1037 		if (ret)
1038 			return ret;
1039 
1040 		vport++;
1041 	}
1042 
1043 	return 0;
1044 }
1045 
1046 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1047 {
1048 	int ret;
1049 
1050 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1051 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1052 		if (ret)
1053 			return ret;
1054 
1055 		if (!hnae3_dev_dcb_supported(hdev))
1056 			return 0;
1057 
1058 		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1059 		if (ret == -EOPNOTSUPP) {
1060 			dev_warn(&hdev->pdev->dev,
1061 				 "fw %08x does't support ets tc weight cmd\n",
1062 				 hdev->fw_version);
1063 			ret = 0;
1064 		}
1065 
1066 		return ret;
1067 	} else {
1068 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1069 		if (ret)
1070 			return ret;
1071 	}
1072 
1073 	return 0;
1074 }
1075 
/* hclge_tm_map_cfg - program all TM mapping tables
 *
 * Order: user-priority -> TC table, then PG -> priority links, finally
 * the qset/queue links.
 */
static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int err;

	err = hclge_up_to_tc_map(hdev);
	if (err)
		return err;

	err = hclge_tm_pg_to_pri_map(hdev);
	if (err)
		return err;

	return hclge_tm_pri_q_qs_cfg(hdev);
}
1090 
/* hclge_tm_shaper_cfg - configure the shapers at port, PG and priority level */
static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int err;

	err = hclge_tm_port_shaper_cfg(hdev);
	if (err)
		return err;

	err = hclge_tm_pg_shaper_cfg(hdev);
	if (err)
		return err;

	return hclge_tm_pri_shaper_cfg(hdev);
}
1105 
/* hclge_tm_dwrr_cfg - configure the DWRR weights at PG and priority level */
int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int err;

	err = hclge_tm_pg_dwrr_cfg(hdev);
	if (err)
		return err;

	return hclge_tm_pri_dwrr_cfg(hdev);
}
1116 
/* hclge_tm_lvl2_schd_mode_cfg - set the scheduling mode of every PG
 *
 * Level-2 (PG) scheduling only exists in TC-based scheduler mode.
 */
static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only being config on TC-Based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}
1134 
/* hclge_tm_schd_mode_vnet_base_cfg - scheduling modes for one vport
 *
 * The priority id equals the vport id in vnet-based mode, so it must stay
 * below HNAE3_MAX_TC; each of the vport's qsets inherits the scheduling
 * mode of the matching TC.
 */
static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	if (vport->vport_id >= HNAE3_MAX_TC)
		return -EINVAL;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}
1160 
1161 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1162 {
1163 	struct hclge_vport *vport = hdev->vport;
1164 	int ret;
1165 	u8 i, k;
1166 
1167 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1168 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
1169 			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1170 			if (ret)
1171 				return ret;
1172 
1173 			for (k = 0; k < hdev->num_alloc_vport; k++) {
1174 				ret = hclge_tm_qs_schd_mode_cfg(
1175 					hdev, vport[k].qs_offset + i,
1176 					HCLGE_SCH_MODE_DWRR);
1177 				if (ret)
1178 					return ret;
1179 			}
1180 		}
1181 	} else {
1182 		for (i = 0; i < hdev->num_alloc_vport; i++) {
1183 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1184 			if (ret)
1185 				return ret;
1186 
1187 			vport++;
1188 		}
1189 	}
1190 
1191 	return 0;
1192 }
1193 
/* Program scheduling modes for all levels: PG first, then pri/qset. */
static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret = hclge_tm_lvl2_schd_mode_cfg(hdev);

	return ret ? ret : hclge_tm_lvl34_schd_mode_cfg(hdev);
}
1204 
/* Full TM scheduler bring-up: mapping tables, shapers, DWRR weights
 * and finally the per-level scheduling modes.
 */
int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret = hclge_tm_map_cfg(hdev);

	if (!ret)
		ret = hclge_tm_shaper_cfg(hdev);
	if (!ret)
		ret = hclge_tm_dwrr_cfg(hdev);
	if (!ret)
		ret = hclge_tm_schd_mode_hw(hdev);

	return ret;
}
1227 
1228 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1229 {
1230 	struct hclge_mac *mac = &hdev->hw.mac;
1231 
1232 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
1233 				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1234 				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1235 }
1236 
1237 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1238 {
1239 	u8 enable_bitmap = 0;
1240 
1241 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1242 		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1243 				HCLGE_RX_MAC_PAUSE_EN_MSK;
1244 
1245 	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1246 				      hdev->tm_info.pfc_en);
1247 }
1248 
1249 /* Each Tc has a 1024 queue sets to backpress, it divides to
1250  * 32 group, each group contains 32 queue sets, which can be
1251  * represented by u32 bitmap.
1252  */
1253 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1254 {
1255 	int i;
1256 
1257 	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1258 		u32 qs_bitmap = 0;
1259 		int k, ret;
1260 
1261 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1262 			struct hclge_vport *vport = &hdev->vport[k];
1263 			u16 qs_id = vport->qs_offset + tc;
1264 			u8 grp, sub_grp;
1265 
1266 			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1267 					      HCLGE_BP_GRP_ID_S);
1268 			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1269 						  HCLGE_BP_SUB_GRP_ID_S);
1270 			if (i == grp)
1271 				qs_bitmap |= (1 << sub_grp);
1272 		}
1273 
1274 		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1275 		if (ret)
1276 			return ret;
1277 	}
1278 
1279 	return 0;
1280 }
1281 
1282 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1283 {
1284 	bool tx_en, rx_en;
1285 
1286 	switch (hdev->tm_info.fc_mode) {
1287 	case HCLGE_FC_NONE:
1288 		tx_en = false;
1289 		rx_en = false;
1290 		break;
1291 	case HCLGE_FC_RX_PAUSE:
1292 		tx_en = false;
1293 		rx_en = true;
1294 		break;
1295 	case HCLGE_FC_TX_PAUSE:
1296 		tx_en = true;
1297 		rx_en = false;
1298 		break;
1299 	case HCLGE_FC_FULL:
1300 		tx_en = true;
1301 		rx_en = true;
1302 		break;
1303 	case HCLGE_FC_PFC:
1304 		tx_en = false;
1305 		rx_en = false;
1306 		break;
1307 	default:
1308 		tx_en = true;
1309 		rx_en = true;
1310 	}
1311 
1312 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1313 }
1314 
1315 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1316 {
1317 	int ret = 0;
1318 	int i;
1319 
1320 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
1321 		ret = hclge_bp_setup_hw(hdev, i);
1322 		if (ret)
1323 			return ret;
1324 	}
1325 
1326 	return ret;
1327 }
1328 
1329 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1330 {
1331 	int ret;
1332 
1333 	ret = hclge_pause_param_setup_hw(hdev);
1334 	if (ret)
1335 		return ret;
1336 
1337 	ret = hclge_mac_pause_setup_hw(hdev);
1338 	if (ret)
1339 		return ret;
1340 
1341 	/* Only DCB-supported dev supports qset back pressure and pfc cmd */
1342 	if (!hnae3_dev_dcb_supported(hdev))
1343 		return 0;
1344 
1345 	/* GE MAC does not support PFC, when driver is initializing and MAC
1346 	 * is in GE Mode, ignore the error here, otherwise initialization
1347 	 * will fail.
1348 	 */
1349 	ret = hclge_pfc_setup_hw(hdev);
1350 	if (init && ret == -EOPNOTSUPP)
1351 		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1352 	else if (ret) {
1353 		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1354 			ret);
1355 		return ret;
1356 	}
1357 
1358 	return hclge_tm_bp_setup(hdev);
1359 }
1360 
1361 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1362 {
1363 	struct hclge_vport *vport = hdev->vport;
1364 	struct hnae3_knic_private_info *kinfo;
1365 	u32 i, k;
1366 
1367 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1368 		hdev->tm_info.prio_tc[i] = prio_tc[i];
1369 
1370 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
1371 			kinfo = &vport[k].nic.kinfo;
1372 			kinfo->prio_tc[i] = prio_tc[i];
1373 		}
1374 	}
1375 }
1376 
1377 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1378 {
1379 	u8 bit_map = 0;
1380 	u8 i;
1381 
1382 	hdev->tm_info.num_tc = num_tc;
1383 
1384 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1385 		bit_map |= BIT(i);
1386 
1387 	if (!bit_map) {
1388 		bit_map = 1;
1389 		hdev->tm_info.num_tc = 1;
1390 	}
1391 
1392 	hdev->hw_tc_map = bit_map;
1393 
1394 	hclge_tm_schd_info_init(hdev);
1395 }
1396 
1397 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
1398 {
1399 	/* DCB is enabled if we have more than 1 TC or pfc_en is
1400 	 * non-zero.
1401 	 */
1402 	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
1403 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
1404 	else
1405 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
1406 
1407 	hclge_pfc_info_init(hdev);
1408 }
1409 
1410 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1411 {
1412 	int ret;
1413 
1414 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1415 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1416 		return -ENOTSUPP;
1417 
1418 	ret = hclge_tm_schd_setup_hw(hdev);
1419 	if (ret)
1420 		return ret;
1421 
1422 	ret = hclge_pause_setup_hw(hdev, init);
1423 	if (ret)
1424 		return ret;
1425 
1426 	return 0;
1427 }
1428 
1429 int hclge_tm_schd_init(struct hclge_dev *hdev)
1430 {
1431 	int ret;
1432 
1433 	/* fc_mode is HCLGE_FC_FULL on reset */
1434 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1435 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1436 
1437 	ret = hclge_tm_schd_info_init(hdev);
1438 	if (ret)
1439 		return ret;
1440 
1441 	return hclge_tm_init_hw(hdev, true);
1442 }
1443 
1444 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1445 {
1446 	struct hclge_vport *vport = hdev->vport;
1447 	int ret;
1448 
1449 	hclge_tm_vport_tc_info_update(vport);
1450 
1451 	ret = hclge_vport_q_to_qs_map(hdev, vport);
1452 	if (ret)
1453 		return ret;
1454 
1455 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
1456 		return 0;
1457 
1458 	return hclge_tm_bp_setup(hdev);
1459 }
1460