1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9 
10 enum hclge_shaper_level {
11 	HCLGE_SHAPER_LVL_PRI	= 0,
12 	HCLGE_SHAPER_LVL_PG	= 1,
13 	HCLGE_SHAPER_LVL_PORT	= 2,
14 	HCLGE_SHAPER_LVL_QSET	= 3,
15 	HCLGE_SHAPER_LVL_CNT	= 4,
16 	HCLGE_SHAPER_LVL_VF	= 0,
17 	HCLGE_SHAPER_LVL_PF	= 1,
18 };
19 
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
22 
23 #define HCLGE_SHAPER_BS_U_DEF	5
24 #define HCLGE_SHAPER_BS_S_DEF	20
25 
26 #define HCLGE_ETHER_MAX_RATE	100000
27 
28 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
29  * @ir: rate to be configured, its unit is Mbps
30  * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
31  * @ir_b: IR_B parameter of IR shaper
32  * @ir_u: IR_U parameter of IR shaper
33  * @ir_s: IR_S parameter of IR shaper
34  *
35  * the formula:
36  *
37  *		IR_b * (2 ^ IR_u) * 8
38  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
39  *		Tick * (2 ^ IR_s)
40  *
41  * @return: 0: calculation successful, negative: failure
42  */
43 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
44 				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
45 {
46 #define DIVISOR_CLK		(1000 * 8)
47 #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
48 
49 	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
50 		6 * 256,        /* Priority level */
51 		6 * 32,         /* Priority group level */
52 		6 * 8,          /* Port level */
53 		6 * 256         /* Qset level */
54 	};
55 	u8 ir_u_calc = 0;
56 	u8 ir_s_calc = 0;
57 	u32 ir_calc;
58 	u32 tick;
59 
60 	/* Calc tick */
61 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
62 		return -EINVAL;
63 
64 	tick = tick_array[shaper_level];
65 
66 	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0;
67 	 * the formula then becomes:
68 	 *
69 	 *		126 * 1 * 8
70 	 * ir_calc = ---------------- * 1000
71 	 *		tick * 1
72 	 */
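	/* Illustrative numbers only: at port level tick = 6 * 8 = 48, so the
	 * baseline above works out to 126 * 8000 / 48 = 21000 Mbps; rates
	 * above it are reached by raising ir_u, rates below it by raising
	 * ir_s and/or reducing ir_b.
	 */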
73 	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
74 
75 	if (ir_calc == ir) {
76 		*ir_b = 126;
77 		*ir_u = 0;
78 		*ir_s = 0;
79 
80 		return 0;
81 	} else if (ir_calc > ir) {
82 		/* Increasing the denominator to select ir_s value */
83 		while (ir_calc > ir) {
84 			ir_s_calc++;
85 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
86 		}
87 
88 		if (ir_calc == ir)
89 			*ir_b = 126;
90 		else
91 			*ir_b = (ir * tick * (1 << ir_s_calc) +
92 				 (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
93 	} else {
94 		/* Increasing the numerator to select ir_u value */
95 		u32 numerator;
96 
97 		while (ir_calc < ir) {
98 			ir_u_calc++;
99 			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
100 			ir_calc = (numerator + (tick >> 1)) / tick;
101 		}
102 
103 		if (ir_calc == ir) {
104 			*ir_b = 126;
105 		} else {
106 			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

107 			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
108 		}
109 	}
110 
111 	*ir_u = ir_u_calc;
112 	*ir_s = ir_s_calc;
113 
114 	return 0;
115 }
116 
117 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
118 			       enum hclge_opcode_type opcode, u64 *stats)
119 {
120 	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
121 	int ret, i, j;
122 
123 	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
124 	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
125 		return -EINVAL;
126 
127 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
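		/* All but the last descriptor carry HCLGE_CMD_FLAG_NEXT so
		 * they are chained into one multi-descriptor query; the last
		 * descriptor is set up after this loop without the flag.
		 */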
128 		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
129 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
130 	}
131 
132 	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
133 
134 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
135 	if (ret)
136 		return ret;
137 
138 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
139 		struct hclge_pfc_stats_cmd *pfc_stats =
140 				(struct hclge_pfc_stats_cmd *)desc[i].data;
141 
142 		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
143 			u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
144 
145 			if (index < HCLGE_MAX_TC_NUM)
146 				stats[index] =
147 					le64_to_cpu(pfc_stats->pkt_num[j]);
148 		}
149 	}
150 	return 0;
151 }
152 
153 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
154 {
155 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
156 }
157 
158 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
159 {
160 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
161 }
162 
163 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
164 {
165 	struct hclge_desc desc;
166 
167 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
168 
169 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
170 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
171 
172 	return hclge_cmd_send(&hdev->hw, &desc, 1);
173 }
174 
175 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
176 				  u8 pfc_bitmap)
177 {
178 	struct hclge_desc desc;
179 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
180 
181 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
182 
183 	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
184 	pfc->pri_en_bitmap = pfc_bitmap;
185 
186 	return hclge_cmd_send(&hdev->hw, &desc, 1);
187 }
188 
189 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
190 				 u8 pause_trans_gap, u16 pause_trans_time)
191 {
192 	struct hclge_cfg_pause_param_cmd *pause_param;
193 	struct hclge_desc desc;
194 
195 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
196 
197 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
198 
199 	ether_addr_copy(pause_param->mac_addr, addr);
200 	ether_addr_copy(pause_param->mac_addr_extra, addr);
201 	pause_param->pause_trans_gap = pause_trans_gap;
202 	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
203 
204 	return hclge_cmd_send(&hdev->hw, &desc, 1);
205 }
206 
207 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
208 {
209 	struct hclge_cfg_pause_param_cmd *pause_param;
210 	struct hclge_desc desc;
211 	u16 trans_time;
212 	u8 trans_gap;
213 	int ret;
214 
215 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
216 
217 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
218 
219 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
220 	if (ret)
221 		return ret;
222 
223 	trans_gap = pause_param->pause_trans_gap;
224 	trans_time = le16_to_cpu(pause_param->pause_trans_time);
225 
226 	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
227 }
228 
229 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
230 {
231 	u8 tc;
232 
233 	tc = hdev->tm_info.prio_tc[pri_id];
234 
235 	if (tc >= hdev->tm_info.num_tc)
236 		return -EINVAL;
237 
238 	/* The register for priority has four bytes: the first byte covers
239 	 * priority 0 and priority 1, where the higher 4 bits stand for
240 	 * priority 1 and the lower 4 bits stand for priority 0, as below:
241 	 *
242 	 * first byte:	| pri_1 | pri_0 |
243 	 * second byte:	| pri_3 | pri_2 |
244 	 * third byte:	| pri_5 | pri_4 |
245 	 * fourth byte:	| pri_7 | pri_6 |
246 	 */
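	/* e.g. (illustrative): pri_id = 5 with tc = 3 sets the high nibble
	 * of pri[2]: pri[5 >> 1] |= 3 << ((5 & 1) * 4).
	 */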
247 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
248 
249 	return 0;
250 }
251 
252 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
253 {
254 	struct hclge_desc desc;
255 	u8 *pri = (u8 *)desc.data;
256 	u8 pri_id;
257 	int ret;
258 
259 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
260 
261 	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
262 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
263 		if (ret)
264 			return ret;
265 	}
266 
267 	return hclge_cmd_send(&hdev->hw, &desc, 1);
268 }
269 
270 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
271 				      u8 pg_id, u8 pri_bit_map)
272 {
273 	struct hclge_pg_to_pri_link_cmd *map;
274 	struct hclge_desc desc;
275 
276 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
277 
278 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
279 
280 	map->pg_id = pg_id;
281 	map->pri_bit_map = pri_bit_map;
282 
283 	return hclge_cmd_send(&hdev->hw, &desc, 1);
284 }
285 
286 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
287 				      u16 qs_id, u8 pri)
288 {
289 	struct hclge_qs_to_pri_link_cmd *map;
290 	struct hclge_desc desc;
291 
292 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
293 
294 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
295 
296 	map->qs_id = cpu_to_le16(qs_id);
297 	map->priority = pri;
298 	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
299 
300 	return hclge_cmd_send(&hdev->hw, &desc, 1);
301 }
302 
303 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
304 				    u16 q_id, u16 qs_id)
305 {
306 	struct hclge_nq_to_qs_link_cmd *map;
307 	struct hclge_desc desc;
308 
309 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
310 
311 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
312 
313 	map->nq_id = cpu_to_le16(q_id);
314 	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
315 
316 	return hclge_cmd_send(&hdev->hw, &desc, 1);
317 }
318 
319 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
320 				  u8 dwrr)
321 {
322 	struct hclge_pg_weight_cmd *weight;
323 	struct hclge_desc desc;
324 
325 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
326 
327 	weight = (struct hclge_pg_weight_cmd *)desc.data;
328 
329 	weight->pg_id = pg_id;
330 	weight->dwrr = dwrr;
331 
332 	return hclge_cmd_send(&hdev->hw, &desc, 1);
333 }
334 
335 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
336 				   u8 dwrr)
337 {
338 	struct hclge_priority_weight_cmd *weight;
339 	struct hclge_desc desc;
340 
341 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
342 
343 	weight = (struct hclge_priority_weight_cmd *)desc.data;
344 
345 	weight->pri_id = pri_id;
346 	weight->dwrr = dwrr;
347 
348 	return hclge_cmd_send(&hdev->hw, &desc, 1);
349 }
350 
351 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
352 				  u8 dwrr)
353 {
354 	struct hclge_qs_weight_cmd *weight;
355 	struct hclge_desc desc;
356 
357 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
358 
359 	weight = (struct hclge_qs_weight_cmd *)desc.data;
360 
361 	weight->qs_id = cpu_to_le16(qs_id);
362 	weight->dwrr = dwrr;
363 
364 	return hclge_cmd_send(&hdev->hw, &desc, 1);
365 }
366 
367 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
368 				      u8 bs_b, u8 bs_s)
369 {
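	/* Pack the IR (rate) and BS (bucket size) fields into the 32-bit
	 * shaping parameter word passed to the PG/priority/port shaping cmds.
	 */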
370 	u32 shapping_para = 0;
371 
372 	hclge_tm_set_field(shapping_para, IR_B, ir_b);
373 	hclge_tm_set_field(shapping_para, IR_U, ir_u);
374 	hclge_tm_set_field(shapping_para, IR_S, ir_s);
375 	hclge_tm_set_field(shapping_para, BS_B, bs_b);
376 	hclge_tm_set_field(shapping_para, BS_S, bs_s);
377 
378 	return shapping_para;
379 }
380 
381 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
382 				    enum hclge_shap_bucket bucket, u8 pg_id,
383 				    u32 shapping_para)
384 {
385 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
386 	enum hclge_opcode_type opcode;
387 	struct hclge_desc desc;
388 
389 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
390 		 HCLGE_OPC_TM_PG_C_SHAPPING;
391 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
392 
393 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
394 
395 	shap_cfg_cmd->pg_id = pg_id;
396 
397 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
398 
399 	return hclge_cmd_send(&hdev->hw, &desc, 1);
400 }
401 
402 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
403 {
404 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
405 	struct hclge_desc desc;
406 	u32 shapping_para = 0;
407 	u8 ir_u, ir_b, ir_s;
408 	int ret;
409 
410 	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
411 				     HCLGE_SHAPER_LVL_PORT,
412 				     &ir_b, &ir_u, &ir_s);
413 	if (ret)
414 		return ret;
415 
416 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
417 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
418 
419 	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
420 						   HCLGE_SHAPER_BS_U_DEF,
421 						   HCLGE_SHAPER_BS_S_DEF);
422 
423 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
424 
425 	return hclge_cmd_send(&hdev->hw, &desc, 1);
426 }
427 
428 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
429 				     enum hclge_shap_bucket bucket, u8 pri_id,
430 				     u32 shapping_para)
431 {
432 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
433 	enum hclge_opcode_type opcode;
434 	struct hclge_desc desc;
435 
436 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
437 		 HCLGE_OPC_TM_PRI_C_SHAPPING;
438 
439 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
440 
441 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
442 
443 	shap_cfg_cmd->pri_id = pri_id;
444 
445 	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
446 
447 	return hclge_cmd_send(&hdev->hw, &desc, 1);
448 }
449 
450 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
451 {
452 	struct hclge_desc desc;
453 
454 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
455 
456 	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
457 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
458 	else
459 		desc.data[1] = 0;
460 
461 	desc.data[0] = cpu_to_le32(pg_id);
462 
463 	return hclge_cmd_send(&hdev->hw, &desc, 1);
464 }
465 
466 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
467 {
468 	struct hclge_desc desc;
469 
470 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
471 
472 	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
473 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
474 	else
475 		desc.data[1] = 0;
476 
477 	desc.data[0] = cpu_to_le32(pri_id);
478 
479 	return hclge_cmd_send(&hdev->hw, &desc, 1);
480 }
481 
482 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
483 {
484 	struct hclge_desc desc;
485 
486 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
487 
488 	if (mode == HCLGE_SCH_MODE_DWRR)
489 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
490 	else
491 		desc.data[1] = 0;
492 
493 	desc.data[0] = cpu_to_le32(qs_id);
494 
495 	return hclge_cmd_send(&hdev->hw, &desc, 1);
496 }
497 
498 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
499 			      u32 bit_map)
500 {
501 	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
502 	struct hclge_desc desc;
503 
504 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
505 				   false);
506 
507 	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
508 
509 	bp_to_qs_map_cmd->tc_id = tc;
510 	bp_to_qs_map_cmd->qs_group_id = grp_id;
511 	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
512 
513 	return hclge_cmd_send(&hdev->hw, &desc, 1);
514 }
515 
516 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
517 {
518 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
519 	struct hclge_dev *hdev = vport->back;
520 	u16 max_rss_size;
521 	u8 i;
522 
523 	/* TC configuration is shared by PF/VF in one port; only allow
524 	 * one TC per VF for simplicity. A VF's vport_id is non-zero.
525 	 */
526 	kinfo->num_tc = vport->vport_id ? 1 :
527 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
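	/* Qset layout implied by the mapping below: the PF (vport_id 0) uses
	 * qsets [0, num_tc); VF n (vport_id n > 0) uses the single qset
	 * num_tc + (n - 1), matching its single TC.
	 */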
528 	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
529 				(vport->vport_id ? (vport->vport_id - 1) : 0);
530 
531 	max_rss_size = min_t(u16, hdev->rss_size_max,
532 			     vport->alloc_tqps / kinfo->num_tc);
533 
534 	/* Set to user value, no larger than max_rss_size. */
535 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
536 	    kinfo->req_rss_size <= max_rss_size) {
537 		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
538 			 kinfo->rss_size, kinfo->req_rss_size);
539 		kinfo->rss_size = kinfo->req_rss_size;
540 	} else if (kinfo->rss_size > max_rss_size ||
541 		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
542 		/* Set to the maximum specification value (max_rss_size). */
543 		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
544 			 kinfo->rss_size, max_rss_size);
545 		kinfo->rss_size = max_rss_size;
546 	}
547 
548 	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
549 	vport->dwrr = 100;  /* 100 percent as init */
550 	vport->alloc_rss_size = kinfo->rss_size;
551 	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
552 
553 	for (i = 0; i < HNAE3_MAX_TC; i++) {
554 		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
555 			kinfo->tc_info[i].enable = true;
556 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
557 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
558 			kinfo->tc_info[i].tc = i;
559 		} else {
560 			/* Set to default queue if TC is disabled */
561 			kinfo->tc_info[i].enable = false;
562 			kinfo->tc_info[i].tqp_offset = 0;
563 			kinfo->tc_info[i].tqp_count = 1;
564 			kinfo->tc_info[i].tc = 0;
565 		}
566 	}
567 
568 	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
569 	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
570 }
571 
572 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
573 {
574 	struct hclge_vport *vport = hdev->vport;
575 	u32 i;
576 
577 	for (i = 0; i < hdev->num_alloc_vport; i++) {
578 		hclge_tm_vport_tc_info_update(vport);
579 
580 		vport++;
581 	}
582 }
583 
584 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
585 {
586 	u8 i;
587 
588 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
589 		hdev->tm_info.tc_info[i].tc_id = i;
590 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
591 		hdev->tm_info.tc_info[i].pgid = 0;
592 		hdev->tm_info.tc_info[i].bw_limit =
593 			hdev->tm_info.pg_info[0].bw_limit;
594 	}
595 
596 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
597 		hdev->tm_info.prio_tc[i] =
598 			(i >= hdev->tm_info.num_tc) ? 0 : i;
599 
600 	/* DCB is enabled if we have more than 1 TC */
601 	if (hdev->tm_info.num_tc > 1)
602 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
603 	else
604 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
605 }
606 
607 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
608 {
609 #define BW_PERCENT	100
610 
611 	u8 i;
612 
613 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
614 		int k;
615 
616 		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
617 
618 		hdev->tm_info.pg_info[i].pg_id = i;
619 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
620 
621 		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
622 
623 		if (i != 0)
624 			continue;
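		/* By default all TCs are attached to PG 0 (see
		 * hclge_tm_tc_info_init(), which sets pgid to 0), so only
		 * PG 0 gets the TC bitmap and per-TC dwrr weights.
		 */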
625 
626 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
627 		for (k = 0; k < hdev->tm_info.num_tc; k++)
628 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
629 	}
630 }
631 
632 static void hclge_pfc_info_init(struct hclge_dev *hdev)
633 {
634 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
635 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
636 			dev_warn(&hdev->pdev->dev,
637 				 "DCB is disabled, but last mode is FC_PFC\n");
638 
639 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
640 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
641 		/* fc_mode_last_time records the last fc_mode when
642 		 * DCB is enabled, so that fc_mode can be set to
643 		 * the correct value when DCB is disabled.
644 		 */
645 		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
646 		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
647 	}
648 }
649 
650 static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
651 {
652 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
653 	    (hdev->tm_info.num_pg != 1))
654 		return -EINVAL;
655 
656 	hclge_tm_pg_info_init(hdev);
657 
658 	hclge_tm_tc_info_init(hdev);
659 
660 	hclge_tm_vport_info_update(hdev);
661 
662 	hclge_pfc_info_init(hdev);
663 
664 	return 0;
665 }
666 
667 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
668 {
669 	int ret;
670 	u32 i;
671 
672 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
673 		return 0;
674 
675 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
676 		/* Cfg mapping */
677 		ret = hclge_tm_pg_to_pri_map_cfg(
678 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
679 		if (ret)
680 			return ret;
681 	}
682 
683 	return 0;
684 }
685 
686 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
687 {
688 	u8 ir_u, ir_b, ir_s;
689 	u32 shaper_para;
690 	int ret;
691 	u32 i;
692 
693 	/* Cfg pg schd */
694 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
695 		return 0;
696 
697 	/* Cfg pg shaper */
698 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
699 		/* Calc shaper para */
700 		ret = hclge_shaper_para_calc(
701 					hdev->tm_info.pg_info[i].bw_limit,
702 					HCLGE_SHAPER_LVL_PG,
703 					&ir_b, &ir_u, &ir_s);
704 		if (ret)
705 			return ret;
706 
707 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
708 							 HCLGE_SHAPER_BS_U_DEF,
709 							 HCLGE_SHAPER_BS_S_DEF);
710 		ret = hclge_tm_pg_shapping_cfg(hdev,
711 					       HCLGE_TM_SHAP_C_BUCKET, i,
712 					       shaper_para);
713 		if (ret)
714 			return ret;
715 
716 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
717 							 HCLGE_SHAPER_BS_U_DEF,
718 							 HCLGE_SHAPER_BS_S_DEF);
719 		ret = hclge_tm_pg_shapping_cfg(hdev,
720 					       HCLGE_TM_SHAP_P_BUCKET, i,
721 					       shaper_para);
722 		if (ret)
723 			return ret;
724 	}
725 
726 	return 0;
727 }
728 
729 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
730 {
731 	int ret;
732 	u32 i;
733 
734 	/* cfg pg schd */
735 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
736 		return 0;
737 
738 	/* Cfg pg dwrr */
739 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
740 		/* Cfg dwrr */
741 		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
742 		if (ret)
743 			return ret;
744 	}
745 
746 	return 0;
747 }
748 
749 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
750 				   struct hclge_vport *vport)
751 {
752 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
753 	struct hnae3_queue **tqp = kinfo->tqp;
754 	struct hnae3_tc_info *v_tc_info;
755 	u32 i, j;
756 	int ret;
757 
758 	for (i = 0; i < kinfo->num_tc; i++) {
759 		v_tc_info = &kinfo->tc_info[i];
760 		for (j = 0; j < v_tc_info->tqp_count; j++) {
761 			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
762 
763 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
764 						       hclge_get_queue_id(q),
765 						       vport->qs_offset + i);
766 			if (ret)
767 				return ret;
768 		}
769 	}
770 
771 	return 0;
772 }
773 
774 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
775 {
776 	struct hclge_vport *vport = hdev->vport;
777 	int ret;
778 	u32 i, k;
779 
780 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
781 		/* Cfg qs -> pri mapping, one by one mapping */
782 		for (k = 0; k < hdev->num_alloc_vport; k++) {
783 			struct hnae3_knic_private_info *kinfo =
784 				&vport[k].nic.kinfo;
785 
786 			for (i = 0; i < kinfo->num_tc; i++) {
787 				ret = hclge_tm_qs_to_pri_map_cfg(
788 					hdev, vport[k].qs_offset + i, i);
789 				if (ret)
790 					return ret;
791 			}
792 		}
793 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
794 		/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
795 		for (k = 0; k < hdev->num_alloc_vport; k++)
796 			for (i = 0; i < HNAE3_MAX_TC; i++) {
797 				ret = hclge_tm_qs_to_pri_map_cfg(
798 					hdev, vport[k].qs_offset + i, k);
799 				if (ret)
800 					return ret;
801 			}
802 	} else {
803 		return -EINVAL;
804 	}
805 
806 	/* Cfg q -> qs mapping */
807 	for (i = 0; i < hdev->num_alloc_vport; i++) {
808 		ret = hclge_vport_q_to_qs_map(hdev, vport);
809 		if (ret)
810 			return ret;
811 
812 		vport++;
813 	}
814 
815 	return 0;
816 }
817 
818 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
819 {
820 	u8 ir_u, ir_b, ir_s;
821 	u32 shaper_para;
822 	int ret;
823 	u32 i;
824 
825 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
826 		ret = hclge_shaper_para_calc(
827 					hdev->tm_info.tc_info[i].bw_limit,
828 					HCLGE_SHAPER_LVL_PRI,
829 					&ir_b, &ir_u, &ir_s);
830 		if (ret)
831 			return ret;
832 
833 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
834 							 HCLGE_SHAPER_BS_U_DEF,
835 							 HCLGE_SHAPER_BS_S_DEF);
836 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
837 						shaper_para);
838 		if (ret)
839 			return ret;
840 
841 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
842 							 HCLGE_SHAPER_BS_U_DEF,
843 							 HCLGE_SHAPER_BS_S_DEF);
844 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
845 						shaper_para);
846 		if (ret)
847 			return ret;
848 	}
849 
850 	return 0;
851 }
852 
853 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
854 {
855 	struct hclge_dev *hdev = vport->back;
856 	u8 ir_u, ir_b, ir_s;
857 	u32 shaper_para;
858 	int ret;
859 
860 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
861 				     &ir_b, &ir_u, &ir_s);
862 	if (ret)
863 		return ret;
864 
865 	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
866 						 HCLGE_SHAPER_BS_U_DEF,
867 						 HCLGE_SHAPER_BS_S_DEF);
868 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
869 					vport->vport_id, shaper_para);
870 	if (ret)
871 		return ret;
872 
873 	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
874 						 HCLGE_SHAPER_BS_U_DEF,
875 						 HCLGE_SHAPER_BS_S_DEF);
876 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
877 					vport->vport_id, shaper_para);
878 	if (ret)
879 		return ret;
880 
881 	return 0;
882 }
883 
884 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
885 {
886 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
887 	struct hclge_dev *hdev = vport->back;
888 	u8 ir_u, ir_b, ir_s;
889 	u32 i;
890 	int ret;
891 
892 	for (i = 0; i < kinfo->num_tc; i++) {
893 		ret = hclge_shaper_para_calc(
894 					hdev->tm_info.tc_info[i].bw_limit,
895 					HCLGE_SHAPER_LVL_QSET,
896 					&ir_b, &ir_u, &ir_s);
897 		if (ret)
898 			return ret;
899 	}
900 
901 	return 0;
902 }
903 
904 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
905 {
906 	struct hclge_vport *vport = hdev->vport;
907 	int ret;
908 	u32 i;
909 
910 	/* Need to config vport shaper */
911 	for (i = 0; i < hdev->num_alloc_vport; i++) {
912 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
913 		if (ret)
914 			return ret;
915 
916 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
917 		if (ret)
918 			return ret;
919 
920 		vport++;
921 	}
922 
923 	return 0;
924 }
925 
926 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
927 {
928 	int ret;
929 
930 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
931 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
932 		if (ret)
933 			return ret;
934 	} else {
935 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
936 		if (ret)
937 			return ret;
938 	}
939 
940 	return 0;
941 }
942 
943 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
944 {
945 	struct hclge_vport *vport = hdev->vport;
946 	struct hclge_pg_info *pg_info;
947 	u8 dwrr;
948 	int ret;
949 	u32 i, k;
950 
951 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
952 		pg_info =
953 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
954 		dwrr = pg_info->tc_dwrr[i];
955 
956 		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
957 		if (ret)
958 			return ret;
959 
960 		for (k = 0; k < hdev->num_alloc_vport; k++) {
961 			ret = hclge_tm_qs_weight_cfg(
962 				hdev, vport[k].qs_offset + i,
963 				vport[k].dwrr);
964 			if (ret)
965 				return ret;
966 		}
967 	}
968 
969 	return 0;
970 }
971 
972 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
973 {
974 #define DEFAULT_TC_WEIGHT	1
975 #define DEFAULT_TC_OFFSET	14
976 
977 	struct hclge_ets_tc_weight_cmd *ets_weight;
978 	struct hclge_desc desc;
979 	int i;
980 
981 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
982 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
983 
984 	for (i = 0; i < HNAE3_MAX_TC; i++) {
985 		struct hclge_pg_info *pg_info;
986 
987 		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
988 
989 		if (!(hdev->hw_tc_map & BIT(i)))
990 			continue;
991 
992 		pg_info =
993 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
994 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
995 	}
996 
997 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
998 
999 	return hclge_cmd_send(&hdev->hw, &desc, 1);
1000 }
1001 
1002 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
1003 {
1004 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1005 	struct hclge_dev *hdev = vport->back;
1006 	int ret;
1007 	u8 i;
1008 
1009 	/* Vf dwrr */
1010 	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1011 	if (ret)
1012 		return ret;
1013 
1014 	/* Qset dwrr */
1015 	for (i = 0; i < kinfo->num_tc; i++) {
1016 		ret = hclge_tm_qs_weight_cfg(
1017 			hdev, vport->qs_offset + i,
1018 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
1019 		if (ret)
1020 			return ret;
1021 	}
1022 
1023 	return 0;
1024 }
1025 
1026 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1027 {
1028 	struct hclge_vport *vport = hdev->vport;
1029 	int ret;
1030 	u32 i;
1031 
1032 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1033 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1034 		if (ret)
1035 			return ret;
1036 
1037 		vport++;
1038 	}
1039 
1040 	return 0;
1041 }
1042 
1043 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1044 {
1045 	int ret;
1046 
1047 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1048 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1049 		if (ret)
1050 			return ret;
1051 
1052 		if (!hnae3_dev_dcb_supported(hdev))
1053 			return 0;
1054 
1055 		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1056 		if (ret == -EOPNOTSUPP) {
1057 			dev_warn(&hdev->pdev->dev,
1058 				 "fw %08x doesn't support ets tc weight cmd\n",
1059 				 hdev->fw_version);
1060 			ret = 0;
1061 		}
1062 
1063 		return ret;
1064 	} else {
1065 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1066 		if (ret)
1067 			return ret;
1068 	}
1069 
1070 	return 0;
1071 }
1072 
1073 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1074 {
1075 	int ret;
1076 
1077 	ret = hclge_up_to_tc_map(hdev);
1078 	if (ret)
1079 		return ret;
1080 
1081 	ret = hclge_tm_pg_to_pri_map(hdev);
1082 	if (ret)
1083 		return ret;
1084 
1085 	return hclge_tm_pri_q_qs_cfg(hdev);
1086 }
1087 
1088 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1089 {
1090 	int ret;
1091 
1092 	ret = hclge_tm_port_shaper_cfg(hdev);
1093 	if (ret)
1094 		return ret;
1095 
1096 	ret = hclge_tm_pg_shaper_cfg(hdev);
1097 	if (ret)
1098 		return ret;
1099 
1100 	return hclge_tm_pri_shaper_cfg(hdev);
1101 }
1102 
1103 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1104 {
1105 	int ret;
1106 
1107 	ret = hclge_tm_pg_dwrr_cfg(hdev);
1108 	if (ret)
1109 		return ret;
1110 
1111 	return hclge_tm_pri_dwrr_cfg(hdev);
1112 }
1113 
1114 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1115 {
1116 	int ret;
1117 	u8 i;
1118 
1119 	/* Only configured in TC-based scheduler mode */
1120 	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1121 		return 0;
1122 
1123 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
1124 		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1125 		if (ret)
1126 			return ret;
1127 	}
1128 
1129 	return 0;
1130 }
1131 
1132 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1133 {
1134 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1135 	struct hclge_dev *hdev = vport->back;
1136 	int ret;
1137 	u8 i;
1138 
1139 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1140 	if (ret)
1141 		return ret;
1142 
1143 	for (i = 0; i < kinfo->num_tc; i++) {
1144 		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1145 
1146 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1147 						sch_mode);
1148 		if (ret)
1149 			return ret;
1150 	}
1151 
1152 	return 0;
1153 }
1154 
1155 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1156 {
1157 	struct hclge_vport *vport = hdev->vport;
1158 	int ret;
1159 	u8 i, k;
1160 
1161 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1162 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
1163 			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1164 			if (ret)
1165 				return ret;
1166 
1167 			for (k = 0; k < hdev->num_alloc_vport; k++) {
1168 				ret = hclge_tm_qs_schd_mode_cfg(
1169 					hdev, vport[k].qs_offset + i,
1170 					HCLGE_SCH_MODE_DWRR);
1171 				if (ret)
1172 					return ret;
1173 			}
1174 		}
1175 	} else {
1176 		for (i = 0; i < hdev->num_alloc_vport; i++) {
1177 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1178 			if (ret)
1179 				return ret;
1180 
1181 			vport++;
1182 		}
1183 	}
1184 
1185 	return 0;
1186 }
1187 
1188 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1189 {
1190 	int ret;
1191 
1192 	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1193 	if (ret)
1194 		return ret;
1195 
1196 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
1197 }
1198 
1199 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1200 {
1201 	int ret;
1202 
1203 	/* Cfg tm mapping  */
1204 	ret = hclge_tm_map_cfg(hdev);
1205 	if (ret)
1206 		return ret;
1207 
1208 	/* Cfg tm shaper */
1209 	ret = hclge_tm_shaper_cfg(hdev);
1210 	if (ret)
1211 		return ret;
1212 
1213 	/* Cfg dwrr */
1214 	ret = hclge_tm_dwrr_cfg(hdev);
1215 	if (ret)
1216 		return ret;
1217 
1218 	/* Cfg schd mode for each level schd */
1219 	return hclge_tm_schd_mode_hw(hdev);
1220 }
1221 
1222 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1223 {
1224 	struct hclge_mac *mac = &hdev->hw.mac;
1225 
1226 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
1227 				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1228 				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1229 }
1230 
1231 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1232 {
1233 	u8 enable_bitmap = 0;
1234 
1235 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1236 		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1237 				HCLGE_RX_MAC_PAUSE_EN_MSK;
1238 
1239 	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1240 				      hdev->tm_info.pfc_en);
1241 }
1242 
1243 /* Each TC has 1024 queue sets for back pressure; they are divided into
1244  * 32 groups, each group containing 32 queue sets, which can be
1245  * represented by a u32 bitmap.
1246  */
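/* For example (assuming the group field holds qs_id / 32 and the sub-group
 * field holds qs_id % 32): qs_id 37 falls in group 1, bit 5 of that group's
 * bitmap.
 */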
1247 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1248 {
1249 	int i;
1250 
1251 	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1252 		u32 qs_bitmap = 0;
1253 		int k, ret;
1254 
1255 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1256 			struct hclge_vport *vport = &hdev->vport[k];
1257 			u16 qs_id = vport->qs_offset + tc;
1258 			u8 grp, sub_grp;
1259 
1260 			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1261 					      HCLGE_BP_GRP_ID_S);
1262 			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1263 						  HCLGE_BP_SUB_GRP_ID_S);
1264 			if (i == grp)
1265 				qs_bitmap |= (1 << sub_grp);
1266 		}
1267 
1268 		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1269 		if (ret)
1270 			return ret;
1271 	}
1272 
1273 	return 0;
1274 }
1275 
1276 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1277 {
1278 	bool tx_en, rx_en;
1279 
1280 	switch (hdev->tm_info.fc_mode) {
1281 	case HCLGE_FC_NONE:
1282 		tx_en = false;
1283 		rx_en = false;
1284 		break;
1285 	case HCLGE_FC_RX_PAUSE:
1286 		tx_en = false;
1287 		rx_en = true;
1288 		break;
1289 	case HCLGE_FC_TX_PAUSE:
1290 		tx_en = true;
1291 		rx_en = false;
1292 		break;
1293 	case HCLGE_FC_FULL:
1294 		tx_en = true;
1295 		rx_en = true;
1296 		break;
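	/* MAC (link-level) pause is turned off while PFC is in use; the
	 * per-priority pause bits are configured in hclge_pfc_setup_hw().
	 */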
1297 	case HCLGE_FC_PFC:
1298 		tx_en = false;
1299 		rx_en = false;
1300 		break;
1301 	default:
1302 		tx_en = true;
1303 		rx_en = true;
1304 	}
1305 
1306 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1307 }
1308 
1309 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1310 {
1311 	int ret = 0;
1312 	int i;
1313 
1314 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
1315 		ret = hclge_bp_setup_hw(hdev, i);
1316 		if (ret)
1317 			return ret;
1318 	}
1319 
1320 	return ret;
1321 }
1322 
1323 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1324 {
1325 	int ret;
1326 
1327 	ret = hclge_pause_param_setup_hw(hdev);
1328 	if (ret)
1329 		return ret;
1330 
1331 	ret = hclge_mac_pause_setup_hw(hdev);
1332 	if (ret)
1333 		return ret;
1334 
1335 	/* Only DCB-capable devices support qset back pressure and the pfc cmd */
1336 	if (!hnae3_dev_dcb_supported(hdev))
1337 		return 0;
1338 
1339 	/* GE MAC does not support PFC; when the driver is initializing and
1340 	 * the MAC is in GE mode, ignore the error here, otherwise
1341 	 * initialization will fail.
1342 	 */
1343 	ret = hclge_pfc_setup_hw(hdev);
1344 	if (init && ret == -EOPNOTSUPP) {
1345 		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1346 	} else if (ret) {
1347 		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1348 			ret);
1349 		return ret;
1350 	}
1351 
1352 	return hclge_tm_bp_setup(hdev);
1353 }
1354 
1355 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1356 {
1357 	struct hclge_vport *vport = hdev->vport;
1358 	struct hnae3_knic_private_info *kinfo;
1359 	u32 i, k;
1360 
1361 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1362 		hdev->tm_info.prio_tc[i] = prio_tc[i];
1363 
1364 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
1365 			kinfo = &vport[k].nic.kinfo;
1366 			kinfo->prio_tc[i] = prio_tc[i];
1367 		}
1368 	}
1369 }
1370 
1371 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1372 {
1373 	u8 bit_map = 0;
1374 	u8 i;
1375 
1376 	hdev->tm_info.num_tc = num_tc;
1377 
1378 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1379 		bit_map |= BIT(i);
1380 
1381 	if (!bit_map) {
1382 		bit_map = 1;
1383 		hdev->tm_info.num_tc = 1;
1384 	}
1385 
1386 	hdev->hw_tc_map = bit_map;
1387 
1388 	hclge_tm_schd_info_init(hdev);
1389 }
1390 
1391 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1392 {
1393 	int ret;
1394 
1395 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1396 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1397 		return -ENOTSUPP;
1398 
1399 	ret = hclge_tm_schd_setup_hw(hdev);
1400 	if (ret)
1401 		return ret;
1402 
1403 	ret = hclge_pause_setup_hw(hdev, init);
1404 	if (ret)
1405 		return ret;
1406 
1407 	return 0;
1408 }
1409 
1410 int hclge_tm_schd_init(struct hclge_dev *hdev)
1411 {
1412 	int ret;
1413 
1414 	/* fc_mode is HCLGE_FC_FULL on reset */
1415 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1416 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1417 
1418 	ret = hclge_tm_schd_info_init(hdev);
1419 	if (ret)
1420 		return ret;
1421 
1422 	return hclge_tm_init_hw(hdev, true);
1423 }
1424 
1425 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1426 {
1427 	struct hclge_vport *vport = hdev->vport;
1428 	int ret;
1429 
1430 	hclge_tm_vport_tc_info_update(vport);
1431 
1432 	ret = hclge_vport_q_to_qs_map(hdev, vport);
1433 	if (ret)
1434 		return ret;
1435 
1436 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
1437 		return 0;
1438 
1439 	return hclge_tm_bp_setup(hdev);
1440 }
1441