1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
3 
4 #include <linux/etherdevice.h>
5 
6 #include "hclge_cmd.h"
7 #include "hclge_main.h"
8 #include "hclge_tm.h"
9 
10 enum hclge_shaper_level {
11 	HCLGE_SHAPER_LVL_PRI	= 0,
12 	HCLGE_SHAPER_LVL_PG	= 1,
13 	HCLGE_SHAPER_LVL_PORT	= 2,
14 	HCLGE_SHAPER_LVL_QSET	= 3,
15 	HCLGE_SHAPER_LVL_CNT	= 4,
16 	HCLGE_SHAPER_LVL_VF	= 0,
17 	HCLGE_SHAPER_LVL_PF	= 1,
18 };
19 
20 #define HCLGE_TM_PFC_PKT_GET_CMD_NUM	3
21 #define HCLGE_TM_PFC_NUM_GET_PER_CMD	3
22 
23 #define HCLGE_SHAPER_BS_U_DEF	5
24 #define HCLGE_SHAPER_BS_S_DEF	20
25 
26 #define HCLGE_ETHER_MAX_RATE	100000
27 
28 /* hclge_shaper_para_calc: calculate ir parameter for the shaper
29  * @ir: rate to be configured, in Mbps
30  * @shaper_level: the shaper level, e.g. port, pg, priority, qset
31  * @ir_b: IR_B parameter of IR shaper
32  * @ir_u: IR_U parameter of IR shaper
33  * @ir_s: IR_S parameter of IR shaper
34  *
35  * the formula:
36  *
37  *		IR_b * (2 ^ IR_u) * 8
38  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
39  *		Tick * (2 ^ IR_s)
40  *
41  * @return: 0: calculation successful, negative: fail
42  */
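/* Worked example, traced from the code below: at the port level
 * (Tick = 6 * 8 = 48), a request of ir = 100000 Mbps starts from the
 * ir_b = 126, ir_u = 0, ir_s = 0 guess (126 * 8 * 1000 / 48 = 21000 Mbps).
 * Since that is below ir, ir_u is raised until the result reaches ir
 * (ir_u = 3 gives 168000 Mbps), ir_u is then backed off to 2 and ir_b is
 * recomputed as 150, so the programmed rate is
 * 150 * (2 ^ 2) * 8 / 48 * 1000 = 100000 Mbps.
 */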
43 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
44 				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
45 {
46 #define DIVISOR_CLK		(1000 * 8)
47 #define DIVISOR_IR_B_126	(126 * DIVISOR_CLK)
48 
49 	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
50 		6 * 256,        /* Priority level */
51 		6 * 32,         /* Priority group level */
52 		6 * 8,          /* Port level */
53 		6 * 256         /* Qset level */
54 	};
55 	u8 ir_u_calc = 0;
56 	u8 ir_s_calc = 0;
57 	u32 ir_calc;
58 	u32 tick;
59 
60 	/* Calc tick */
61 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
62 	    ir > HCLGE_ETHER_MAX_RATE)
63 		return -EINVAL;
64 
65 	tick = tick_array[shaper_level];
66 
67 	/*
68 	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
69 	 * the formula is changed to:
70 	 *		126 * 1 * 8
71 	 * ir_calc = ---------------- * 1000
72 	 *		tick * 1
73 	 */
74 	ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
75 
76 	if (ir_calc == ir) {
77 		*ir_b = 126;
78 		*ir_u = 0;
79 		*ir_s = 0;
80 
81 		return 0;
82 	} else if (ir_calc > ir) {
83 		/* Increasing the denominator to select ir_s value */
84 		while (ir_calc >= ir && ir) {
85 			ir_s_calc++;
86 			ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
87 		}
88 
89 		*ir_b = (ir * tick * (1 << ir_s_calc) + (DIVISOR_CLK >> 1)) /
90 			DIVISOR_CLK;
91 	} else {
92 		/* Increasing the numerator to select ir_u value */
93 		u32 numerator;
94 
95 		while (ir_calc < ir) {
96 			ir_u_calc++;
97 			numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
98 			ir_calc = (numerator + (tick >> 1)) / tick;
99 		}
100 
101 		if (ir_calc == ir) {
102 			*ir_b = 126;
103 		} else {
104 			u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);

105 			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
106 		}
107 	}
108 
109 	*ir_u = ir_u_calc;
110 	*ir_s = ir_s_calc;
111 
112 	return 0;
113 }
114 
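/* Read the per-TC PFC packet counters: the query spans
 * HCLGE_TM_PFC_PKT_GET_CMD_NUM chained descriptors, each carrying
 * HCLGE_TM_PFC_NUM_GET_PER_CMD counters, which are copied into @stats
 * for up to HCLGE_MAX_TC_NUM TCs.
 */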
115 static int hclge_pfc_stats_get(struct hclge_dev *hdev,
116 			       enum hclge_opcode_type opcode, u64 *stats)
117 {
118 	struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
119 	int ret, i, j;
120 
121 	if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
122 	      opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
123 		return -EINVAL;
124 
125 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
126 		hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
127 		desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
128 	}
129 
130 	hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
131 
132 	ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
133 	if (ret)
134 		return ret;
135 
136 	for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
137 		struct hclge_pfc_stats_cmd *pfc_stats =
138 				(struct hclge_pfc_stats_cmd *)desc[i].data;
139 
140 		for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
141 			u32 index = i * HCLGE_TM_PFC_NUM_GET_PER_CMD + j;
142 
143 			if (index < HCLGE_MAX_TC_NUM)
144 				stats[index] =
145 					le64_to_cpu(pfc_stats->pkt_num[j]);
146 		}
147 	}
148 	return 0;
149 }
150 
151 int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
152 {
153 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
154 }
155 
156 int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
157 {
158 	return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
159 }
160 
161 int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
162 {
163 	struct hclge_desc desc;
164 
165 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
166 
167 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
168 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
169 
170 	return hclge_cmd_send(&hdev->hw, &desc, 1);
171 }
172 
173 static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
174 				  u8 pfc_bitmap)
175 {
176 	struct hclge_desc desc;
177 	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
178 
179 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
180 
181 	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
182 	pfc->pri_en_bitmap = pfc_bitmap;
183 
184 	return hclge_cmd_send(&hdev->hw, &desc, 1);
185 }
186 
187 static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
188 				 u8 pause_trans_gap, u16 pause_trans_time)
189 {
190 	struct hclge_cfg_pause_param_cmd *pause_param;
191 	struct hclge_desc desc;
192 
193 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
194 
195 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
196 
197 	ether_addr_copy(pause_param->mac_addr, addr);
198 	ether_addr_copy(pause_param->mac_addr_extra, addr);
199 	pause_param->pause_trans_gap = pause_trans_gap;
200 	pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
201 
202 	return hclge_cmd_send(&hdev->hw, &desc, 1);
203 }
204 
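/* Update only the pause MAC address: read back the current pause
 * transmit gap and time, then rewrite them together with the new address.
 */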
205 int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
206 {
207 	struct hclge_cfg_pause_param_cmd *pause_param;
208 	struct hclge_desc desc;
209 	u16 trans_time;
210 	u8 trans_gap;
211 	int ret;
212 
213 	pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
214 
215 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
216 
217 	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
218 	if (ret)
219 		return ret;
220 
221 	trans_gap = pause_param->pause_trans_gap;
222 	trans_time = le16_to_cpu(pause_param->pause_trans_time);
223 
224 	return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
225 }
226 
227 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
228 {
229 	u8 tc;
230 
231 	tc = hdev->tm_info.prio_tc[pri_id];
232 
233 	if (tc >= hdev->tm_info.num_tc)
234 		return -EINVAL;
235 
236 	/* The register for the priority mapping has four bytes; the first
237 	 * byte holds priority 0 and priority 1, where the lower 4 bits
238 	 * stand for priority 0 and the higher 4 bits for priority 1,
239 	 * as below:
240 	 * first byte:	| pri_1 | pri_0 |
241 	 * second byte:	| pri_3 | pri_2 |
242 	 * third byte:	| pri_5 | pri_4 |
243 	 * fourth byte:	| pri_7 | pri_6 |
244 	 */
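	/* e.g. pri_id = 5 with tc = 3 sets the high nibble of pri[2] to 3 */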
245 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
246 
247 	return 0;
248 }
249 
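/* Program the user priority to TC mapping: all HNAE3_MAX_USER_PRIO
 * priorities are packed into one descriptor, two priorities per byte.
 */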
250 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
251 {
252 	struct hclge_desc desc;
253 	u8 *pri = (u8 *)desc.data;
254 	u8 pri_id;
255 	int ret;
256 
257 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
258 
259 	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
260 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
261 		if (ret)
262 			return ret;
263 	}
264 
265 	return hclge_cmd_send(&hdev->hw, &desc, 1);
266 }
267 
268 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
269 				      u8 pg_id, u8 pri_bit_map)
270 {
271 	struct hclge_pg_to_pri_link_cmd *map;
272 	struct hclge_desc desc;
273 
274 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
275 
276 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
277 
278 	map->pg_id = pg_id;
279 	map->pri_bit_map = pri_bit_map;
280 
281 	return hclge_cmd_send(&hdev->hw, &desc, 1);
282 }
283 
284 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
285 				      u16 qs_id, u8 pri)
286 {
287 	struct hclge_qs_to_pri_link_cmd *map;
288 	struct hclge_desc desc;
289 
290 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
291 
292 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
293 
294 	map->qs_id = cpu_to_le16(qs_id);
295 	map->priority = pri;
296 	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
297 
298 	return hclge_cmd_send(&hdev->hw, &desc, 1);
299 }
300 
301 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
302 				    u16 q_id, u16 qs_id)
303 {
304 	struct hclge_nq_to_qs_link_cmd *map;
305 	struct hclge_desc desc;
306 
307 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
308 
309 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
310 
311 	map->nq_id = cpu_to_le16(q_id);
312 	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
313 
314 	return hclge_cmd_send(&hdev->hw, &desc, 1);
315 }
316 
317 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
318 				  u8 dwrr)
319 {
320 	struct hclge_pg_weight_cmd *weight;
321 	struct hclge_desc desc;
322 
323 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
324 
325 	weight = (struct hclge_pg_weight_cmd *)desc.data;
326 
327 	weight->pg_id = pg_id;
328 	weight->dwrr = dwrr;
329 
330 	return hclge_cmd_send(&hdev->hw, &desc, 1);
331 }
332 
333 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
334 				   u8 dwrr)
335 {
336 	struct hclge_priority_weight_cmd *weight;
337 	struct hclge_desc desc;
338 
339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
340 
341 	weight = (struct hclge_priority_weight_cmd *)desc.data;
342 
343 	weight->pri_id = pri_id;
344 	weight->dwrr = dwrr;
345 
346 	return hclge_cmd_send(&hdev->hw, &desc, 1);
347 }
348 
349 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
350 				  u8 dwrr)
351 {
352 	struct hclge_qs_weight_cmd *weight;
353 	struct hclge_desc desc;
354 
355 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
356 
357 	weight = (struct hclge_qs_weight_cmd *)desc.data;
358 
359 	weight->qs_id = cpu_to_le16(qs_id);
360 	weight->dwrr = dwrr;
361 
362 	return hclge_cmd_send(&hdev->hw, &desc, 1);
363 }
364 
365 static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
366 				      u8 bs_b, u8 bs_s)
367 {
368 	u32 shapping_para = 0;
369 
370 	hclge_tm_set_field(shapping_para, IR_B, ir_b);
371 	hclge_tm_set_field(shapping_para, IR_U, ir_u);
372 	hclge_tm_set_field(shapping_para, IR_S, ir_s);
373 	hclge_tm_set_field(shapping_para, BS_B, bs_b);
374 	hclge_tm_set_field(shapping_para, BS_S, bs_s);
375 
376 	return shapping_para;
377 }
378 
379 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
380 				    enum hclge_shap_bucket bucket, u8 pg_id,
381 				    u32 shapping_para)
382 {
383 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
384 	enum hclge_opcode_type opcode;
385 	struct hclge_desc desc;
386 
387 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
388 		 HCLGE_OPC_TM_PG_C_SHAPPING;
389 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
390 
391 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
392 
393 	shap_cfg_cmd->pg_id = pg_id;
394 
395 	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
396 
397 	return hclge_cmd_send(&hdev->hw, &desc, 1);
398 }
399 
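/* Program the port-level shaper to the current MAC speed, with the
 * default HCLGE_SHAPER_BS_U_DEF/HCLGE_SHAPER_BS_S_DEF bucket parameters.
 */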
400 static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
401 {
402 	struct hclge_port_shapping_cmd *shap_cfg_cmd;
403 	struct hclge_desc desc;
404 	u8 ir_u, ir_b, ir_s;
405 	u32 shapping_para;
406 	int ret;
407 
408 	ret = hclge_shaper_para_calc(hdev->hw.mac.speed,
409 				     HCLGE_SHAPER_LVL_PORT,
410 				     &ir_b, &ir_u, &ir_s);
411 	if (ret)
412 		return ret;
413 
414 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
415 	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
416 
417 	shapping_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
418 						   HCLGE_SHAPER_BS_U_DEF,
419 						   HCLGE_SHAPER_BS_S_DEF);
420 
421 	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
422 
423 	return hclge_cmd_send(&hdev->hw, &desc, 1);
424 }
425 
426 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
427 				     enum hclge_shap_bucket bucket, u8 pri_id,
428 				     u32 shapping_para)
429 {
430 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
431 	enum hclge_opcode_type opcode;
432 	struct hclge_desc desc;
433 
434 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
435 		 HCLGE_OPC_TM_PRI_C_SHAPPING;
436 
437 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
438 
439 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
440 
441 	shap_cfg_cmd->pri_id = pri_id;
442 
443 	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
444 
445 	return hclge_cmd_send(&hdev->hw, &desc, 1);
446 }
447 
448 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
449 {
450 	struct hclge_desc desc;
451 
452 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
453 
454 	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
455 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
456 	else
457 		desc.data[1] = 0;
458 
459 	desc.data[0] = cpu_to_le32(pg_id);
460 
461 	return hclge_cmd_send(&hdev->hw, &desc, 1);
462 }
463 
464 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
465 {
466 	struct hclge_desc desc;
467 
468 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
469 
470 	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
471 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
472 	else
473 		desc.data[1] = 0;
474 
475 	desc.data[0] = cpu_to_le32(pri_id);
476 
477 	return hclge_cmd_send(&hdev->hw, &desc, 1);
478 }
479 
480 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
481 {
482 	struct hclge_desc desc;
483 
484 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
485 
486 	if (mode == HCLGE_SCH_MODE_DWRR)
487 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
488 	else
489 		desc.data[1] = 0;
490 
491 	desc.data[0] = cpu_to_le32(qs_id);
492 
493 	return hclge_cmd_send(&hdev->hw, &desc, 1);
494 }
495 
496 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
497 			      u32 bit_map)
498 {
499 	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
500 	struct hclge_desc desc;
501 
502 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
503 				   false);
504 
505 	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
506 
507 	bp_to_qs_map_cmd->tc_id = tc;
508 	bp_to_qs_map_cmd->qs_group_id = grp_id;
509 	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
510 
511 	return hclge_cmd_send(&hdev->hw, &desc, 1);
512 }
513 
514 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
515 {
516 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
517 	struct hclge_dev *hdev = vport->back;
518 	u16 max_rss_size;
519 	u8 i;
520 
521 	/* TC configuration is shared by PF/VF in one port, so only one
522 	 * TC is allowed for a VF for simplicity. A VF's vport_id is non-zero.
523 	 */
524 	kinfo->num_tc = vport->vport_id ? 1 :
525 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
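	/* Qset layout: the PF (vport 0) owns qsets 0..num_tc - 1, while
	 * VF n (vport_id = n) starts right after them at qset
	 * num_tc + (n - 1) and uses a single qset.
	 */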
526 	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
527 				(vport->vport_id ? (vport->vport_id - 1) : 0);
528 
529 	max_rss_size = min_t(u16, hdev->rss_size_max,
530 			     vport->alloc_tqps / kinfo->num_tc);
531 
532 	/* Set to user value, no larger than max_rss_size. */
533 	if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
534 	    kinfo->req_rss_size <= max_rss_size) {
535 		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
536 			 kinfo->rss_size, kinfo->req_rss_size);
537 		kinfo->rss_size = kinfo->req_rss_size;
538 	} else if (kinfo->rss_size > max_rss_size ||
539 		   (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
540 		/* Set to the maximum specification value (max_rss_size). */
541 		dev_info(&hdev->pdev->dev, "rss changes from %d to %d\n",
542 			 kinfo->rss_size, max_rss_size);
543 		kinfo->rss_size = max_rss_size;
544 	}
545 
546 	kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
547 	vport->dwrr = 100;  /* 100 percent as init */
548 	vport->alloc_rss_size = kinfo->rss_size;
549 	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
550 
551 	for (i = 0; i < HNAE3_MAX_TC; i++) {
552 		if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
553 			kinfo->tc_info[i].enable = true;
554 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
555 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
556 			kinfo->tc_info[i].tc = i;
557 		} else {
558 			/* Set to default queue if TC is disabled */
559 			kinfo->tc_info[i].enable = false;
560 			kinfo->tc_info[i].tqp_offset = 0;
561 			kinfo->tc_info[i].tqp_count = 1;
562 			kinfo->tc_info[i].tc = 0;
563 		}
564 	}
565 
566 	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
567 	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
568 }
569 
570 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
571 {
572 	struct hclge_vport *vport = hdev->vport;
573 	u32 i;
574 
575 	for (i = 0; i < hdev->num_alloc_vport; i++) {
576 		hclge_tm_vport_tc_info_update(vport);
577 
578 		vport++;
579 	}
580 }
581 
582 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
583 {
584 	u8 i;
585 
586 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
587 		hdev->tm_info.tc_info[i].tc_id = i;
588 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
589 		hdev->tm_info.tc_info[i].pgid = 0;
590 		hdev->tm_info.tc_info[i].bw_limit =
591 			hdev->tm_info.pg_info[0].bw_limit;
592 	}
593 
594 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
595 		hdev->tm_info.prio_tc[i] =
596 			(i >= hdev->tm_info.num_tc) ? 0 : i;
597 
598 	/* DCB is enabled if we have more than 1 TC or pfc_en is
599 	 * non-zero.
600 	 */
601 	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
602 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
603 	else
604 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
605 }
606 
607 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
608 {
609 #define BW_PERCENT	100
610 
611 	u8 i;
612 
613 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
614 		int k;
615 
616 		hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
617 
618 		hdev->tm_info.pg_info[i].pg_id = i;
619 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
620 
621 		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
622 
623 		if (i != 0)
624 			continue;
625 
626 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
627 		for (k = 0; k < hdev->tm_info.num_tc; k++)
628 			hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
629 	}
630 }
631 
632 static void hclge_pfc_info_init(struct hclge_dev *hdev)
633 {
634 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
635 		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
636 			dev_warn(&hdev->pdev->dev,
637 				 "DCB is disabled, but last mode is FC_PFC\n");
638 
639 		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
640 	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
641 		/* fc_mode_last_time record the last fc_mode when
642 		 * DCB is enabled, so that fc_mode can be set to
643 		 * the correct value when DCB is disabled.
644 		 */
645 		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
646 		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
647 	}
648 }
649 
650 static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
651 {
652 	hclge_tm_pg_info_init(hdev);
653 
654 	hclge_tm_tc_info_init(hdev);
655 
656 	hclge_tm_vport_info_update(hdev);
657 
658 	hclge_pfc_info_init(hdev);
659 }
660 
661 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
662 {
663 	int ret;
664 	u32 i;
665 
666 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
667 		return 0;
668 
669 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
670 		/* Cfg mapping */
671 		ret = hclge_tm_pg_to_pri_map_cfg(
672 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
673 		if (ret)
674 			return ret;
675 	}
676 
677 	return 0;
678 }
679 
680 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
681 {
682 	u8 ir_u, ir_b, ir_s;
683 	u32 shaper_para;
684 	int ret;
685 	u32 i;
686 
687 	/* Cfg pg schd */
688 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
689 		return 0;
690 
691 	/* Pg to pri */
692 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
693 		/* Calc shaper para */
694 		ret = hclge_shaper_para_calc(
695 					hdev->tm_info.pg_info[i].bw_limit,
696 					HCLGE_SHAPER_LVL_PG,
697 					&ir_b, &ir_u, &ir_s);
698 		if (ret)
699 			return ret;
700 
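		/* The C bucket is left with zero ir parameters; only the
		 * P bucket carries the calculated rate.
		 */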
701 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
702 							 HCLGE_SHAPER_BS_U_DEF,
703 							 HCLGE_SHAPER_BS_S_DEF);
704 		ret = hclge_tm_pg_shapping_cfg(hdev,
705 					       HCLGE_TM_SHAP_C_BUCKET, i,
706 					       shaper_para);
707 		if (ret)
708 			return ret;
709 
710 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
711 							 HCLGE_SHAPER_BS_U_DEF,
712 							 HCLGE_SHAPER_BS_S_DEF);
713 		ret = hclge_tm_pg_shapping_cfg(hdev,
714 					       HCLGE_TM_SHAP_P_BUCKET, i,
715 					       shaper_para);
716 		if (ret)
717 			return ret;
718 	}
719 
720 	return 0;
721 }
722 
723 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
724 {
725 	int ret;
726 	u32 i;
727 
728 	/* cfg pg schd */
729 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
730 		return 0;
731 
732 	/* pg to prio */
733 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
734 		/* Cfg dwrr */
735 		ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
736 		if (ret)
737 			return ret;
738 	}
739 
740 	return 0;
741 }
742 
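/* Map every TQP of the vport to the qset representing its TC */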
743 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
744 				   struct hclge_vport *vport)
745 {
746 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
747 	struct hnae3_queue **tqp = kinfo->tqp;
748 	struct hnae3_tc_info *v_tc_info;
749 	u32 i, j;
750 	int ret;
751 
752 	for (i = 0; i < kinfo->num_tc; i++) {
753 		v_tc_info = &kinfo->tc_info[i];
754 		for (j = 0; j < v_tc_info->tqp_count; j++) {
755 			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
756 
757 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
758 						       hclge_get_queue_id(q),
759 						       vport->qs_offset + i);
760 			if (ret)
761 				return ret;
762 		}
763 	}
764 
765 	return 0;
766 }
767 
768 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
769 {
770 	struct hclge_vport *vport = hdev->vport;
771 	int ret;
772 	u32 i, k;
773 
774 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
775 		/* Cfg qs -> pri mapping, one by one mapping */
776 		for (k = 0; k < hdev->num_alloc_vport; k++) {
777 			struct hnae3_knic_private_info *kinfo =
778 				&vport[k].nic.kinfo;
779 
780 			for (i = 0; i < kinfo->num_tc; i++) {
781 				ret = hclge_tm_qs_to_pri_map_cfg(
782 					hdev, vport[k].qs_offset + i, i);
783 				if (ret)
784 					return ret;
785 			}
786 		}
787 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
788 		/* Cfg qs -> pri mapping,  qs = tc, pri = vf, 8 qs -> 1 pri */
789 		for (k = 0; k < hdev->num_alloc_vport; k++)
790 			for (i = 0; i < HNAE3_MAX_TC; i++) {
791 				ret = hclge_tm_qs_to_pri_map_cfg(
792 					hdev, vport[k].qs_offset + i, k);
793 				if (ret)
794 					return ret;
795 			}
796 	} else {
797 		return -EINVAL;
798 	}
799 
800 	/* Cfg q -> qs mapping */
801 	for (i = 0; i < hdev->num_alloc_vport; i++) {
802 		ret = hclge_vport_q_to_qs_map(hdev, vport);
803 		if (ret)
804 			return ret;
805 
806 		vport++;
807 	}
808 
809 	return 0;
810 }
811 
812 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
813 {
814 	u8 ir_u, ir_b, ir_s;
815 	u32 shaper_para;
816 	int ret;
817 	u32 i;
818 
819 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
820 		ret = hclge_shaper_para_calc(
821 					hdev->tm_info.tc_info[i].bw_limit,
822 					HCLGE_SHAPER_LVL_PRI,
823 					&ir_b, &ir_u, &ir_s);
824 		if (ret)
825 			return ret;
826 
827 		shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
828 							 HCLGE_SHAPER_BS_U_DEF,
829 							 HCLGE_SHAPER_BS_S_DEF);
830 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
831 						shaper_para);
832 		if (ret)
833 			return ret;
834 
835 		shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
836 							 HCLGE_SHAPER_BS_U_DEF,
837 							 HCLGE_SHAPER_BS_S_DEF);
838 		ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
839 						shaper_para);
840 		if (ret)
841 			return ret;
842 	}
843 
844 	return 0;
845 }
846 
847 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
848 {
849 	struct hclge_dev *hdev = vport->back;
850 	u8 ir_u, ir_b, ir_s;
851 	u32 shaper_para;
852 	int ret;
853 
854 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
855 				     &ir_b, &ir_u, &ir_s);
856 	if (ret)
857 		return ret;
858 
859 	shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
860 						 HCLGE_SHAPER_BS_U_DEF,
861 						 HCLGE_SHAPER_BS_S_DEF);
862 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
863 					vport->vport_id, shaper_para);
864 	if (ret)
865 		return ret;
866 
867 	shaper_para = hclge_tm_get_shapping_para(ir_b, ir_u, ir_s,
868 						 HCLGE_SHAPER_BS_U_DEF,
869 						 HCLGE_SHAPER_BS_S_DEF);
870 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
871 					vport->vport_id, shaper_para);
872 	if (ret)
873 		return ret;
874 
875 	return 0;
876 }
877 
878 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
879 {
880 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
881 	struct hclge_dev *hdev = vport->back;
882 	u8 ir_u, ir_b, ir_s;
883 	u32 i;
884 	int ret;
885 
886 	for (i = 0; i < kinfo->num_tc; i++) {
887 		ret = hclge_shaper_para_calc(
888 					hdev->tm_info.tc_info[i].bw_limit,
889 					HCLGE_SHAPER_LVL_QSET,
890 					&ir_b, &ir_u, &ir_s);
891 		if (ret)
892 			return ret;
893 	}
894 
895 	return 0;
896 }
897 
898 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
899 {
900 	struct hclge_vport *vport = hdev->vport;
901 	int ret;
902 	u32 i;
903 
904 	/* Need config vport shaper */
905 	for (i = 0; i < hdev->num_alloc_vport; i++) {
906 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
907 		if (ret)
908 			return ret;
909 
910 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
911 		if (ret)
912 			return ret;
913 
914 		vport++;
915 	}
916 
917 	return 0;
918 }
919 
920 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
921 {
922 	int ret;
923 
924 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
925 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
926 		if (ret)
927 			return ret;
928 	} else {
929 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
930 		if (ret)
931 			return ret;
932 	}
933 
934 	return 0;
935 }
936 
937 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
938 {
939 	struct hclge_vport *vport = hdev->vport;
940 	struct hclge_pg_info *pg_info;
941 	u8 dwrr;
942 	int ret;
943 	u32 i, k;
944 
945 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
946 		pg_info =
947 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
948 		dwrr = pg_info->tc_dwrr[i];
949 
950 		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
951 		if (ret)
952 			return ret;
953 
954 		for (k = 0; k < hdev->num_alloc_vport; k++) {
955 			ret = hclge_tm_qs_weight_cfg(
956 				hdev, vport[k].qs_offset + i,
957 				vport[k].dwrr);
958 			if (ret)
959 				return ret;
960 		}
961 	}
962 
963 	return 0;
964 }
965 
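/* Mirror the per-TC DWRR weights into the ETS TC weight command; TCs that
 * are not in hw_tc_map keep the default weight.
 */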
966 static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
967 {
968 #define DEFAULT_TC_WEIGHT	1
969 #define DEFAULT_TC_OFFSET	14
970 
971 	struct hclge_ets_tc_weight_cmd *ets_weight;
972 	struct hclge_desc desc;
973 	unsigned int i;
974 
975 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
976 	ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
977 
978 	for (i = 0; i < HNAE3_MAX_TC; i++) {
979 		struct hclge_pg_info *pg_info;
980 
981 		ets_weight->tc_weight[i] = DEFAULT_TC_WEIGHT;
982 
983 		if (!(hdev->hw_tc_map & BIT(i)))
984 			continue;
985 
986 		pg_info =
987 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
988 		ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
989 	}
990 
991 	ets_weight->weight_offset = DEFAULT_TC_OFFSET;
992 
993 	return hclge_cmd_send(&hdev->hw, &desc, 1);
994 }
995 
996 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
997 {
998 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
999 	struct hclge_dev *hdev = vport->back;
1000 	int ret;
1001 	u8 i;
1002 
1003 	/* Vf dwrr */
1004 	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
1005 	if (ret)
1006 		return ret;
1007 
1008 	/* Qset dwrr */
1009 	for (i = 0; i < kinfo->num_tc; i++) {
1010 		ret = hclge_tm_qs_weight_cfg(
1011 			hdev, vport->qs_offset + i,
1012 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
1013 		if (ret)
1014 			return ret;
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
1021 {
1022 	struct hclge_vport *vport = hdev->vport;
1023 	int ret;
1024 	u32 i;
1025 
1026 	for (i = 0; i < hdev->num_alloc_vport; i++) {
1027 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
1028 		if (ret)
1029 			return ret;
1030 
1031 		vport++;
1032 	}
1033 
1034 	return 0;
1035 }
1036 
1037 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
1038 {
1039 	int ret;
1040 
1041 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1042 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
1043 		if (ret)
1044 			return ret;
1045 
1046 		if (!hnae3_dev_dcb_supported(hdev))
1047 			return 0;
1048 
1049 		ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
1050 		if (ret == -EOPNOTSUPP) {
1051 			dev_warn(&hdev->pdev->dev,
1052 				 "fw %08x doesn't support ets tc weight cmd\n",
1053 				 hdev->fw_version);
1054 			ret = 0;
1055 		}
1056 
1057 		return ret;
1058 	} else {
1059 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
1060 		if (ret)
1061 			return ret;
1062 	}
1063 
1064 	return 0;
1065 }
1066 
1067 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
1068 {
1069 	int ret;
1070 
1071 	ret = hclge_up_to_tc_map(hdev);
1072 	if (ret)
1073 		return ret;
1074 
1075 	ret = hclge_tm_pg_to_pri_map(hdev);
1076 	if (ret)
1077 		return ret;
1078 
1079 	return hclge_tm_pri_q_qs_cfg(hdev);
1080 }
1081 
1082 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
1083 {
1084 	int ret;
1085 
1086 	ret = hclge_tm_port_shaper_cfg(hdev);
1087 	if (ret)
1088 		return ret;
1089 
1090 	ret = hclge_tm_pg_shaper_cfg(hdev);
1091 	if (ret)
1092 		return ret;
1093 
1094 	return hclge_tm_pri_shaper_cfg(hdev);
1095 }
1096 
1097 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
1098 {
1099 	int ret;
1100 
1101 	ret = hclge_tm_pg_dwrr_cfg(hdev);
1102 	if (ret)
1103 		return ret;
1104 
1105 	return hclge_tm_pri_dwrr_cfg(hdev);
1106 }
1107 
1108 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
1109 {
1110 	int ret;
1111 	u8 i;
1112 
1113 	/* Only configured in TC-based scheduler mode */
1114 	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
1115 		return 0;
1116 
1117 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
1118 		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
1119 		if (ret)
1120 			return ret;
1121 	}
1122 
1123 	return 0;
1124 }
1125 
1126 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
1127 {
1128 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
1129 	struct hclge_dev *hdev = vport->back;
1130 	int ret;
1131 	u8 i;
1132 
1133 	if (vport->vport_id >= HNAE3_MAX_TC)
1134 		return -EINVAL;
1135 
1136 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
1137 	if (ret)
1138 		return ret;
1139 
1140 	for (i = 0; i < kinfo->num_tc; i++) {
1141 		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
1142 
1143 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
1144 						sch_mode);
1145 		if (ret)
1146 			return ret;
1147 	}
1148 
1149 	return 0;
1150 }
1151 
1152 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
1153 {
1154 	struct hclge_vport *vport = hdev->vport;
1155 	int ret;
1156 	u8 i, k;
1157 
1158 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
1159 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
1160 			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
1161 			if (ret)
1162 				return ret;
1163 
1164 			for (k = 0; k < hdev->num_alloc_vport; k++) {
1165 				ret = hclge_tm_qs_schd_mode_cfg(
1166 					hdev, vport[k].qs_offset + i,
1167 					HCLGE_SCH_MODE_DWRR);
1168 				if (ret)
1169 					return ret;
1170 			}
1171 		}
1172 	} else {
1173 		for (i = 0; i < hdev->num_alloc_vport; i++) {
1174 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
1175 			if (ret)
1176 				return ret;
1177 
1178 			vport++;
1179 		}
1180 	}
1181 
1182 	return 0;
1183 }
1184 
1185 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
1186 {
1187 	int ret;
1188 
1189 	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
1190 	if (ret)
1191 		return ret;
1192 
1193 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
1194 }
1195 
1196 int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
1197 {
1198 	int ret;
1199 
1200 	/* Cfg tm mapping  */
1201 	ret = hclge_tm_map_cfg(hdev);
1202 	if (ret)
1203 		return ret;
1204 
1205 	/* Cfg tm shaper */
1206 	ret = hclge_tm_shaper_cfg(hdev);
1207 	if (ret)
1208 		return ret;
1209 
1210 	/* Cfg dwrr */
1211 	ret = hclge_tm_dwrr_cfg(hdev);
1212 	if (ret)
1213 		return ret;
1214 
1215 	/* Cfg schd mode for each level schd */
1216 	return hclge_tm_schd_mode_hw(hdev);
1217 }
1218 
1219 static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
1220 {
1221 	struct hclge_mac *mac = &hdev->hw.mac;
1222 
1223 	return hclge_pause_param_cfg(hdev, mac->mac_addr,
1224 				     HCLGE_DEFAULT_PAUSE_TRANS_GAP,
1225 				     HCLGE_DEFAULT_PAUSE_TRANS_TIME);
1226 }
1227 
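/* Enable PFC pause in both directions only when fc_mode is PFC; the
 * per-priority enable bitmap comes from tm_info.pfc_en.
 */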
1228 static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
1229 {
1230 	u8 enable_bitmap = 0;
1231 
1232 	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
1233 		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
1234 				HCLGE_RX_MAC_PAUSE_EN_MSK;
1235 
1236 	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
1237 				      hdev->tm_info.pfc_en);
1238 }
1239 
1240 /* Each TC can back-pressure up to 1024 queue sets, which are divided
1241  * into 32 groups of 32 queue sets each; one group's member queue sets
1242  * are represented by a u32 bitmap.
1243  */
1244 static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
1245 {
1246 	int i;
1247 
1248 	for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
1249 		u32 qs_bitmap = 0;
1250 		int k, ret;
1251 
1252 		for (k = 0; k < hdev->num_alloc_vport; k++) {
1253 			struct hclge_vport *vport = &hdev->vport[k];
1254 			u16 qs_id = vport->qs_offset + tc;
1255 			u8 grp, sub_grp;
1256 
1257 			grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
1258 					      HCLGE_BP_GRP_ID_S);
1259 			sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
1260 						  HCLGE_BP_SUB_GRP_ID_S);
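			/* e.g. qs_id 70 falls in group 2 (70 / 32) as bit 6 (70 % 32) */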
1261 			if (i == grp)
1262 				qs_bitmap |= (1 << sub_grp);
1263 		}
1264 
1265 		ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
1266 		if (ret)
1267 			return ret;
1268 	}
1269 
1270 	return 0;
1271 }
1272 
1273 static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
1274 {
1275 	bool tx_en, rx_en;
1276 
1277 	switch (hdev->tm_info.fc_mode) {
1278 	case HCLGE_FC_NONE:
1279 		tx_en = false;
1280 		rx_en = false;
1281 		break;
1282 	case HCLGE_FC_RX_PAUSE:
1283 		tx_en = false;
1284 		rx_en = true;
1285 		break;
1286 	case HCLGE_FC_TX_PAUSE:
1287 		tx_en = true;
1288 		rx_en = false;
1289 		break;
1290 	case HCLGE_FC_FULL:
1291 		tx_en = true;
1292 		rx_en = true;
1293 		break;
1294 	case HCLGE_FC_PFC:
1295 		tx_en = false;
1296 		rx_en = false;
1297 		break;
1298 	default:
1299 		tx_en = true;
1300 		rx_en = true;
1301 	}
1302 
1303 	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
1304 }
1305 
1306 static int hclge_tm_bp_setup(struct hclge_dev *hdev)
1307 {
1308 	int ret = 0;
1309 	int i;
1310 
1311 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
1312 		ret = hclge_bp_setup_hw(hdev, i);
1313 		if (ret)
1314 			return ret;
1315 	}
1316 
1317 	return ret;
1318 }
1319 
1320 int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
1321 {
1322 	int ret;
1323 
1324 	ret = hclge_pause_param_setup_hw(hdev);
1325 	if (ret)
1326 		return ret;
1327 
1328 	ret = hclge_mac_pause_setup_hw(hdev);
1329 	if (ret)
1330 		return ret;
1331 
1332 	/* Only DCB-capable devices support qset back pressure and pfc cmd */
1333 	if (!hnae3_dev_dcb_supported(hdev))
1334 		return 0;
1335 
1336 	/* The GE MAC does not support PFC. When the driver is initializing
1337 	 * and the MAC is in GE mode, ignore the error here; otherwise
1338 	 * initialization will fail.
1339 	 */
1340 	ret = hclge_pfc_setup_hw(hdev);
1341 	if (init && ret == -EOPNOTSUPP) {
1342 		dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
1343 	} else if (ret) {
1344 		dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
1345 			ret);
1346 		return ret;
1347 	}
1348 
1349 	return hclge_tm_bp_setup(hdev);
1350 }
1351 
1352 void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
1353 {
1354 	struct hclge_vport *vport = hdev->vport;
1355 	struct hnae3_knic_private_info *kinfo;
1356 	u32 i, k;
1357 
1358 	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
1359 		hdev->tm_info.prio_tc[i] = prio_tc[i];
1360 
1361 		for (k = 0;  k < hdev->num_alloc_vport; k++) {
1362 			kinfo = &vport[k].nic.kinfo;
1363 			kinfo->prio_tc[i] = prio_tc[i];
1364 		}
1365 	}
1366 }
1367 
1368 void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
1369 {
1370 	u8 bit_map = 0;
1371 	u8 i;
1372 
1373 	hdev->tm_info.num_tc = num_tc;
1374 
1375 	for (i = 0; i < hdev->tm_info.num_tc; i++)
1376 		bit_map |= BIT(i);
1377 
1378 	if (!bit_map) {
1379 		bit_map = 1;
1380 		hdev->tm_info.num_tc = 1;
1381 	}
1382 
1383 	hdev->hw_tc_map = bit_map;
1384 
1385 	hclge_tm_schd_info_init(hdev);
1386 }
1387 
1388 void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
1389 {
1390 	/* DCB is enabled if we have more than 1 TC or pfc_en is
1391 	 * non-zero.
1392 	 */
1393 	if (hdev->tm_info.num_tc > 1 || hdev->tm_info.pfc_en)
1394 		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
1395 	else
1396 		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
1397 
1398 	hclge_pfc_info_init(hdev);
1399 }
1400 
1401 int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
1402 {
1403 	int ret;
1404 
1405 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
1406 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
1407 		return -EOPNOTSUPP;
1408 
1409 	ret = hclge_tm_schd_setup_hw(hdev);
1410 	if (ret)
1411 		return ret;
1412 
1413 	ret = hclge_pause_setup_hw(hdev, init);
1414 	if (ret)
1415 		return ret;
1416 
1417 	return 0;
1418 }
1419 
1420 int hclge_tm_schd_init(struct hclge_dev *hdev)
1421 {
1422 	/* fc_mode is HCLGE_FC_FULL on reset */
1423 	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
1424 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
1425 
1426 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
1427 	    hdev->tm_info.num_pg != 1)
1428 		return -EINVAL;
1429 
1430 	hclge_tm_schd_info_init(hdev);
1431 
1432 	return hclge_tm_init_hw(hdev, true);
1433 }
1434 
1435 int hclge_tm_vport_map_update(struct hclge_dev *hdev)
1436 {
1437 	struct hclge_vport *vport = hdev->vport;
1438 	int ret;
1439 
1440 	hclge_tm_vport_tc_info_update(vport);
1441 
1442 	ret = hclge_vport_q_to_qs_map(hdev, vport);
1443 	if (ret)
1444 		return ret;
1445 
1446 	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE))
1447 		return 0;
1448 
1449 	return hclge_tm_bp_setup(hdev);
1450 }
1451