/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: Rate to be configured, its unit is Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queue set
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0: calculation successful, negative: fail
 */
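/* Worked example (illustrative values): for a priority level shaper
 * (tick = 6 * 256 = 1536) and a target rate of ir = 5000 Mbps, the
 * calculation below yields ir_b = 240, ir_u = 2 and ir_s = 0, giving
 * 240 * (2 ^ 2) * 8 / 1536 * 1000 = 5000 Mbps.
 */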
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
		6 * 8,          /* Port level */
		6 * 256         /* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0,
	 * the formula is changed to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increasing the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increasing the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

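/* Enable/disable MAC pause frames in the tx and rx directions */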
static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
				  u8 pfc_bitmap)
{
	struct hclge_desc desc;
	struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)&desc.data;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);

	pfc->tx_rx_en_bitmap = tx_rx_bitmap;
	pfc->pri_en_bitmap = pfc_bitmap;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes: the first byte covers
	 * priority 0 and priority 1, with the higher 4 bits for priority 1
	 * and the lower 4 bits for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
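	/* For example, mapping priority 5 to tc 3 sets the upper 4 bits of
	 * the third byte (pri[2]) to 3.
	 */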
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u8 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_port_shapping_cmd *shap_cfg_cmd;
	struct hclge_desc desc;
	u32 shapping_para = 0;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(HCLGE_ETHER_MAX_RATE,
				     HCLGE_SHAPER_LVL_PORT,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
	shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, HCLGE_SHAPER_BS_U_DEF);
	hclge_tm_set_field(shapping_para, BS_S, HCLGE_SHAPER_BS_S_DEF);

	shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;
	u32 shapping_para = 0;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shapping_para, BS_S, bs_s);

	shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

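/* Set the TC to qset back pressure mapping; only used when PFC is enabled */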
static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc are in one-to-one mapping */
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

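/* Update the TC info of a vport (number of TCs, rss size and per-TC tqp
 * layout) from the TM info of the whole device.
 */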
static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc =
		min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size
		= min_t(u16, hdev->rss_size_max,
			kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	/* DCB is enabled if we have more than 1 TC */
	if (hdev->tm_info.num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static void hclge_pfc_info_init(struct hclge_dev *hdev)
{
	if (!(hdev->flag & HCLGE_FLAG_DCB_ENABLE)) {
		if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
			dev_warn(&hdev->pdev->dev,
				 "DCB is disabled, but last mode is FC_PFC\n");

		hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
	} else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
		/* fc_mode_last_time records the last fc_mode when
		 * DCB is enabled, so that fc_mode can be set to
		 * the correct value when DCB is disabled.
		 */
		hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
		hdev->tm_info.fc_mode = HCLGE_FC_PFC;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hclge_pfc_info_init(hdev);

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg pg shaper */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg pg dwrr */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

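/* Configure the queue -> qset and qset -> priority mappings. In TC-based
 * scheduling mode the qsets of every vport map to the priority of their TC,
 * while in vnet-based mode all qsets of a vport map to the vport's priority.
 */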
static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one-to-one mapping */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < hdev->tm_info.num_tc; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, i);
				if (ret)
					return ret;
			}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i, k;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			ret = hclge_tm_qs_weight_cfg(
				hdev, vport[k].qs_offset + i,
				vport[k].dwrr);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_up_to_tc_map(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_port_shaper_cfg(hdev);
	if (ret)
		return ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduling mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;

		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
						sch_mode);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i, k;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			for (k = 0; k < hdev->num_alloc_vport; k++) {
				ret = hclge_tm_qs_schd_mode_cfg(
					hdev, vport[k].qs_offset + i,
					HCLGE_SCH_MODE_DWRR);
				if (ret)
					return ret;
			}
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
{
	u8 enable_bitmap = 0;

	if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
		enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
				HCLGE_RX_MAC_PAUSE_EN_MSK;

	return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
				      hdev->tm_info.hw_pfc_map);
}

static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
	bool tx_en, rx_en;

	switch (hdev->tm_info.fc_mode) {
	case HCLGE_FC_NONE:
		tx_en = false;
		rx_en = false;
		break;
	case HCLGE_FC_RX_PAUSE:
		tx_en = false;
		rx_en = true;
		break;
	case HCLGE_FC_TX_PAUSE:
		tx_en = true;
		rx_en = false;
		break;
	case HCLGE_FC_FULL:
		tx_en = true;
		rx_en = true;
		break;
	default:
		tx_en = true;
		rx_en = true;
	}

	return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
}

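/* Set up flow control: plain MAC pause when PFC is not in use, otherwise
 * PFC pause plus the per-TC qset back pressure mapping.
 */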
int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	if (hdev->tm_info.fc_mode != HCLGE_FC_PFC)
		return hclge_mac_pause_setup_hw(hdev);

	/* Only a DCB-supported dev supports the qset back pressure and pfc cmd */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	/* When the MAC is in GE mode, the device does not support pfc setting */
	ret = hclge_pfc_setup_hw(hdev);
	if (ret)
		dev_warn(&hdev->pdev->dev, "set pfc pause failed:%d\n", ret);

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

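/* Update the user priority to TC mapping for the PF and all vports; a
 * priority mapped to a nonexistent TC is rejected with -EINVAL.
 */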
int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;
	u32 i, k;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= hdev->tm_info.num_tc)
			return -EINVAL;
		hdev->tm_info.prio_tc[i] = prio_tc[i];

		for (k = 0; k < hdev->num_alloc_vport; k++) {
			kinfo = &vport[k].nic.kinfo;
			kinfo->prio_tc[i] = prio_tc[i];
		}
	}
	return 0;
}

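/* Rebuild the scheduling info for a new TC count; e.g. num_tc = 4 results
 * in hw_tc_map = 0xF (TC0..TC3 enabled).
 */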
void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
{
	u8 i, bit_map = 0;

	hdev->tm_info.num_tc = num_tc;

	for (i = 0; i < hdev->tm_info.num_tc; i++)
		bit_map |= BIT(i);

	if (!bit_map) {
		bit_map = 1;
		hdev->tm_info.num_tc = 1;
	}

	hdev->hw_tc_map = bit_map;

	hclge_tm_schd_info_init(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret;

	/* fc_mode is HCLGE_FC_FULL on reset */
	hdev->tm_info.fc_mode = HCLGE_FC_FULL;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	ret = hclge_tm_schd_info_init(hdev);
	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}