/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/etherdevice.h>

#include "hclge_cmd.h"
#include "hclge_main.h"
#include "hclge_tm.h"

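/* Note: HCLGE_SHAPER_LVL_VF and _PF alias the _PRI and _PG values, so in
 * vnet-base mode they select the same tick values in
 * hclge_shaper_para_calc().
 */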
enum hclge_shaper_level {
	HCLGE_SHAPER_LVL_PRI	= 0,
	HCLGE_SHAPER_LVL_PG	= 1,
	HCLGE_SHAPER_LVL_PORT	= 2,
	HCLGE_SHAPER_LVL_QSET	= 3,
	HCLGE_SHAPER_LVL_CNT	= 4,
	HCLGE_SHAPER_LVL_VF	= 0,
	HCLGE_SHAPER_LVL_PF	= 1,
};

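/* Default bucket size parameters (the BS_B/BS_S shaper fields) used by
 * every shaper config in this file.
 */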
#define HCLGE_SHAPER_BS_U_DEF	1
#define HCLGE_SHAPER_BS_S_DEF	4

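/* Max interface rate, in Mbps (i.e. 100 Gbps) */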
#define HCLGE_ETHER_MAX_RATE	100000

/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
 * the formula:
 *
 *		IR_b * (2 ^ IR_u) * 8
 * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
 *		Tick * (2 ^ IR_s)
 *
 * @return: 0 on success, negative errno on failure
 */
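/* Worked example (illustrative): for ir = 2000 Mbps at the priority level
 * (tick = 6 * 256 = 1536), the base rate 126 * 8 * 1000 / 1536 is about
 * 656 Mbps, below the target, so ir_u is raised until the rate overshoots
 * (ir_u = 2 gives about 2625 Mbps), then stepped back to ir_u = 1 and ir_b
 * is recomputed: ir_b = (2000 * 1536 + 8000) / 16000 = 192.
 * Check: 192 * (2 ^ 1) * 8 / 1536 * 1000 = 2000 Mbps exactly.
 */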
static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
{
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
		6 * 8,          /* Port level */
		6 * 256         /* Qset level */
	};
	u8 ir_u_calc = 0, ir_s_calc = 0;
	u32 ir_calc;
	u32 tick;

	/* Calc tick */
	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
		return -EINVAL;

	tick = tick_array[shaper_level];

	/* Calc the speed when ir_b = 126, ir_u = 0 and ir_s = 0;
	 * the formula reduces to:
	 *		126 * 1 * 8
	 * ir_calc = ---------------- * 1000
	 *		tick * 1
	 */
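	/* 1008000 = 126 * 8 * 1000; adding roughly half a tick before the
	 * divide rounds ir_calc to the nearest integer.
	 */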
	ir_calc = (1008000 + (tick >> 1) - 1) / tick;

	if (ir_calc == ir) {
		*ir_b = 126;
		*ir_u = 0;
		*ir_s = 0;

		return 0;
	} else if (ir_calc > ir) {
		/* Increase the denominator to select ir_s value */
		while (ir_calc > ir) {
			ir_s_calc++;
			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
		}

		if (ir_calc == ir)
			*ir_b = 126;
		else
			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
	} else {
		/* Increase the numerator to select ir_u value */
		u32 numerator;

		while (ir_calc < ir) {
			ir_u_calc++;
			numerator = 1008000 * (1 << ir_u_calc);
			ir_calc = (numerator + (tick >> 1)) / tick;
		}

		if (ir_calc == ir) {
			*ir_b = 126;
		} else {
			u32 denominator = (8000 * (1 << --ir_u_calc));

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
		}
	}

	*ir_u = ir_u_calc;
	*ir_s = ir_s_calc;

	return 0;
}

static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);

	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
{
	u8 tc;

	tc = hdev->tm_info.prio_tc[pri_id];

	if (tc >= hdev->tm_info.num_tc)
		return -EINVAL;

	/* The register for priority has four bytes; the first byte covers
	 * priority 0 and priority 1: the upper 4 bits stand for priority 1
	 * while the lower 4 bits stand for priority 0, as below:
	 * first byte:	| pri_1 | pri_0 |
	 * second byte:	| pri_3 | pri_2 |
	 * third byte:	| pri_5 | pri_4 |
	 * fourth byte:	| pri_7 | pri_6 |
	 */
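	/* For example (illustrative): pri_id = 5 with tc = 3 sets the upper
	 * nibble of the third byte: pri[2] |= 3 << 4.
	 */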
	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);

	return 0;
}

static int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
	struct hclge_desc desc;
	u8 *pri = (u8 *)desc.data;
	u8 pri_id;
	int ret;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);

	for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
		ret = hclge_fill_pri_array(hdev, pri, pri_id);
		if (ret)
			return ret;
	}

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
				      u8 pg_id, u8 pri_bit_map)
{
	struct hclge_pg_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);

	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;

	map->pg_id = pg_id;
	map->pri_bit_map = pri_bit_map;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
				      u16 qs_id, u8 pri)
{
	struct hclge_qs_to_pri_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);

	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;

	map->qs_id = cpu_to_le16(qs_id);
	map->priority = pri;
	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
				    u8 q_id, u16 qs_id)
{
	struct hclge_nq_to_qs_link_cmd *map;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);

	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;

	map->nq_id = cpu_to_le16(q_id);
	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
				  u8 dwrr)
{
	struct hclge_pg_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);

	weight = (struct hclge_pg_weight_cmd *)desc.data;

	weight->pg_id = pg_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
				   u8 dwrr)
{
	struct hclge_priority_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);

	weight = (struct hclge_priority_weight_cmd *)desc.data;

	weight->pri_id = pri_id;
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
				  u8 dwrr)
{
	struct hclge_qs_weight_cmd *weight;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);

	weight = (struct hclge_qs_weight_cmd *)desc.data;

	weight->qs_id = cpu_to_le16(qs_id);
	weight->dwrr = dwrr;

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
				    enum hclge_shap_bucket bucket, u8 pg_id,
				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
{
	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
		HCLGE_OPC_TM_PG_C_SHAPPING;
	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;

	shap_cfg_cmd->pg_id = pg_id;

	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
				     enum hclge_shap_bucket bucket, u8 pri_id,
				     u8 ir_b, u8 ir_u, u8 ir_s,
				     u8 bs_b, u8 bs_s)
{
	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
	enum hclge_opcode_type opcode;
	struct hclge_desc desc;

	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
		HCLGE_OPC_TM_PRI_C_SHAPPING;

	hclge_cmd_setup_basic_desc(&desc, opcode, false);

	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;

	shap_cfg_cmd->pri_id = pri_id;

	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
	hclge_tm_set_field(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);

	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pg_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(pri_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
{
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);

	if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
	else
		desc.data[1] = 0;

	desc.data[0] = cpu_to_le32(qs_id);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
{
	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
	struct hclge_desc desc;

	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
				   false);

	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;

	bp_to_qs_map_cmd->tc_id = tc;

	/* Qset and tc map one to one */
	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);

	return hclge_cmd_send(&hdev->hw, &desc, 1);
}

static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	u8 i;

	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				kinfo->num_tqps / kinfo->num_tc);
	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
	vport->dwrr = 100;  /* 100 percent as init */
	vport->alloc_rss_size = kinfo->rss_size;

	for (i = 0; i < kinfo->num_tc; i++) {
		if (hdev->hw_tc_map & BIT(i)) {
			kinfo->tc_info[i].enable = true;
			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
			kinfo->tc_info[i].tc = i;
		} else {
			/* Set to default queue if TC is disabled */
			kinfo->tc_info[i].enable = false;
			kinfo->tc_info[i].tqp_offset = 0;
			kinfo->tc_info[i].tqp_count = 1;
			kinfo->tc_info[i].tc = 0;
		}
	}

	memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
	       FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc));
}

static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		hclge_tm_vport_tc_info_update(vport);

		vport++;
	}
}

static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		hdev->tm_info.tc_info[i].tc_id = i;
		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
		hdev->tm_info.tc_info[i].pgid = 0;
		hdev->tm_info.tc_info[i].bw_limit =
			hdev->tm_info.pg_info[0].bw_limit;
	}

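	/* 1:1 prio-to-tc mapping; priorities beyond num_tc fall back to TC0 */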
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] =
			(i >= hdev->tm_info.num_tc) ? 0 : i;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
}

static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
{
	u8 i;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		int k;

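		/* PG0 gets all the dwrr bandwidth at init; other PGs get none */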
		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;

		hdev->tm_info.pg_info[i].pg_id = i;
		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;

		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;

		if (i != 0)
			continue;

		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
		for (k = 0; k < hdev->tm_info.num_tc; k++)
			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
	}
}

static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
{
	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tm_info.num_pg != 1))
		return -EINVAL;

	hclge_tm_pg_info_init(hdev);

	hclge_tm_tc_info_init(hdev);

	hclge_tm_vport_info_update(hdev);

	hdev->tm_info.fc_mode = HCLGE_FC_NONE;
	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;

	return 0;
}

static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg mapping */
		ret = hclge_tm_pg_to_pri_map_cfg(
			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	/* Cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg each pg's shaper */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Calc shaper para */
		ret = hclge_shaper_para_calc(
					hdev->tm_info.pg_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PG,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_C_BUCKET, i,
					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pg_shapping_cfg(hdev,
					       HCLGE_TM_SHAP_P_BUCKET, i,
					       ir_b, ir_u, ir_s,
					       HCLGE_SHAPER_BS_U_DEF,
					       HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;
	u32 i;

	/* cfg pg schd */
	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
		return 0;

	/* Cfg each pg's dwrr */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		/* Cfg dwrr */
		ret = hclge_tm_pg_weight_cfg(hdev, i,
					     hdev->tm_info.pg_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
				   struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hnae3_queue **tqp = kinfo->tqp;
	struct hnae3_tc_info *v_tc_info;
	u32 i, j;
	int ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		for (j = 0; j < v_tc_info->tqp_count; j++) {
			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];

			ret = hclge_tm_q_to_qs_map_cfg(hdev,
						       hclge_get_queue_id(q),
						       vport->qs_offset + i);
			if (ret)
				return ret;
		}
	}

	return 0;
}

static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one-to-one */
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
			if (ret)
				return ret;
		}
	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		int k;

		/* Cfg qs -> pri mapping: qs = tc, pri = vf, 8 qs -> 1 pri */
		for (k = 0; k < hdev->num_alloc_vport; k++)
			for (i = 0; i < HNAE3_MAX_TC; i++) {
				ret = hclge_tm_qs_to_pri_map_cfg(
					hdev, vport[k].qs_offset + i, k);
				if (ret)
					return ret;
			}
	} else {
		return -EINVAL;
	}

	/* Cfg q -> qs mapping */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_vport_q_to_qs_map(hdev, vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
{
	u8 ir_u, ir_b, ir_s;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_PRI,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;

		ret = hclge_tm_pri_shapping_cfg(
			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
			HCLGE_SHAPER_BS_S_DEF);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
{
	struct hclge_dev *hdev = vport->back;
	u8 ir_u, ir_b, ir_s;
	int ret;

	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
				     &ir_b, &ir_u, &ir_s);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
					vport->vport_id,
					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
					vport->vport_id,
					ir_b, ir_u, ir_s,
					HCLGE_SHAPER_BS_U_DEF,
					HCLGE_SHAPER_BS_S_DEF);
	if (ret)
		return ret;

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info *v_tc_info;
	u8 ir_u, ir_b, ir_s;
	u32 i;
	int ret;

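	/* For vnet-base mode the qset shaper parameters are only calculated
	 * and validated here; no qset shaping command is sent to hardware.
	 */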
	for (i = 0; i < kinfo->num_tc; i++) {
		v_tc_info = &kinfo->tc_info[i];
		ret = hclge_shaper_para_calc(
					hdev->tm_info.tc_info[i].bw_limit,
					HCLGE_SHAPER_LVL_QSET,
					&ir_b, &ir_u, &ir_s);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	/* Need to config vport shaper */
	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
		if (ret)
			return ret;

		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_pg_info *pg_info;
	u8 dwrr;
	int ret;
	u32 i;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		pg_info =
			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
		dwrr = pg_info->tc_dwrr[i];

		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;

		ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	/* Vf dwrr */
	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
	if (ret)
		return ret;

	/* Qset dwrr */
	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_weight_cfg(
			hdev, vport->qs_offset + i,
			hdev->tm_info.pg_info[0].tc_dwrr[i]);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u32 i;

	for (i = 0; i < hdev->num_alloc_vport; i++) {
		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
		if (ret)
			return ret;

		vport++;
	}

	return 0;
}

static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	} else {
		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_map_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_to_pri_map(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_q_qs_cfg(hdev);
}

static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_shaper_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_shaper_cfg(hdev);
}

int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_pg_dwrr_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_pri_dwrr_cfg(hdev);
}

static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
{
	int ret;
	u8 i;

	/* Only configured in TC-based scheduler mode */
	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
		return 0;

	for (i = 0; i < hdev->tm_info.num_pg; i++) {
		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
{
	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
	struct hclge_dev *hdev = vport->back;
	int ret;
	u8 i;

	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
	if (ret)
		return ret;

	for (i = 0; i < kinfo->num_tc; i++) {
		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
		if (ret)
			return ret;
	}

	return 0;
}

static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	int ret;
	u8 i;

	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		for (i = 0; i < hdev->tm_info.num_tc; i++) {
			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;

			ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
			if (ret)
				return ret;
		}
	} else {
		for (i = 0; i < hdev->num_alloc_vport; i++) {
			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
			if (ret)
				return ret;

			vport++;
		}
	}

	return 0;
}

static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
	if (ret)
		return ret;

	return hclge_tm_lvl34_schd_mode_cfg(hdev);
}

static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
{
	int ret;

	/* Cfg tm mapping */
	ret = hclge_tm_map_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg tm shaper */
	ret = hclge_tm_shaper_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg dwrr */
	ret = hclge_tm_dwrr_cfg(hdev);
	if (ret)
		return ret;

	/* Cfg schd mode for each level schd */
	return hclge_tm_schd_mode_hw(hdev);
}

int hclge_pause_setup_hw(struct hclge_dev *hdev)
{
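	/* MAC pause is enabled in both directions unless PFC is in use */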
	bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
	int ret;
	u8 i;

	ret = hclge_mac_pause_en_cfg(hdev, en, en);
	if (ret)
		return ret;

	/* Only DCB-capable devices support qset back pressure */
	if (!hnae3_dev_dcb_supported(hdev))
		return 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		ret = hclge_tm_qs_bp_cfg(hdev, i);
		if (ret)
			return ret;
	}

	return hclge_up_to_tc_map(hdev);
}

int hclge_tm_init_hw(struct hclge_dev *hdev)
{
	int ret;

	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
		return -ENOTSUPP;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev);
	if (ret)
		return ret;

	return 0;
}

int hclge_tm_schd_init(struct hclge_dev *hdev)
{
	int ret = hclge_tm_schd_info_init(hdev);

	if (ret)
		return ret;

	return hclge_tm_init_hw(hdev);
}