1 /*
2  * Copyright (c) 2016~2017 Hisilicon Limited.
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  */
9 
10 #include <linux/etherdevice.h>
11 
12 #include "hclge_cmd.h"
13 #include "hclge_main.h"
14 #include "hclge_tm.h"
15 
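/* VF and PF shaper levels reuse the priority and priority-group ticks,
 * respectively (see tick_array in hclge_shaper_para_calc below).
 */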
16 enum hclge_shaper_level {
17 	HCLGE_SHAPER_LVL_PRI	= 0,
18 	HCLGE_SHAPER_LVL_PG	= 1,
19 	HCLGE_SHAPER_LVL_PORT	= 2,
20 	HCLGE_SHAPER_LVL_QSET	= 3,
21 	HCLGE_SHAPER_LVL_CNT	= 4,
22 	HCLGE_SHAPER_LVL_VF	= 0,
23 	HCLGE_SHAPER_LVL_PF	= 1,
24 };
25 
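/* Default shaper bucket size parameters */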
26 #define HCLGE_SHAPER_BS_U_DEF	1
27 #define HCLGE_SHAPER_BS_S_DEF	4
28 
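/* Max speed of the ethernet port, in Mbps (100 Gbps) */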
29 #define HCLGE_ETHER_MAX_RATE	100000
30 
/* hclge_shaper_para_calc: calculate ir parameter for the shaper
 * @ir: rate to be configured, in Mbps
 * @shaper_level: the shaper level, e.g. port, pg, priority, queueset
 * @ir_b: IR_B parameter of IR shaper
 * @ir_u: IR_U parameter of IR shaper
 * @ir_s: IR_S parameter of IR shaper
 *
38  * the formula:
39  *
40  *		IR_b * (2 ^ IR_u) * 8
41  * IR(Mbps) = -------------------------  *  CLOCK(1000Mbps)
42  *		Tick * (2 ^ IR_s)
43  *
 * @return: 0 on success, a negative error code on failure
45  */
46 static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
47 				  u8 *ir_b, u8 *ir_u, u8 *ir_s)
48 {
	const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
		6 * 256,        /* Priority level */
		6 * 32,         /* Priority group level */
		6 * 8,          /* Port level */
		6 * 256         /* Qset level */
	};
55 	u8 ir_u_calc = 0, ir_s_calc = 0;
56 	u32 ir_calc;
57 	u32 tick;
58 
59 	/* Calc tick */
60 	if (shaper_level >= HCLGE_SHAPER_LVL_CNT)
61 		return -EINVAL;
62 
63 	tick = tick_array[shaper_level];
64 
65 	/**
66 	 * Calc the speed if ir_b = 126, ir_u = 0 and ir_s = 0
67 	 * the formula is changed to:
68 	 *		126 * 1 * 8
69 	 * ir_calc = ---------------- * 1000
70 	 *		tick * 1
71 	 */
72 	ir_calc = (1008000 + (tick >> 1) - 1) / tick;
73 
74 	if (ir_calc == ir) {
75 		*ir_b = 126;
76 		*ir_u = 0;
77 		*ir_s = 0;
78 
79 		return 0;
80 	} else if (ir_calc > ir) {
81 		/* Increasing the denominator to select ir_s value */
82 		while (ir_calc > ir) {
83 			ir_s_calc++;
84 			ir_calc = 1008000 / (tick * (1 << ir_s_calc));
85 		}
86 
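		/* No exact match: rearrange the formula to solve for ir_b,
		 * ir_b = ir * tick * 2^ir_s / 8000, where adding 4000
		 * (half of 8000) rounds the quotient to the nearest integer.
		 */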
87 		if (ir_calc == ir)
88 			*ir_b = 126;
89 		else
90 			*ir_b = (ir * tick * (1 << ir_s_calc) + 4000) / 8000;
91 	} else {
92 		/* Increasing the numerator to select ir_u value */
93 		u32 numerator;
94 
95 		while (ir_calc < ir) {
96 			ir_u_calc++;
97 			numerator = 1008000 * (1 << ir_u_calc);
98 			ir_calc = (numerator + (tick >> 1)) / tick;
99 		}
100 
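		/* When no exact match is found the loop exits with
		 * ir_u_calc one step too high; back it off and solve
		 * ir_b = ir * tick / (8000 * 2^ir_u), rounding to nearest.
		 */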
101 		if (ir_calc == ir) {
102 			*ir_b = 126;
103 		} else {
			u32 denominator = 8000 * (1 << --ir_u_calc);

			*ir_b = (ir * tick + (denominator >> 1)) / denominator;
106 		}
107 	}
108 
109 	*ir_u = ir_u_calc;
110 	*ir_s = ir_s_calc;
111 
112 	return 0;
113 }
114 
115 static int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
116 {
117 	struct hclge_desc desc;
118 
119 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
120 
121 	desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
122 		(rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
123 
124 	return hclge_cmd_send(&hdev->hw, &desc, 1);
125 }
126 
127 static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
128 {
129 	u8 tc;
130 
131 	for (tc = 0; tc < hdev->tm_info.num_tc; tc++)
132 		if (hdev->tm_info.tc_info[tc].up == pri_id)
133 			break;
134 
135 	if (tc >= hdev->tm_info.num_tc)
136 		return -EINVAL;
137 
138 	/**
139 	 * the register for priority has four bytes, the first bytes includes
140 	 *  priority0 and priority1, the higher 4bit stands for priority1
141 	 *  while the lower 4bit stands for priority0, as below:
142 	 * first byte:	| pri_1 | pri_0 |
143 	 * second byte:	| pri_3 | pri_2 |
144 	 * third byte:	| pri_5 | pri_4 |
145 	 * fourth byte:	| pri_7 | pri_6 |
146 	 */
147 	pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
148 
149 	return 0;
150 }
151 
152 static int hclge_up_to_tc_map(struct hclge_dev *hdev)
153 {
154 	struct hclge_desc desc;
155 	u8 *pri = (u8 *)desc.data;
156 	u8 pri_id;
157 	int ret;
158 
159 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
160 
161 	for (pri_id = 0; pri_id < hdev->tm_info.num_tc; pri_id++) {
162 		ret = hclge_fill_pri_array(hdev, pri, pri_id);
163 		if (ret)
164 			return ret;
165 	}
166 
167 	return hclge_cmd_send(&hdev->hw, &desc, 1);
168 }
169 
170 static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
171 				      u8 pg_id, u8 pri_bit_map)
172 {
173 	struct hclge_pg_to_pri_link_cmd *map;
174 	struct hclge_desc desc;
175 
176 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
177 
178 	map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
179 
180 	map->pg_id = pg_id;
181 	map->pri_bit_map = pri_bit_map;
182 
183 	return hclge_cmd_send(&hdev->hw, &desc, 1);
184 }
185 
186 static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
187 				      u16 qs_id, u8 pri)
188 {
189 	struct hclge_qs_to_pri_link_cmd *map;
190 	struct hclge_desc desc;
191 
192 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
193 
194 	map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
195 
196 	map->qs_id = cpu_to_le16(qs_id);
197 	map->priority = pri;
198 	map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
199 
200 	return hclge_cmd_send(&hdev->hw, &desc, 1);
201 }
202 
203 static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
204 				    u8 q_id, u16 qs_id)
205 {
206 	struct hclge_nq_to_qs_link_cmd *map;
207 	struct hclge_desc desc;
208 
209 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
210 
211 	map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
212 
213 	map->nq_id = cpu_to_le16(q_id);
214 	map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
215 
216 	return hclge_cmd_send(&hdev->hw, &desc, 1);
217 }
218 
219 static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
220 				  u8 dwrr)
221 {
222 	struct hclge_pg_weight_cmd *weight;
223 	struct hclge_desc desc;
224 
225 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
226 
227 	weight = (struct hclge_pg_weight_cmd *)desc.data;
228 
229 	weight->pg_id = pg_id;
230 	weight->dwrr = dwrr;
231 
232 	return hclge_cmd_send(&hdev->hw, &desc, 1);
233 }
234 
235 static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
236 				   u8 dwrr)
237 {
238 	struct hclge_priority_weight_cmd *weight;
239 	struct hclge_desc desc;
240 
241 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
242 
243 	weight = (struct hclge_priority_weight_cmd *)desc.data;
244 
245 	weight->pri_id = pri_id;
246 	weight->dwrr = dwrr;
247 
248 	return hclge_cmd_send(&hdev->hw, &desc, 1);
249 }
250 
251 static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
252 				  u8 dwrr)
253 {
254 	struct hclge_qs_weight_cmd *weight;
255 	struct hclge_desc desc;
256 
257 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
258 
259 	weight = (struct hclge_qs_weight_cmd *)desc.data;
260 
261 	weight->qs_id = cpu_to_le16(qs_id);
262 	weight->dwrr = dwrr;
263 
264 	return hclge_cmd_send(&hdev->hw, &desc, 1);
265 }
266 
267 static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
268 				    enum hclge_shap_bucket bucket, u8 pg_id,
269 				    u8 ir_b, u8 ir_u, u8 ir_s, u8 bs_b, u8 bs_s)
270 {
271 	struct hclge_pg_shapping_cmd *shap_cfg_cmd;
272 	enum hclge_opcode_type opcode;
273 	struct hclge_desc desc;
274 
275 	opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
276 		HCLGE_OPC_TM_PG_C_SHAPPING;
277 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
278 
279 	shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
280 
281 	shap_cfg_cmd->pg_id = pg_id;
282 
283 	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_B, ir_b);
284 	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_U, ir_u);
285 	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, IR_S, ir_s);
286 	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_B, bs_b);
287 	hclge_tm_set_feild(shap_cfg_cmd->pg_shapping_para, BS_S, bs_s);
288 
289 	return hclge_cmd_send(&hdev->hw, &desc, 1);
290 }
291 
292 static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
293 				     enum hclge_shap_bucket bucket, u8 pri_id,
294 				     u8 ir_b, u8 ir_u, u8 ir_s,
295 				     u8 bs_b, u8 bs_s)
296 {
297 	struct hclge_pri_shapping_cmd *shap_cfg_cmd;
298 	enum hclge_opcode_type opcode;
299 	struct hclge_desc desc;
300 
301 	opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
302 		HCLGE_OPC_TM_PRI_C_SHAPPING;
303 
304 	hclge_cmd_setup_basic_desc(&desc, opcode, false);
305 
306 	shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
307 
308 	shap_cfg_cmd->pri_id = pri_id;
309 
310 	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_B, ir_b);
311 	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_U, ir_u);
312 	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, IR_S, ir_s);
313 	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_B, bs_b);
314 	hclge_tm_set_feild(shap_cfg_cmd->pri_shapping_para, BS_S, bs_s);
315 
316 	return hclge_cmd_send(&hdev->hw, &desc, 1);
317 }
318 
319 static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
320 {
321 	struct hclge_desc desc;
322 
323 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
324 
325 	if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
326 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
327 	else
328 		desc.data[1] = 0;
329 
330 	desc.data[0] = cpu_to_le32(pg_id);
331 
332 	return hclge_cmd_send(&hdev->hw, &desc, 1);
333 }
334 
335 static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
336 {
337 	struct hclge_desc desc;
338 
339 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
340 
341 	if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
342 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
343 	else
344 		desc.data[1] = 0;
345 
346 	desc.data[0] = cpu_to_le32(pri_id);
347 
348 	return hclge_cmd_send(&hdev->hw, &desc, 1);
349 }
350 
351 static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id)
352 {
353 	struct hclge_desc desc;
354 
355 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
356 
357 	if (hdev->tm_info.tc_info[qs_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
358 		desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
359 	else
360 		desc.data[1] = 0;
361 
362 	desc.data[0] = cpu_to_le32(qs_id);
363 
364 	return hclge_cmd_send(&hdev->hw, &desc, 1);
365 }
366 
367 static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc)
368 {
369 	struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
370 	struct hclge_desc desc;
371 
372 	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
373 				   false);
374 
375 	bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
376 
377 	bp_to_qs_map_cmd->tc_id = tc;
378 
	/* Qset and TC are mapped one-to-one */
380 	bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(1 << tc);
381 
382 	return hclge_cmd_send(&hdev->hw, &desc, 1);
383 }
384 
385 static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
386 {
387 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
388 	struct hclge_dev *hdev = vport->back;
389 	u8 i;
390 
	vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
	kinfo->num_tc = min_t(u16, kinfo->num_tqps, hdev->tm_info.num_tc);
	kinfo->rss_size = min_t(u16, hdev->rss_size_max,
				kinfo->num_tqps / kinfo->num_tc);
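	/* Each vport owns a contiguous block of num_tc qsets */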
398 	vport->qs_offset = hdev->tm_info.num_tc * vport->vport_id;
399 	vport->dwrr = 100;  /* 100 percent as init */
400 
401 	for (i = 0; i < kinfo->num_tc; i++) {
402 		if (hdev->hw_tc_map & BIT(i)) {
403 			kinfo->tc_info[i].enable = true;
404 			kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
405 			kinfo->tc_info[i].tqp_count = kinfo->rss_size;
406 			kinfo->tc_info[i].tc = i;
407 			kinfo->tc_info[i].up = hdev->tm_info.tc_info[i].up;
408 		} else {
			/* Set to default queue if TC is disabled */
410 			kinfo->tc_info[i].enable = false;
411 			kinfo->tc_info[i].tqp_offset = 0;
412 			kinfo->tc_info[i].tqp_count = 1;
413 			kinfo->tc_info[i].tc = 0;
414 			kinfo->tc_info[i].up = 0;
415 		}
416 	}
417 }
418 
419 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
420 {
421 	struct hclge_vport *vport = hdev->vport;
422 	u32 i;
423 
424 	for (i = 0; i < hdev->num_alloc_vport; i++) {
425 		hclge_tm_vport_tc_info_update(vport);
426 
427 		vport++;
428 	}
429 }
430 
431 static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
432 {
433 	u8 i;
434 
435 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
436 		hdev->tm_info.tc_info[i].tc_id = i;
437 		hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
438 		hdev->tm_info.tc_info[i].up = i;
439 		hdev->tm_info.tc_info[i].pgid = 0;
440 		hdev->tm_info.tc_info[i].bw_limit =
441 			hdev->tm_info.pg_info[0].bw_limit;
442 	}
443 
444 	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
445 }
446 
447 static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
448 {
449 	u8 i;
450 
451 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
452 		int k;
453 
454 		hdev->tm_info.pg_dwrr[i] = i ? 0 : 100;
455 
456 		hdev->tm_info.pg_info[i].pg_id = i;
457 		hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
458 
459 		hdev->tm_info.pg_info[i].bw_limit = HCLGE_ETHER_MAX_RATE;
460 
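		/* Only PG0 is mapped to TCs and given TC weights */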
461 		if (i != 0)
462 			continue;
463 
464 		hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
465 		for (k = 0; k < hdev->tm_info.num_tc; k++)
466 			hdev->tm_info.pg_info[i].tc_dwrr[k] = 100;
467 	}
468 }
469 
470 static int hclge_tm_schd_info_init(struct hclge_dev *hdev)
471 {
472 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
473 	    (hdev->tm_info.num_pg != 1))
474 		return -EINVAL;
475 
476 	hclge_tm_pg_info_init(hdev);
477 
478 	hclge_tm_tc_info_init(hdev);
479 
480 	hclge_tm_vport_info_update(hdev);
481 
482 	hdev->tm_info.fc_mode = HCLGE_FC_NONE;
483 	hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
484 
485 	return 0;
486 }
487 
488 static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
489 {
490 	int ret;
491 	u32 i;
492 
493 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
494 		return 0;
495 
496 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
497 		/* Cfg mapping */
498 		ret = hclge_tm_pg_to_pri_map_cfg(
499 			hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
500 		if (ret)
501 			return ret;
502 	}
503 
504 	return 0;
505 }
506 
507 static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
508 {
509 	u8 ir_u, ir_b, ir_s;
510 	int ret;
511 	u32 i;
512 
513 	/* Cfg pg schd */
514 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
515 		return 0;
516 
	/* Cfg pg shaper */
518 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
519 		/* Calc shaper para */
520 		ret = hclge_shaper_para_calc(
521 					hdev->tm_info.pg_info[i].bw_limit,
522 					HCLGE_SHAPER_LVL_PG,
523 					&ir_b, &ir_u, &ir_s);
524 		if (ret)
525 			return ret;
526 
527 		ret = hclge_tm_pg_shapping_cfg(hdev,
528 					       HCLGE_TM_SHAP_C_BUCKET, i,
529 					       0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
530 					       HCLGE_SHAPER_BS_S_DEF);
531 		if (ret)
532 			return ret;
533 
534 		ret = hclge_tm_pg_shapping_cfg(hdev,
535 					       HCLGE_TM_SHAP_P_BUCKET, i,
536 					       ir_b, ir_u, ir_s,
537 					       HCLGE_SHAPER_BS_U_DEF,
538 					       HCLGE_SHAPER_BS_S_DEF);
539 		if (ret)
540 			return ret;
541 	}
542 
543 	return 0;
544 }
545 
546 static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
547 {
548 	int ret;
549 	u32 i;
550 
	/* Cfg pg schd */
552 	if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
553 		return 0;
554 
	/* Cfg dwrr for each pg */
	for (i = 0; i < hdev->tm_info.num_pg; i++) {
558 		ret = hclge_tm_pg_weight_cfg(hdev, i,
559 					     hdev->tm_info.pg_dwrr[i]);
560 		if (ret)
561 			return ret;
562 	}
563 
564 	return 0;
565 }
566 
567 static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
568 				   struct hclge_vport *vport)
569 {
570 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
571 	struct hnae3_queue **tqp = kinfo->tqp;
572 	struct hnae3_tc_info *v_tc_info;
573 	u32 i, j;
574 	int ret;
575 
576 	for (i = 0; i < kinfo->num_tc; i++) {
577 		v_tc_info = &kinfo->tc_info[i];
578 		for (j = 0; j < v_tc_info->tqp_count; j++) {
579 			struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
580 
581 			ret = hclge_tm_q_to_qs_map_cfg(hdev,
582 						       hclge_get_queue_id(q),
583 						       vport->qs_offset + i);
584 			if (ret)
585 				return ret;
586 		}
587 	}
588 
589 	return 0;
590 }
591 
592 static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
593 {
594 	struct hclge_vport *vport = hdev->vport;
595 	int ret;
596 	u32 i;
597 
598 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
		/* Cfg qs -> pri mapping, one-to-one */
600 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
601 			ret = hclge_tm_qs_to_pri_map_cfg(hdev, i, i);
602 			if (ret)
603 				return ret;
604 		}
605 	} else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
		int k;

		/* Cfg qs -> pri mapping: qs = tc, pri = vf, 8 qs -> 1 pri */
608 		for (k = 0; k < hdev->num_alloc_vport; k++)
609 			for (i = 0; i < HNAE3_MAX_TC; i++) {
610 				ret = hclge_tm_qs_to_pri_map_cfg(
611 					hdev, vport[k].qs_offset + i, k);
612 				if (ret)
613 					return ret;
614 			}
615 	} else {
616 		return -EINVAL;
617 	}
618 
619 	/* Cfg q -> qs mapping */
620 	for (i = 0; i < hdev->num_alloc_vport; i++) {
621 		ret = hclge_vport_q_to_qs_map(hdev, vport);
622 		if (ret)
623 			return ret;
624 
625 		vport++;
626 	}
627 
628 	return 0;
629 }
630 
631 static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
632 {
633 	u8 ir_u, ir_b, ir_s;
634 	int ret;
635 	u32 i;
636 
637 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
638 		ret = hclge_shaper_para_calc(
639 					hdev->tm_info.tc_info[i].bw_limit,
640 					HCLGE_SHAPER_LVL_PRI,
641 					&ir_b, &ir_u, &ir_s);
642 		if (ret)
643 			return ret;
644 
645 		ret = hclge_tm_pri_shapping_cfg(
646 			hdev, HCLGE_TM_SHAP_C_BUCKET, i,
647 			0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
648 			HCLGE_SHAPER_BS_S_DEF);
649 		if (ret)
650 			return ret;
651 
652 		ret = hclge_tm_pri_shapping_cfg(
653 			hdev, HCLGE_TM_SHAP_P_BUCKET, i,
654 			ir_b, ir_u, ir_s, HCLGE_SHAPER_BS_U_DEF,
655 			HCLGE_SHAPER_BS_S_DEF);
656 		if (ret)
657 			return ret;
658 	}
659 
660 	return 0;
661 }
662 
663 static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
664 {
665 	struct hclge_dev *hdev = vport->back;
666 	u8 ir_u, ir_b, ir_s;
667 	int ret;
668 
669 	ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
670 				     &ir_b, &ir_u, &ir_s);
671 	if (ret)
672 		return ret;
673 
674 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
675 					vport->vport_id,
676 					0, 0, 0, HCLGE_SHAPER_BS_U_DEF,
677 					HCLGE_SHAPER_BS_S_DEF);
678 	if (ret)
679 		return ret;
680 
681 	ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
682 					vport->vport_id,
683 					ir_b, ir_u, ir_s,
684 					HCLGE_SHAPER_BS_U_DEF,
685 					HCLGE_SHAPER_BS_S_DEF);
686 	if (ret)
687 		return ret;
688 
689 	return 0;
690 }
691 
692 static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
693 {
694 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
695 	struct hclge_dev *hdev = vport->back;
697 	u8 ir_u, ir_b, ir_s;
698 	u32 i;
699 	int ret;
700 
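	/* Only validate that qset shaper parameters can be derived from
	 * the TC bandwidth limits; no shaping command is sent from here.
	 */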
701 	for (i = 0; i < kinfo->num_tc; i++) {
703 		ret = hclge_shaper_para_calc(
704 					hdev->tm_info.tc_info[i].bw_limit,
705 					HCLGE_SHAPER_LVL_QSET,
706 					&ir_b, &ir_u, &ir_s);
707 		if (ret)
708 			return ret;
709 	}
710 
711 	return 0;
712 }
713 
714 static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
715 {
716 	struct hclge_vport *vport = hdev->vport;
717 	int ret;
718 	u32 i;
719 
	/* Config the shaper for every vport */
721 	for (i = 0; i < hdev->num_alloc_vport; i++) {
722 		ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
723 		if (ret)
724 			return ret;
725 
726 		ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
727 		if (ret)
728 			return ret;
729 
730 		vport++;
731 	}
732 
733 	return 0;
734 }
735 
736 static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
737 {
738 	int ret;
739 
740 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
741 		ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
742 		if (ret)
743 			return ret;
744 	} else {
745 		ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
746 		if (ret)
747 			return ret;
748 	}
749 
750 	return 0;
751 }
752 
753 static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
754 {
755 	struct hclge_pg_info *pg_info;
756 	u8 dwrr;
757 	int ret;
758 	u32 i;
759 
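	/* In TC-based mode qset i maps to priority i one-to-one, so the
	 * same DWRR weight is applied at both levels.
	 */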
760 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
761 		pg_info =
762 			&hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
763 		dwrr = pg_info->tc_dwrr[i];
764 
765 		ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
766 		if (ret)
767 			return ret;
768 
769 		ret = hclge_tm_qs_weight_cfg(hdev, i, dwrr);
770 		if (ret)
771 			return ret;
772 	}
773 
774 	return 0;
775 }
776 
777 static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
778 {
779 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
780 	struct hclge_dev *hdev = vport->back;
781 	int ret;
782 	u8 i;
783 
784 	/* Vf dwrr */
785 	ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
786 	if (ret)
787 		return ret;
788 
789 	/* Qset dwrr */
790 	for (i = 0; i < kinfo->num_tc; i++) {
791 		ret = hclge_tm_qs_weight_cfg(
792 			hdev, vport->qs_offset + i,
793 			hdev->tm_info.pg_info[0].tc_dwrr[i]);
794 		if (ret)
795 			return ret;
796 	}
797 
798 	return 0;
799 }
800 
801 static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
802 {
803 	struct hclge_vport *vport = hdev->vport;
804 	int ret;
805 	u32 i;
806 
807 	for (i = 0; i < hdev->num_alloc_vport; i++) {
808 		ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
809 		if (ret)
810 			return ret;
811 
812 		vport++;
813 	}
814 
815 	return 0;
816 }
817 
818 static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
819 {
820 	int ret;
821 
822 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
823 		ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
824 		if (ret)
825 			return ret;
826 	} else {
827 		ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
828 		if (ret)
829 			return ret;
830 	}
831 
832 	return 0;
833 }
834 
835 static int hclge_tm_map_cfg(struct hclge_dev *hdev)
836 {
837 	int ret;
838 
839 	ret = hclge_tm_pg_to_pri_map(hdev);
840 	if (ret)
841 		return ret;
842 
843 	return hclge_tm_pri_q_qs_cfg(hdev);
844 }
845 
846 static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
847 {
848 	int ret;
849 
850 	ret = hclge_tm_pg_shaper_cfg(hdev);
851 	if (ret)
852 		return ret;
853 
854 	return hclge_tm_pri_shaper_cfg(hdev);
855 }
856 
857 int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
858 {
859 	int ret;
860 
861 	ret = hclge_tm_pg_dwrr_cfg(hdev);
862 	if (ret)
863 		return ret;
864 
865 	return hclge_tm_pri_dwrr_cfg(hdev);
866 }
867 
868 static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
869 {
870 	int ret;
871 	u8 i;
872 
	/* Only configured in TC-based scheduler mode */
874 	if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
875 		return 0;
876 
877 	for (i = 0; i < hdev->tm_info.num_pg; i++) {
878 		ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
879 		if (ret)
880 			return ret;
881 	}
882 
883 	return 0;
884 }
885 
886 static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
887 {
888 	struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
889 	struct hclge_dev *hdev = vport->back;
890 	int ret;
891 	u8 i;
892 
893 	ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
894 	if (ret)
895 		return ret;
896 
897 	for (i = 0; i < kinfo->num_tc; i++) {
898 		ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i);
899 		if (ret)
900 			return ret;
901 	}
902 
903 	return 0;
904 }
905 
906 static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
907 {
908 	struct hclge_vport *vport = hdev->vport;
909 	int ret;
910 	u8 i;
911 
912 	if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
913 		for (i = 0; i < hdev->tm_info.num_tc; i++) {
914 			ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
915 			if (ret)
916 				return ret;
917 
918 			ret = hclge_tm_qs_schd_mode_cfg(hdev, i);
919 			if (ret)
920 				return ret;
921 		}
922 	} else {
923 		for (i = 0; i < hdev->num_alloc_vport; i++) {
924 			ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
925 			if (ret)
926 				return ret;
927 
928 			vport++;
929 		}
930 	}
931 
932 	return 0;
933 }
934 
935 static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
936 {
937 	int ret;
938 
939 	ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
940 	if (ret)
941 		return ret;
942 
943 	return hclge_tm_lvl34_schd_mode_cfg(hdev);
944 }
945 
946 static int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
947 {
948 	int ret;
949 
	/* Cfg tm mapping */
951 	ret = hclge_tm_map_cfg(hdev);
952 	if (ret)
953 		return ret;
954 
955 	/* Cfg tm shaper */
956 	ret = hclge_tm_shaper_cfg(hdev);
957 	if (ret)
958 		return ret;
959 
960 	/* Cfg dwrr */
961 	ret = hclge_tm_dwrr_cfg(hdev);
962 	if (ret)
963 		return ret;
964 
965 	/* Cfg schd mode for each level schd */
966 	return hclge_tm_schd_mode_hw(hdev);
967 }
968 
969 int hclge_pause_setup_hw(struct hclge_dev *hdev)
970 {
971 	bool en = hdev->tm_info.fc_mode != HCLGE_FC_PFC;
972 	int ret;
973 	u8 i;
974 
975 	ret = hclge_mac_pause_en_cfg(hdev, en, en);
976 	if (ret)
977 		return ret;
978 
979 	for (i = 0; i < hdev->tm_info.num_tc; i++) {
980 		ret = hclge_tm_qs_bp_cfg(hdev, i);
981 		if (ret)
982 			return ret;
983 	}
984 
985 	return hclge_up_to_tc_map(hdev);
986 }
987 
988 int hclge_tm_init_hw(struct hclge_dev *hdev)
989 {
990 	int ret;
991 
992 	if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
993 	    (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
994 		return -ENOTSUPP;
995 
996 	ret = hclge_tm_schd_setup_hw(hdev);
997 	if (ret)
998 		return ret;
999 
	return hclge_pause_setup_hw(hdev);
1005 }
1006 
1007 int hclge_tm_schd_init(struct hclge_dev *hdev)
1008 {
	int ret;

	ret = hclge_tm_schd_info_init(hdev);
1011 	if (ret)
1012 		return ret;
1013 
1014 	return hclge_tm_init_hw(hdev);
1015 }
1016