// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

#define BW_PERCENT	100

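/* Translate an IEEE 802.1Qaz ETS configuration from dcbnl into the
 * driver's internal TM info: per-TC scheduling mode, per-TC DWRR
 * weight, and the priority-to-TC mapping.
 */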
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* The hardware only supports the SP (strict
			 * priority) and ETS (enhanced transmission
			 * selection) algorithms; if dcbnl passes any
			 * other value, return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

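/* Fill an ieee_ets structure from the driver's current TM state so it
 * can be reported back through dcbnl.
 */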
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		if (i < hdev->tm_info.num_tc)
			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
		else
			ets->tc_tx_bw[i] = 0;

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

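/* Sanity-check a requested TC count and priority-to-TC map against the
 * hardware TC limit and the TQPs allocated to the PF.
 */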
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

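/* Derive the TC count implied by the new priority-to-TC map and flag
 * whether the map differs from the current one.
 */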
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* The TC count is the maximum TC id plus 1. */
	return max_tc_id + 1;
}

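/* Validate the per-TC scheduling modes: an ETS TC must be enabled and
 * have a non-zero bandwidth, and the ETS bandwidths must sum to 100%.
 */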
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed,
				       u8 tc_num)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (i >= tc_num) {
				dev_err(&hdev->pdev->dev,
					"tc%u is disabled, cannot set ets bw\n",
					i);
				return -EINVAL;
			}

			/* The hardware falls back to SP mode when the
			 * bandwidth is 0, so the ETS bandwidth must be
			 * greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

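/* Validate a full ETS configuration, reporting the resulting TC count
 * and whether the TC mapping changes.
 */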
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

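/* Reprogram the hardware after a TC mapping change: scheduler, pause
 * parameters, packet buffers and the RSS indirection table.
 */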
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);

	return hclge_rss_init_hw(hdev);
}

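/* Bring the client down, enable the TM flush, then uninitialize the
 * client.
 */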
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

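/* Reinitialize the client, disable the TM flush, then bring the client
 * back up.
 */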
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

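/* Apply an IEEE ETS configuration. When the TC mapping changes, the
 * client is torn down and rebuilt around the hardware update;
 * otherwise only the DWRR weights are reprogrammed.
 */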
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	if (num_tc > 1)
		hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		return hclge_notify_init_up(hdev);
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

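/* Report the PFC capability, the enabled priority bitmap and the PFC
 * request/indication counters.
 */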
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_mac_update_stats(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update MAC stats, ret = %d.\n", ret);
		return ret;
	}

	hclge_pfc_tx_stats_get(hdev, pfc->requests);
	hclge_pfc_rx_stats_get(hdev, pfc->indications);

	return 0;
}

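/* Apply a PFC configuration: translate the per-priority enable bitmap
 * into a per-TC map, then reprogram the pause settings and buffers
 * around a client down/up cycle.
 */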
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int last_bad_ret = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	/* Whether or not the following operations succeed, the TM flush
	 * must be disabled again and the client notified that the
	 * network is back up, so do not return early.
	 */
	ret = hclge_buffer_alloc(hdev);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		last_bad_ret = ret;

	return last_bad_ret;
}

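/* Add a DSCP-to-priority APP entry and switch the port to DSCP-based
 * TC mapping. On failure the previous mapping is restored.
 */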
static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	struct dcb_app old_app;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO)
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	if (app->priority == h->kinfo.dscp_prio[app->protocol])
		return 0;

	ret = dcb_ieee_setapp(netdev, app);
	if (ret)
		return ret;

	old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	old_app.protocol = app->protocol;
	old_app.priority = h->kinfo.dscp_prio[app->protocol];

	h->kinfo.dscp_prio[app->protocol] = app->priority;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = old_app.priority;
		(void)dcb_ieee_delapp(netdev, app);
		return ret;
	}

	vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
	if (old_app.priority == HNAE3_PRIO_ID_INVALID)
		h->kinfo.dscp_app_cnt++;
	else
		ret = dcb_ieee_delapp(netdev, &old_app);

	return ret;
}

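/* Remove a DSCP-to-priority APP entry. Once the last entry is gone the
 * port falls back to priority-based TC mapping.
 */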
static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO ||
	    app->priority != h->kinfo.dscp_prio[app->protocol])
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		return ret;

	h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to del dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = app->priority;
		(void)dcb_ieee_setapp(netdev, app);
		return ret;
	}

	if (h->kinfo.dscp_app_cnt)
		h->kinfo.dscp_app_cnt--;

	if (!h->kinfo.dscp_app_cnt) {
		vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
		ret = hclge_up_to_tc_map(hdev);
	}

	return ret;
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
		return 0;

	return hdev->dcbx_cap;
}

static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

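/* Validate an mqprio offload request: per-TC queue counts must be
 * powers of 2 no larger than the PF RSS size, queue ranges must be
 * contiguous starting from offset 0, TX rate limiting is not
 * supported, and the total queue count must fit the allocated TQPs.
 */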
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offset must start from 0 and be continuous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}
	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be no more than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

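/* Copy the validated mqprio parameters into the driver's TC info. */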
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));
}

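/* Push a new TC configuration to the hardware: refresh the scheduler
 * info and the priority-to-TC map, then update the hardware mappings.
 */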
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* If the client has unregistered, the mqprio configuration
	 * must not be changed, since uninitializing the rings may
	 * fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	kinfo = &vport->nic.kinfo;
	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;

	if (tc > 1)
		hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
	else
		hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, it will take effect after reset, ret = %d\n",
			 ret);
	} else {
		/* roll back */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.ieee_setapp    = hclge_ieee_setapp,
	.ieee_delapp    = hclge_ieee_delapp,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

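/* Register the DCB operations on the PF's NIC handle and default the
 * DCBX capability to host-managed IEEE mode.
 */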
void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If the device does not support DCB or the vport is not the
	 * PF, dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}