// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include "hclge_main.h"
#include "hclge_dcb.h"
#include "hclge_tm.h"
#include "hnae3.h"

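/* ETS bandwidths from dcbnl are percentages; the ETS TC weights must sum to 100 */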
#define BW_PERCENT	100

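/* Convert an IEEE 802.1Qaz ETS configuration from dcbnl into the
 * driver's internal TM scheduling info: strict-priority TCs use SP
 * mode with a zero DWRR weight, while ETS TCs use DWRR mode with the
 * requested bandwidth percentage as their weight.
 */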
static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
				     struct ieee_ets *ets)
{
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_SP;
			hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			hdev->tm_info.tc_info[i].tc_sch_mode =
				HCLGE_SCH_MODE_DWRR;
			hdev->tm_info.pg_info[0].tc_dwrr[i] =
				ets->tc_tx_bw[i];
			break;
		default:
			/* Hardware only supports the SP (strict
			 * priority) and ETS (enhanced transmission
			 * selection) algorithms; if dcbnl hands us any
			 * other value, return an error.
			 */
			return -EINVAL;
		}
	}

	hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);

	return 0;
}

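/* Fill an ieee_ets structure from the internal TM info so the current
 * configuration can be reported back to dcbnl; disabled TCs report a
 * bandwidth of zero.
 */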
static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
				      struct ieee_ets *ets)
{
	u32 i;

	memset(ets, 0, sizeof(*ets));
	ets->willing = 1;
	ets->ets_cap = hdev->tc_max;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
		if (i < hdev->tm_info.num_tc)
			ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
		else
			ets->tc_tx_bw[i] = 0;

		if (hdev->tm_info.tc_info[i].tc_sch_mode ==
		    HCLGE_SCH_MODE_SP)
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
		else
			ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
	}
}

/* IEEE std */
static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	hclge_tm_info_to_ieee_ets(hdev, ets);

	return 0;
}

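/* Common sanity checks for a TC configuration: the TC count must not
 * exceed the hardware maximum or the number of allocated TQPs, and
 * every user priority must map to an enabled TC.
 */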
static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
				     u8 *prio_tc)
{
	int i;

	if (num_tc > hdev->tc_max) {
		dev_err(&hdev->pdev->dev,
			"tc num checking failed, %u > tc_max(%u)\n",
			num_tc, hdev->tc_max);
		return -EINVAL;
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (prio_tc[i] >= num_tc) {
			dev_err(&hdev->pdev->dev,
				"prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
				i, prio_tc[i], num_tc);
			return -EINVAL;
		}
	}

	if (num_tc > hdev->vport[0].alloc_tqps) {
		dev_err(&hdev->pdev->dev,
			"allocated tqp checking failed, %u > tqp(%u)\n",
			num_tc, hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

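/* Derive the number of TCs used by the requested prio-to-TC map and
 * note whether the map differs from the current one. For example, a
 * map of {0, 0, 1, 1, 2, 2, 0, 0} has a max TC id of 2, so 3 TCs are
 * in use.
 */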
static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
			       bool *changed)
{
	u8 max_tc_id = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
			*changed = true;

		if (ets->prio_tc[i] > max_tc_id)
			max_tc_id = ets->prio_tc[i];
	}

	/* the number of TCs in use is the max TC id plus 1 */
	return max_tc_id + 1;
}

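/* Validate the per-TC scheduling algorithms: ETS bandwidth may only be
 * assigned to enabled TCs, must be non-zero (the hardware would fall
 * back to SP mode otherwise), and all ETS weights together must equal
 * BW_PERCENT.
 */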
static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
				       struct ieee_ets *ets, bool *changed,
				       u8 tc_num)
{
	bool has_ets_tc = false;
	u32 total_ets_bw = 0;
	u8 i;

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		switch (ets->tc_tsa[i]) {
		case IEEE_8021QAZ_TSA_STRICT:
			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_SP)
				*changed = true;
			break;
		case IEEE_8021QAZ_TSA_ETS:
			if (i >= tc_num) {
				dev_err(&hdev->pdev->dev,
					"tc%u is disabled, cannot set ets bw\n",
					i);
				return -EINVAL;
			}

			/* The hardware switches a TC to SP mode if its
			 * bandwidth is 0, so the ETS bandwidth must be
			 * greater than 0.
			 */
			if (!ets->tc_tx_bw[i]) {
				dev_err(&hdev->pdev->dev,
					"tc%u ets bw cannot be 0\n", i);
				return -EINVAL;
			}

			if (hdev->tm_info.tc_info[i].tc_sch_mode !=
				HCLGE_SCH_MODE_DWRR)
				*changed = true;

			total_ets_bw += ets->tc_tx_bw[i];
			has_ets_tc = true;
			break;
		default:
			return -EINVAL;
		}
	}

	if (has_ets_tc && total_ets_bw != BW_PERCENT)
		return -EINVAL;

	return 0;
}

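/* Validate a complete ETS request: derive the TC count from the prio
 * map, run the common and scheduling-mode checks, and report whether
 * the resulting configuration differs from the current one.
 */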
static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
			      u8 *tc, bool *changed)
{
	u8 tc_num;
	int ret;

	tc_num = hclge_ets_tc_changed(hdev, ets, changed);

	ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
	if (ret)
		return ret;

	ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
	if (ret)
		return ret;

	*tc = tc_num;
	if (*tc != hdev->tm_info.num_tc)
		*changed = true;

	return 0;
}

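/* Re-program the hardware after the TC map has changed: scheduler,
 * pause parameters and buffers, then re-initialize RSS since the
 * queue-to-TC layout may have changed.
 */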
static int hclge_map_update(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_tm_schd_setup_hw(hdev);
	if (ret)
		return ret;

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_buffer_alloc(hdev);
	if (ret)
		return ret;

	hclge_comm_rss_indir_init_cfg(hdev->ae_dev, &hdev->rss_cfg);

	return hclge_rss_init_hw(hdev);
}

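/* Quiesce the device for reconfiguration: bring the client down,
 * enable TM flush, then uninitialize the client.
 */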
static int hclge_notify_down_uinit(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
}

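/* Reverse of hclge_notify_down_uinit(): re-initialize the client,
 * disable TM flush, then bring the client back up.
 */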
static int hclge_notify_init_up(struct hclge_dev *hdev)
{
	int ret;

	ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		return ret;

	return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}

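/* dcbnl ieee_setets hook: validate the request and, if the TC map
 * changed, take the device down and rebuild the hardware maps;
 * otherwise only the DWRR weights need to be re-programmed.
 */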
static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	bool map_changed = false;
	u8 num_tc = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
	    h->kinfo.tc_info.mqprio_active)
		return -EINVAL;

	ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
	if (ret)
		return ret;

	if (map_changed) {
		netif_dbg(h, drv, netdev, "set ets\n");

		ret = hclge_notify_down_uinit(hdev);
		if (ret)
			return ret;
	}

	hclge_tm_schd_info_update(hdev, num_tc);
	h->kinfo.tc_info.dcb_ets_active = num_tc > 1;

	ret = hclge_ieee_ets_to_tm_info(hdev, ets);
	if (ret)
		goto err_out;

	if (map_changed) {
		ret = hclge_map_update(hdev);
		if (ret)
			goto err_out;

		return hclge_notify_init_up(hdev);
	}

	return hclge_tm_dwrr_cfg(hdev);

err_out:
	if (!map_changed)
		return ret;

	hclge_notify_init_up(hdev);

	return ret;
}

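/* dcbnl ieee_getpfc hook: report the PFC capability and enabled
 * priorities along with per-priority pause counters freshly read from
 * the MAC statistics.
 */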
static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;
	int ret;

	memset(pfc, 0, sizeof(*pfc));
	pfc->pfc_cap = hdev->pfc_max;
	pfc->pfc_en = hdev->tm_info.pfc_en;

	ret = hclge_mac_update_stats(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to update MAC stats, ret = %d.\n", ret);
		return ret;
	}

	hclge_pfc_tx_stats_get(hdev, pfc->requests);
	hclge_pfc_rx_stats_get(hdev, pfc->indications);

	return 0;
}

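/* dcbnl ieee_setpfc hook: translate the per-priority PFC enable bitmap
 * into a per-TC map via prio_tc, then re-program the pause settings
 * and buffers with the client quiesced.
 */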
static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	u8 i, j, pfc_map, *prio_tc;
	int last_bad_ret = 0;
	int ret;

	if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
		return -EINVAL;

	if (pfc->pfc_en == hdev->tm_info.pfc_en)
		return 0;

	prio_tc = hdev->tm_info.prio_tc;
	pfc_map = 0;

	for (i = 0; i < hdev->tm_info.num_tc; i++) {
		for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
			if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
				pfc_map |= BIT(i);
				break;
			}
		}
	}

	hdev->tm_info.hw_pfc_map = pfc_map;
	hdev->tm_info.pfc_en = pfc->pfc_en;

	netif_dbg(h, drv, netdev,
		  "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
		  pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);

	hclge_tm_pfc_info_update(hdev);

	ret = hclge_pause_setup_hw(hdev, false);
	if (ret)
		return ret;

	ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
	if (ret)
		return ret;

	ret = hclge_tm_flush_cfg(hdev, true);
	if (ret)
		return ret;

	/* Whether or not the following operations succeed, TM flush
	 * must be disabled and the client notified that the device is
	 * up again, so do not return early; record the last failure
	 * and report it at the end.
	 */
	ret = hclge_buffer_alloc(hdev);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_tm_flush_cfg(hdev, false);
	if (ret)
		last_bad_ret = ret;

	ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
	if (ret)
		last_bad_ret = ret;

	return last_bad_ret;
}

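/* dcbnl ieee_setapp hook for DSCP entries: record the new
 * DSCP-to-priority mapping, program the DSCP-to-TC map into hardware,
 * and roll the dcbnl app table back if the hardware update fails.
 */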
static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	struct dcb_app old_app;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO)
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	if (app->priority == h->kinfo.dscp_prio[app->protocol])
		return 0;

	ret = dcb_ieee_setapp(netdev, app);
	if (ret)
		return ret;

	old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
	old_app.protocol = app->protocol;
	old_app.priority = h->kinfo.dscp_prio[app->protocol];

	h->kinfo.dscp_prio[app->protocol] = app->priority;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to set dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = old_app.priority;
		(void)dcb_ieee_delapp(netdev, app);
		return ret;
	}

	vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
	if (old_app.priority == HNAE3_PRIO_ID_INVALID)
		h->kinfo.dscp_app_cnt++;
	else
		ret = dcb_ieee_delapp(netdev, &old_app);

	return ret;
}

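/* dcbnl ieee_delapp hook for DSCP entries: remove the mapping, restore
 * it if the hardware update fails, and fall back to priority-based TC
 * mapping once the last DSCP entry is gone.
 */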
static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;
	int ret;

	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
	    app->protocol >= HNAE3_MAX_DSCP ||
	    app->priority >= HNAE3_MAX_USER_PRIO ||
	    app->priority != h->kinfo.dscp_prio[app->protocol])
		return -EINVAL;

	dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
		 app->protocol, app->priority);

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		return ret;

	h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
	ret = hclge_dscp_to_tc_map(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to del dscp to tc map, ret = %d\n", ret);
		h->kinfo.dscp_prio[app->protocol] = app->priority;
		(void)dcb_ieee_setapp(netdev, app);
		return ret;
	}

	if (h->kinfo.dscp_app_cnt)
		h->kinfo.dscp_app_cnt--;

	if (!h->kinfo.dscp_app_cnt) {
		vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
		ret = hclge_up_to_tc_map(hdev);
	}

	return ret;
}

/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hclge_dev *hdev = vport->back;

	if (h->kinfo.tc_info.mqprio_active)
		return 0;

	return hdev->dcbx_cap;
}

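/* dcbnl setdcbx hook: only host-managed IEEE DCBX is supported; a
 * nonzero return value rejects the requested mode.
 */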
static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct net_device *netdev = h->kinfo.netdev;
	struct hclge_dev *hdev = vport->back;

	netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);

	/* No support for LLD_MANAGED modes or CEE */
	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
	    (mode & DCB_CAP_DCBX_VER_CEE) ||
	    !(mode & DCB_CAP_DCBX_HOST))
		return 1;

	hdev->dcbx_cap = mode;

	return 0;
}

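/* Validate an mqprio offload request: each TC's queue count must be a
 * power of 2 within the RSS size limit, the queue ranges must be
 * contiguous starting at offset 0, per-TC rate limiting is not
 * supported, and the total queue count must fit the allocated TQPs.
 */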
static int hclge_mqprio_qopt_check(struct hclge_dev *hdev,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	u16 queue_sum = 0;
	int ret;
	int i;

	if (!mqprio_qopt->qopt.num_tc) {
		mqprio_qopt->qopt.num_tc = 1;
		return 0;
	}

	ret = hclge_dcb_common_validate(hdev, mqprio_qopt->qopt.num_tc,
					mqprio_qopt->qopt.prio_tc_map);
	if (ret)
		return ret;

	for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
		if (!is_power_of_2(mqprio_qopt->qopt.count[i])) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count must be power of 2\n");
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.count[i] > hdev->pf_rss_size_max) {
			dev_err(&hdev->pdev->dev,
				"qopt queue count should be no more than %u\n",
				hdev->pf_rss_size_max);
			return -EINVAL;
		}

		if (mqprio_qopt->qopt.offset[i] != queue_sum) {
			dev_err(&hdev->pdev->dev,
				"qopt queue offsets must start at 0 and be contiguous\n");
			return -EINVAL;
		}

		if (mqprio_qopt->min_rate[i] || mqprio_qopt->max_rate[i]) {
			dev_err(&hdev->pdev->dev,
				"qopt tx_rate is not supported\n");
			return -EOPNOTSUPP;
		}

		queue_sum = mqprio_qopt->qopt.offset[i];
		queue_sum += mqprio_qopt->qopt.count[i];
	}
	if (hdev->vport[0].alloc_tqps < queue_sum) {
		dev_err(&hdev->pdev->dev,
			"qopt queue count sum should be no more than %u\n",
			hdev->vport[0].alloc_tqps);
		return -EINVAL;
	}

	return 0;
}

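/* Copy the accepted mqprio parameters into the handle's TC info */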
static void hclge_sync_mqprio_qopt(struct hnae3_tc_info *tc_info,
				   struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	memset(tc_info, 0, sizeof(*tc_info));
	tc_info->num_tc = mqprio_qopt->qopt.num_tc;
	memcpy(tc_info->prio_tc, mqprio_qopt->qopt.prio_tc_map,
	       sizeof_field(struct hnae3_tc_info, prio_tc));
	memcpy(tc_info->tqp_count, mqprio_qopt->qopt.count,
	       sizeof_field(struct hnae3_tc_info, tqp_count));
	memcpy(tc_info->tqp_offset, mqprio_qopt->qopt.offset,
	       sizeof_field(struct hnae3_tc_info, tqp_offset));
}

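/* Apply a TC configuration requested via mqprio: update the scheduler
 * info and prio-to-TC map, then rebuild the hardware maps.
 */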
static int hclge_config_tc(struct hclge_dev *hdev,
			   struct hnae3_tc_info *tc_info)
{
	int i;

	hclge_tm_schd_info_update(hdev, tc_info->num_tc);
	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
		hdev->tm_info.prio_tc[i] = tc_info->prio_tc[i];

	return hclge_map_update(hdev);
}

/* Set up TC for hardware offloaded mqprio in channel mode */
static int hclge_setup_tc(struct hnae3_handle *h,
			  struct tc_mqprio_qopt_offload *mqprio_qopt)
{
	struct hclge_vport *vport = hclge_get_vport(h);
	struct hnae3_knic_private_info *kinfo;
	struct hclge_dev *hdev = vport->back;
	struct hnae3_tc_info old_tc_info;
	u8 tc = mqprio_qopt->qopt.num_tc;
	int ret;

	/* If the client is unregistered, changing the mqprio
	 * configuration is not allowed, since uninitializing the
	 * rings could fail.
	 */
	if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
		return -EBUSY;

	kinfo = &vport->nic.kinfo;
	if (kinfo->tc_info.dcb_ets_active)
		return -EINVAL;

	ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to check mqprio qopt params, ret = %d\n", ret);
		return ret;
	}

	ret = hclge_notify_down_uinit(hdev);
	if (ret)
		return ret;

	memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
	hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
	kinfo->tc_info.mqprio_active = tc > 0;

	ret = hclge_config_tc(hdev, &kinfo->tc_info);
	if (ret)
		goto err_out;

	return hclge_notify_init_up(hdev);

err_out:
	if (!tc) {
		dev_warn(&hdev->pdev->dev,
			 "failed to destroy mqprio, will be active after reset, ret = %d\n",
			 ret);
	} else {
		/* roll-back */
		memcpy(&kinfo->tc_info, &old_tc_info, sizeof(old_tc_info));
		if (hclge_config_tc(hdev, &kinfo->tc_info))
			dev_err(&hdev->pdev->dev,
				"failed to roll back tc configuration\n");
	}
	hclge_notify_init_up(hdev);

	return ret;
}

static const struct hnae3_dcb_ops hns3_dcb_ops = {
	.ieee_getets	= hclge_ieee_getets,
	.ieee_setets	= hclge_ieee_setets,
	.ieee_getpfc	= hclge_ieee_getpfc,
	.ieee_setpfc	= hclge_ieee_setpfc,
	.ieee_setapp    = hclge_ieee_setapp,
	.ieee_delapp    = hclge_ieee_delapp,
	.getdcbx	= hclge_getdcbx,
	.setdcbx	= hclge_setdcbx,
	.setup_tc	= hclge_setup_tc,
};

void hclge_dcb_ops_set(struct hclge_dev *hdev)
{
	struct hclge_vport *vport = hdev->vport;
	struct hnae3_knic_private_info *kinfo;

	/* If hdev does not support DCB or the vport is not a PF,
	 * dcb_ops is not set.
	 */
	if (!hnae3_dev_dcb_supported(hdev) ||
	    vport->vport_id != 0)
		return;

	kinfo = &vport->nic.kinfo;
	kinfo->dcb_ops = &hns3_dcb_ops;
	hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
}