1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2019, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_dcb.h"
6 #include "ice_dcb_lib.h"
7 #include "ice_dcb_nl.h"
8 #include <net/dcbnl.h>
9 
10 /**
11  * ice_dcbnl_devreset - perform enough of an ifdown/ifup to sync DCBNL info
12  * @netdev: device associated with interface that needs reset
13  */
14 static void ice_dcbnl_devreset(struct net_device *netdev)
15 {
16 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
17 
18 	while (ice_is_reset_in_progress(pf->state))
19 		usleep_range(1000, 2000);
20 
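	/* dcbnl callbacks are invoked under RTNL, so the interface can be
	 * bounced directly here to force the stack to re-read DCB state.
	 */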
21 	dev_close(netdev);
22 	netdev_state_change(netdev);
23 	dev_open(netdev, NULL);
24 	netdev_state_change(netdev);
25 }
26 
27 /**
28  * ice_dcbnl_getets - retrieve local ETS configuration
29  * @netdev: the relevant netdev
30  * @ets: struct to hold ETS configuration
31  */
32 static int ice_dcbnl_getets(struct net_device *netdev, struct ieee_ets *ets)
33 {
34 	struct ice_dcbx_cfg *dcbxcfg;
35 	struct ice_pf *pf;
36 
37 	pf = ice_netdev_to_pf(netdev);
38 	dcbxcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
39 
40 	ets->willing = dcbxcfg->etscfg.willing;
41 	ets->ets_cap = dcbxcfg->etscfg.maxtcs;
42 	ets->cbs = dcbxcfg->etscfg.cbs;
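	/* The device keeps a single ETS bandwidth table, so report the same
	 * values for both Tx and Rx.
	 */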
43 	memcpy(ets->tc_tx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_tx_bw));
44 	memcpy(ets->tc_rx_bw, dcbxcfg->etscfg.tcbwtable, sizeof(ets->tc_rx_bw));
45 	memcpy(ets->tc_tsa, dcbxcfg->etscfg.tsatable, sizeof(ets->tc_tsa));
46 	memcpy(ets->prio_tc, dcbxcfg->etscfg.prio_table, sizeof(ets->prio_tc));
47 	memcpy(ets->tc_reco_bw, dcbxcfg->etsrec.tcbwtable,
48 	       sizeof(ets->tc_reco_bw));
49 	memcpy(ets->tc_reco_tsa, dcbxcfg->etsrec.tsatable,
50 	       sizeof(ets->tc_reco_tsa));
51 	memcpy(ets->reco_prio_tc, dcbxcfg->etscfg.prio_table,
52 	       sizeof(ets->reco_prio_tc));
53 
54 	return 0;
55 }
56 
57 /**
58  * ice_dcbnl_setets - set IEEE ETS configuration
59  * @netdev: pointer to relevant netdev
60  * @ets: struct to hold ETS configuration
61  */
62 static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
63 {
64 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
65 	struct ice_dcbx_cfg *new_cfg;
66 	int bwcfg = 0, bwrec = 0;
67 	int err, i;
68 
69 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
70 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
71 		return -EINVAL;
72 
73 	if (pf->lag && pf->lag->bonded) {
74 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
75 		return -EINVAL;
76 	}
77 
78 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
79 
80 	mutex_lock(&pf->tc_mutex);
81 
82 	new_cfg->etscfg.willing = ets->willing;
83 	new_cfg->etscfg.cbs = ets->cbs;
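	/* Copy the requested ETS settings and total up the bandwidth
	 * assignments so they can be validated below.
	 */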
84 	ice_for_each_traffic_class(i) {
85 		new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
86 		bwcfg += ets->tc_tx_bw[i];
87 		new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
88 		if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
89 			/* in DSCP mode up->tc mapping cannot change */
90 			new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
91 			new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
92 		}
93 		new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
94 		bwrec += ets->tc_reco_bw[i];
95 		new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
96 	}
97 
98 	if (ice_dcb_bwchk(pf, new_cfg)) {
99 		err = -EINVAL;
100 		goto ets_out;
101 	}
102 
103 	new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
104 
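	/* If no bandwidth was assigned at all, give TC0 the full 100% so the
	 * configured and recommended tables remain usable.
	 */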
105 	if (!bwcfg)
106 		new_cfg->etscfg.tcbwtable[0] = 100;
107 
108 	if (!bwrec)
109 		new_cfg->etsrec.tcbwtable[0] = 100;
110 
111 	err = ice_pf_dcb_cfg(pf, new_cfg, true);
112 	/* return of zero indicates new cfg applied */
113 	if (err == ICE_DCB_HW_CHG_RST)
114 		ice_dcbnl_devreset(netdev);
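	/* "no HW change needed" is still success from the caller's
	 * perspective, so report it as such.
	 */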
115 	if (err == ICE_DCB_NO_HW_CHG)
116 		err = ICE_DCB_HW_CHG_RST;
117 
118 ets_out:
119 	mutex_unlock(&pf->tc_mutex);
120 	return err;
121 }
122 
123 /**
124  * ice_dcbnl_getnumtcs - Get max number of traffic classes supported
125  * @dev: pointer to netdev struct
126  * @tcid: TC ID
127  * @num: total number of TCs supported by the adapter
128  *
129  * Return the total number of TCs supported
130  */
131 static int
132 ice_dcbnl_getnumtcs(struct net_device *dev, int __always_unused tcid, u8 *num)
133 {
134 	struct ice_pf *pf = ice_netdev_to_pf(dev);
135 
136 	if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
137 		return -EINVAL;
138 
139 	*num = pf->hw.func_caps.common_cap.maxtc;
140 	return 0;
141 }
142 
143 /**
144  * ice_dcbnl_getdcbx - retrieve current DCBX capability
145  * @netdev: pointer to the netdev struct
146  */
147 static u8 ice_dcbnl_getdcbx(struct net_device *netdev)
148 {
149 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
150 
151 	return pf->dcbx_cap;
152 }
153 
154 /**
155  * ice_dcbnl_setdcbx - set required DCBX capability
156  * @netdev: the corresponding netdev
157  * @mode: required mode
158  */
159 static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
160 {
161 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
162 	struct ice_qos_cfg *qos_cfg;
163 
164 	/* if FW LLDP agent is running, DCBNL not allowed to change mode */
165 	if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
166 		return ICE_DCB_NO_HW_CHG;
167 
168 	/* No support for LLD_MANAGED modes or CEE+IEEE */
169 	if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
170 	    ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) ||
171 	    !(mode & DCB_CAP_DCBX_HOST))
172 		return ICE_DCB_NO_HW_CHG;
173 
174 	/* Already set to the given mode, no change needed */
175 	if (mode == pf->dcbx_cap)
176 		return ICE_DCB_NO_HW_CHG;
177 
178 	if (pf->lag && pf->lag->bonded) {
179 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
180 		return ICE_DCB_NO_HW_CHG;
181 	}
182 
183 	qos_cfg = &pf->hw.port_info->qos_cfg;
184 
185 	/* DSCP configuration is not DCBx negotiated */
186 	if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
187 		return ICE_DCB_NO_HW_CHG;
188 
189 	pf->dcbx_cap = mode;
190 
191 	if (mode & DCB_CAP_DCBX_VER_CEE)
192 		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
193 	else
194 		qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
195 
196 	dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
197 	return ICE_DCB_HW_CHG_RST;
198 }
199 
200 /**
201  * ice_dcbnl_get_perm_hw_addr - MAC address used by DCBX
202  * @netdev: pointer to netdev struct
203  * @perm_addr: buffer to return permanent MAC address
204  */
205 static void ice_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr)
206 {
207 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
208 	struct ice_port_info *pi = pf->hw.port_info;
209 	int i, j;
210 
211 	memset(perm_addr, 0xff, MAX_ADDR_LEN);
212 
213 	for (i = 0; i < netdev->addr_len; i++)
214 		perm_addr[i] = pi->mac.perm_addr[i];
215 
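	/* Repeat the port MAC in the next addr_len bytes; ice has no separate
	 * SAN address to report here.
	 */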
216 	for (j = 0; j < netdev->addr_len; j++, i++)
217 		perm_addr[i] = pi->mac.perm_addr[j];
218 }
219 
220 /**
221  * ice_get_pfc_delay - Retrieve PFC Link Delay
222  * @hw: pointer to HW struct
223  * @delay: holds the PFC Link Delay value
224  */
225 static void ice_get_pfc_delay(struct ice_hw *hw, u16 *delay)
226 {
227 	u32 val;
228 
229 	val = rd32(hw, PRTDCB_GENC);
230 	*delay = (u16)((val & PRTDCB_GENC_PFCLDA_M) >> PRTDCB_GENC_PFCLDA_S);
231 }
232 
233 /**
234  * ice_dcbnl_getpfc - retrieve local IEEE PFC config
235  * @netdev: pointer to netdev struct
236  * @pfc: struct to hold PFC info
237  */
238 static int ice_dcbnl_getpfc(struct net_device *netdev, struct ieee_pfc *pfc)
239 {
240 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
241 	struct ice_port_info *pi = pf->hw.port_info;
242 	struct ice_dcbx_cfg *dcbxcfg;
243 	int i;
244 
245 	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
246 	pfc->pfc_cap = dcbxcfg->pfc.pfccap;
247 	pfc->pfc_en = dcbxcfg->pfc.pfcena;
248 	pfc->mbc = dcbxcfg->pfc.mbc;
249 	ice_get_pfc_delay(&pf->hw, &pfc->delay);
250 
251 	ice_for_each_traffic_class(i) {
252 		pfc->requests[i] = pf->stats.priority_xoff_tx[i];
253 		pfc->indications[i] = pf->stats.priority_xoff_rx[i];
254 	}
255 
256 	return 0;
257 }
258 
259 /**
260  * ice_dcbnl_setpfc - set local IEEE PFC config
261  * @netdev: pointer to relevant netdev
262  * @pfc: pointer to struct holding PFC config
263  */
264 static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
265 {
266 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
267 	struct ice_dcbx_cfg *new_cfg;
268 	int err;
269 
270 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
271 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
272 		return -EINVAL;
273 
274 	if (pf->lag && pf->lag->bonded) {
275 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
276 		return -EINVAL;
277 	}
278 
279 	mutex_lock(&pf->tc_mutex);
280 
281 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
282 
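	/* If no PFC capability was specified, default to the number of TCs
	 * the device supports.
	 */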
283 	if (pfc->pfc_cap)
284 		new_cfg->pfc.pfccap = pfc->pfc_cap;
285 	else
286 		new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
287 
288 	new_cfg->pfc.pfcena = pfc->pfc_en;
289 
290 	err = ice_pf_dcb_cfg(pf, new_cfg, true);
291 	if (err == ICE_DCB_HW_CHG_RST)
292 		ice_dcbnl_devreset(netdev);
293 	if (err == ICE_DCB_NO_HW_CHG)
294 		err = ICE_DCB_HW_CHG_RST;
295 	mutex_unlock(&pf->tc_mutex);
296 	return err;
297 }
298 
299 /**
300  * ice_dcbnl_get_pfc_cfg - Get CEE PFC config
301  * @netdev: pointer to netdev struct
302  * @prio: corresponding user priority
303  * @setting: the PFC setting for given priority
304  */
305 static void
306 ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
307 {
308 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
309 	struct ice_port_info *pi = pf->hw.port_info;
310 
311 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
312 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
313 		return;
314 
315 	if (prio >= ICE_MAX_USER_PRIORITY)
316 		return;
317 
318 	*setting = (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
319 	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
320 		prio, *setting, pi->qos_cfg.local_dcbx_cfg.pfc.pfcena);
321 }
322 
323 /**
324  * ice_dcbnl_set_pfc_cfg - Set CEE PFC config
325  * @netdev: the corresponding netdev
326  * @prio: User Priority
327  * @set: PFC setting to apply
328  */
329 static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
330 {
331 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
332 	struct ice_dcbx_cfg *new_cfg;
333 
334 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
335 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
336 		return;
337 
338 	if (prio >= ICE_MAX_USER_PRIORITY)
339 		return;
340 
341 	if (pf->lag && pf->lag->bonded) {
342 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
343 		return;
344 	}
345 
346 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
347 
348 	new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
349 	if (set)
350 		new_cfg->pfc.pfcena |= BIT(prio);
351 	else
352 		new_cfg->pfc.pfcena &= ~BIT(prio);
353 
354 	dev_dbg(ice_pf_to_dev(pf), "Set PFC config UP:%d set:%d pfcena:0x%x\n",
355 		prio, set, new_cfg->pfc.pfcena);
356 }
357 
358 /**
359  * ice_dcbnl_getpfcstate - get CEE PFC mode
360  * @netdev: pointer to netdev struct
361  */
362 static u8 ice_dcbnl_getpfcstate(struct net_device *netdev)
363 {
364 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
365 	struct ice_port_info *pi = pf->hw.port_info;
366 
367 	/* Return enabled if any UP enabled for PFC */
368 	if (pi->qos_cfg.local_dcbx_cfg.pfc.pfcena)
369 		return 1;
370 
371 	return 0;
372 }
373 
374 /**
375  * ice_dcbnl_getstate - get DCB enabled state
376  * @netdev: pointer to netdev struct
377  */
378 static u8 ice_dcbnl_getstate(struct net_device *netdev)
379 {
380 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
381 	u8 state = 0;
382 
383 	state = test_bit(ICE_FLAG_DCB_ENA, pf->flags);
384 
385 	dev_dbg(ice_pf_to_dev(pf), "DCB enabled state = %d\n", state);
386 	return state;
387 }
388 
389 /**
390  * ice_dcbnl_setstate - Set CEE DCB state
391  * @netdev: pointer to relevant netdev
392  * @state: state value to set
393  */
394 static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
395 {
396 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
397 
398 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
399 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
400 		return ICE_DCB_NO_HW_CHG;
401 
402 	if (pf->lag && pf->lag->bonded) {
403 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
404 		return ICE_DCB_NO_HW_CHG;
405 	}
406 
407 	/* Nothing to do */
408 	if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
409 		return ICE_DCB_NO_HW_CHG;
410 
411 	if (state) {
412 		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
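		/* Seed the desired config from the currently active config so
		 * later DCB changes start from the running settings.
		 */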
413 		memcpy(&pf->hw.port_info->qos_cfg.desired_dcbx_cfg,
414 		       &pf->hw.port_info->qos_cfg.local_dcbx_cfg,
415 		       sizeof(struct ice_dcbx_cfg));
416 	} else {
417 		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
418 	}
419 
420 	return ICE_DCB_HW_CHG;
421 }
422 
423 /**
424  * ice_dcbnl_get_pg_tc_cfg_tx - get CEE PG Tx config
425  * @netdev: pointer to netdev struct
426  * @prio: the corresponding user priority
427  * @prio_type: traffic priority type
428  * @pgid: the BW group ID the traffic class belongs to
429  * @bw_pct: BW percentage for the corresponding BWG
430  * @up_map: prio mapped to corresponding TC
431  */
432 static void
433 ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
434 			   u8 __always_unused *prio_type, u8 *pgid,
435 			   u8 __always_unused *bw_pct,
436 			   u8 __always_unused *up_map)
437 {
438 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
439 	struct ice_port_info *pi = pf->hw.port_info;
440 
441 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
442 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
443 		return;
444 
445 	if (prio >= ICE_MAX_USER_PRIORITY)
446 		return;
447 
448 	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
449 	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
450 		*pgid);
451 }
452 
453 /**
454  * ice_dcbnl_set_pg_tc_cfg_tx - set CEE PG Tx config
455  * @netdev: pointer to relevant netdev
456  * @tc: the corresponding traffic class
457  * @prio_type: the traffic priority type
458  * @bwg_id: the BW group ID the TC belongs to
459  * @bw_pct: the BW percentage for the BWG
460  * @up_map: prio mapped to corresponding TC
461  */
462 static void
463 ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
464 			   u8 __always_unused prio_type,
465 			   u8 __always_unused bwg_id,
466 			   u8 __always_unused bw_pct, u8 up_map)
467 {
468 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
469 	struct ice_dcbx_cfg *new_cfg;
470 	int i;
471 
472 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
473 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
474 		return;
475 
476 	if (tc >= ICE_MAX_TRAFFIC_CLASS)
477 		return;
478 
479 	if (pf->lag && pf->lag->bonded) {
480 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
481 		return;
482 	}
483 
484 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
485 
486 	/* prio_type, bwg_id and bw_pct per UP are not supported */
487 
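	/* up_map is a bitmap of user priorities; map each priority that is
	 * set to the given TC.
	 */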
488 	ice_for_each_traffic_class(i) {
489 		if (up_map & BIT(i))
490 			new_cfg->etscfg.prio_table[i] = tc;
491 	}
492 	new_cfg->etscfg.tsatable[tc] = ICE_IEEE_TSA_ETS;
493 }
494 
495 /**
496  * ice_dcbnl_get_pg_bwg_cfg_tx - Get CEE PGBW config
497  * @netdev: pointer to the netdev struct
498  * @pgid: corresponding traffic class
499  * @bw_pct: the BW percentage for the corresponding TC
500  */
501 static void
502 ice_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 *bw_pct)
503 {
504 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
505 	struct ice_port_info *pi = pf->hw.port_info;
506 
507 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
508 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
509 		return;
510 
511 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
512 		return;
513 
514 	*bw_pct = pi->qos_cfg.local_dcbx_cfg.etscfg.tcbwtable[pgid];
515 	dev_dbg(ice_pf_to_dev(pf), "Get PG BW config tc=%d bw_pct=%d\n",
516 		pgid, *bw_pct);
517 }
518 
519 /**
520  * ice_dcbnl_set_pg_bwg_cfg_tx - set CEE PG Tx BW config
521  * @netdev: the corresponding netdev
522  * @pgid: Corresponding traffic class
523  * @bw_pct: the BW percentage for the specified TC
524  */
525 static void
526 ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
527 {
528 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
529 	struct ice_dcbx_cfg *new_cfg;
530 
531 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
532 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
533 		return;
534 
535 	if (pgid >= ICE_MAX_TRAFFIC_CLASS)
536 		return;
537 
538 	if (pf->lag && pf->lag->bonded) {
539 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
540 		return;
541 	}
542 
543 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
544 
545 	new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
546 }
547 
548 /**
549  * ice_dcbnl_get_pg_tc_cfg_rx - Get CEE PG Rx config
550  * @netdev: pointer to netdev struct
551  * @prio: the corresponding user priority
552  * @prio_type: the traffic priority type
553  * @pgid: the PG ID
554  * @bw_pct: the BW percentage for the corresponding BWG
555  * @up_map: prio mapped to corresponding TC
556  */
557 static void
558 ice_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int prio,
559 			   u8 __always_unused *prio_type, u8 *pgid,
560 			   u8 __always_unused *bw_pct,
561 			   u8 __always_unused *up_map)
562 {
563 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
564 	struct ice_port_info *pi = pf->hw.port_info;
565 
566 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
567 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
568 		return;
569 
570 	if (prio >= ICE_MAX_USER_PRIORITY)
571 		return;
572 
573 	*pgid = pi->qos_cfg.local_dcbx_cfg.etscfg.prio_table[prio];
574 }
575 
576 /**
577  * ice_dcbnl_set_pg_tc_cfg_rx - set CEE PG Rx config (not supported)
578  * @netdev: relevant netdev struct
579  * @prio: corresponding user priority
580  * @prio_type: the traffic priority type
581  * @pgid: the PG ID
582  * @bw_pct: BW percentage for corresponding BWG
583  * @up_map: prio mapped to corresponding TC
584  *
585  * lldpad requires this function pointer to be non-NULL to complete CEE config.
586  */
587 static void
588 ice_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev,
589 			   int __always_unused prio,
590 			   u8 __always_unused prio_type,
591 			   u8 __always_unused pgid,
592 			   u8 __always_unused bw_pct,
593 			   u8 __always_unused up_map)
594 {
595 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
596 
597 	dev_dbg(ice_pf_to_dev(pf), "Rx TC PG Config Not Supported.\n");
598 }
599 
600 /**
601  * ice_dcbnl_get_pg_bwg_cfg_rx - Get CEE PG BW Rx config
602  * @netdev: pointer to netdev struct
603  * @pgid: the corresponding traffic class
604  * @bw_pct: the BW percentage for the corresponding TC
605  */
606 static void
607 ice_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
608 			    u8 *bw_pct)
609 {
610 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
611 
612 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
613 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
614 		return;
615 
616 	*bw_pct = 0;
617 }
618 
619 /**
620  * ice_dcbnl_set_pg_bwg_cfg_rx - set CEE PG Rx BW config (not supported)
621  * @netdev: the corresponding netdev
622  * @pgid: corresponding TC
623  * @bw_pct: BW percentage for given TC
624  *
625  * lldpad requires this function pointer to be non-NULL to complete CEE config.
626  */
627 static void
628 ice_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int __always_unused pgid,
629 			    u8 __always_unused bw_pct)
630 {
631 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
632 
633 	dev_dbg(ice_pf_to_dev(pf), "Rx BWG PG Config Not Supported.\n");
634 }
635 
636 /**
637  * ice_dcbnl_get_cap - Get DCBX capabilities of adapter
638  * @netdev: pointer to netdev struct
639  * @capid: the capability type
640  * @cap: the capability value
641  */
642 static u8 ice_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
643 {
644 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
645 
646 	if (!(test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)))
647 		return ICE_DCB_NO_HW_CHG;
648 
649 	switch (capid) {
650 	case DCB_CAP_ATTR_PG:
651 		*cap = true;
652 		break;
653 	case DCB_CAP_ATTR_PFC:
654 		*cap = true;
655 		break;
656 	case DCB_CAP_ATTR_UP2TC:
657 		*cap = false;
658 		break;
659 	case DCB_CAP_ATTR_PG_TCS:
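		/* 0x80: support for 8 traffic classes */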
660 		*cap = 0x80;
661 		break;
662 	case DCB_CAP_ATTR_PFC_TCS:
663 		*cap = 0x80;
664 		break;
665 	case DCB_CAP_ATTR_GSP:
666 		*cap = false;
667 		break;
668 	case DCB_CAP_ATTR_BCN:
669 		*cap = false;
670 		break;
671 	case DCB_CAP_ATTR_DCBX:
672 		*cap = pf->dcbx_cap;
673 		break;
674 	default:
675 		*cap = false;
676 		break;
677 	}
678 
679 	dev_dbg(ice_pf_to_dev(pf), "DCBX Get Capability cap=%d capval=0x%x\n",
680 		capid, *cap);
681 	return 0;
682 }
683 
684 /**
685  * ice_dcbnl_getapp - get CEE APP
686  * @netdev: pointer to netdev struct
687  * @idtype: the App selector
688  * @id: the App ethtype or port number
689  */
690 static int ice_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
691 {
692 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
693 	struct dcb_app app = {
694 				.selector = idtype,
695 				.protocol = id,
696 			     };
697 
698 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
699 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
700 		return -EINVAL;
701 
702 	return dcb_getapp(netdev, &app);
703 }
704 
705 /**
706  * ice_dcbnl_find_app - Search for APP in given DCB config
707  * @cfg: struct to hold DCBX config
708  * @app: struct to hold app data to look for
709  */
710 static bool
711 ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
712 		   struct ice_dcb_app_priority_table *app)
713 {
714 	unsigned int i;
715 
716 	for (i = 0; i < cfg->numapps; i++) {
717 		if (app->selector == cfg->app[i].selector &&
718 		    app->prot_id == cfg->app[i].prot_id &&
719 		    app->priority == cfg->app[i].priority)
720 			return true;
721 	}
722 
723 	return false;
724 }
725 
726 #define ICE_BYTES_PER_DSCP_VAL		8
727 
728 /**
729  * ice_dcbnl_setapp - set local IEEE App config
730  * @netdev: relevant netdev struct
731  * @app: struct to hold app config info
732  */
733 static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
734 {
735 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
736 	struct ice_dcb_app_priority_table new_app;
737 	struct ice_dcbx_cfg *old_cfg, *new_cfg;
738 	u8 max_tc;
739 	int ret;
740 
741 	/* ONLY DSCP APP TLVs have operational significance */
742 	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
743 		return -EINVAL;
744 
745 	/* only allow APP TLVs in SW Mode */
746 	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
747 		netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
748 		return -EINVAL;
749 	}
750 
751 	if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
752 		return -EINVAL;
753 
754 	if (!ice_is_feature_supported(pf, ICE_F_DSCP))
755 		return -EOPNOTSUPP;
756 
757 	if (app->protocol >= ICE_DSCP_NUM_VAL) {
758 		netdev_err(netdev, "DSCP value 0x%04X out of range\n",
759 			   app->protocol);
760 		return -EINVAL;
761 	}
762 
763 	if (pf->lag && pf->lag->bonded) {
764 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
765 		return -EINVAL;
766 	}
767 
768 	max_tc = pf->hw.func_caps.common_cap.maxtc;
769 	if (app->priority >= max_tc) {
770 		netdev_err(netdev, "TC %d out of range, max TC %d\n",
771 			   app->priority, max_tc);
772 		return -EINVAL;
773 	}
774 
775 	/* grab TC mutex */
776 	mutex_lock(&pf->tc_mutex);
777 
778 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
779 	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
780 
781 	ret = dcb_ieee_setapp(netdev, app);
782 	if (ret)
783 		goto setapp_out;
784 
785 	if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
786 		netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
787 			   app->protocol);
788 		ret = dcb_ieee_delapp(netdev, app);
789 		if (ret)
790 			netdev_err(netdev, "Failed to delete re-mapping TLV\n");
791 		ret = -EINVAL;
792 		goto setapp_out;
793 	}
794 
795 	new_app.selector = app->selector;
796 	new_app.prot_id = app->protocol;
797 	new_app.priority = app->priority;
798 
799 	/* If port is not in DSCP mode, need to set */
800 	if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
801 		int i, j;
802 
803 		/* set DSCP mode */
804 		ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
805 					  NULL);
806 		if (ret) {
807 			netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
808 				   ret);
809 			goto setapp_out;
810 		}
811 		netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");
812 
813 		new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
814 
815 		/* set default DSCP QoS values */
816 		new_cfg->etscfg.willing = 0;
817 		new_cfg->pfc.pfccap = max_tc;
818 		new_cfg->pfc.willing = 0;
819 
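		/* Spread the DSCP range across the supported TCs so every
		 * DSCP value has a default mapping after the mode switch.
		 */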
820 		for (i = 0; i < max_tc; i++)
821 			for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
822 				int dscp, offset;
823 
824 				dscp = (i * max_tc) + j;
825 				offset = max_tc * ICE_BYTES_PER_DSCP_VAL;
826 
827 				new_cfg->dscp_map[dscp] = i;
828 				/* if fewer than 8 TCs are supported */
829 				if (max_tc < ICE_MAX_TRAFFIC_CLASS)
830 					new_cfg->dscp_map[dscp + offset] = i;
831 			}
832 
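		/* Default ETS for DSCP mode: TC0 carries all bandwidth and the
		 * remaining TCs get a 1:1 priority mapping with no guaranteed
		 * bandwidth.
		 */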
833 		new_cfg->etscfg.tcbwtable[0] = 100;
834 		new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
835 		new_cfg->etscfg.prio_table[0] = 0;
836 
837 		for (i = 1; i < max_tc; i++) {
838 			new_cfg->etscfg.tcbwtable[i] = 0;
839 			new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
840 			new_cfg->etscfg.prio_table[i] = i;
841 		}
842 	} /* end of switching to DSCP mode */
843 
844 	/* apply new mapping for this DSCP value */
845 	new_cfg->dscp_map[app->protocol] = app->priority;
846 	new_cfg->app[new_cfg->numapps++] = new_app;
847 
848 	ret = ice_pf_dcb_cfg(pf, new_cfg, true);
849 	/* return of zero indicates new cfg applied */
850 	if (ret == ICE_DCB_HW_CHG_RST)
851 		ice_dcbnl_devreset(netdev);
852 	else
853 		ret = ICE_DCB_NO_HW_CHG;
854 
855 setapp_out:
856 	mutex_unlock(&pf->tc_mutex);
857 	return ret;
858 }
859 
860 /**
861  * ice_dcbnl_delapp - Delete local IEEE App config
862  * @netdev: relevant netdev
863  * @app: struct to hold app to delete
864  *
865  * Will not delete first application required by the FW
866  */
867 static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
868 {
869 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
870 	struct ice_dcbx_cfg *old_cfg, *new_cfg;
871 	unsigned int i, j;
872 	int ret = 0;
873 
874 	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
875 		netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
876 		return -EINVAL;
877 	}
878 
879 	if (pf->lag && pf->lag->bonded) {
880 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
881 		return -EINVAL;
882 	}
883 
884 	mutex_lock(&pf->tc_mutex);
885 	old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
886 
887 	ret = dcb_ieee_delapp(netdev, app);
888 	if (ret)
889 		goto delapp_out;
890 
891 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
892 
893 	for (i = 0; i < new_cfg->numapps; i++) {
894 		if (app->selector == new_cfg->app[i].selector &&
895 		    app->protocol == new_cfg->app[i].prot_id &&
896 		    app->priority == new_cfg->app[i].priority) {
897 			new_cfg->app[i].selector = 0;
898 			new_cfg->app[i].prot_id = 0;
899 			new_cfg->app[i].priority = 0;
900 			break;
901 		}
902 	}
903 
904 	/* Did not find DCB App */
905 	if (i == new_cfg->numapps) {
906 		ret = -EINVAL;
907 		goto delapp_out;
908 	}
909 
910 	new_cfg->numapps--;
911 
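	/* Close the gap left by the deleted entry by shifting the remaining
	 * APPs down one slot.
	 */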
912 	for (j = i; j < new_cfg->numapps; j++) {
913 		new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
914 		new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
915 		new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
916 	}
917 
918 	/* if not a DSCP APP TLV or DSCP is not supported, we are done */
919 	if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
920 	    !ice_is_feature_supported(pf, ICE_F_DSCP)) {
921 		ret = ICE_DCB_HW_CHG;
922 		goto delapp_out;
923 	}
924 
925 	/* if DSCP TLV, then need to address change in mapping */
926 	clear_bit(app->protocol, new_cfg->dscp_mapped);
927 	/* remap this DSCP value to default value */
928 	new_cfg->dscp_map[app->protocol] = app->protocol %
929 					   ICE_BYTES_PER_DSCP_VAL;
930 
931 	/* if the last DSCP mapping just got deleted, need to switch
932 	 * to L2 VLAN QoS mode
933 	 */
934 	if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
935 	    new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
936 		ret = ice_aq_set_pfc_mode(&pf->hw,
937 					  ICE_AQC_PFC_VLAN_BASED_PFC,
938 					  NULL);
939 		if (ret) {
940 			netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
941 				    ret);
942 			goto delapp_out;
943 		}
944 		netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");
945 
946 		new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
947 
948 		ret = ice_dcb_sw_dflt_cfg(pf, true, true);
949 	} else {
950 		ret = ice_pf_dcb_cfg(pf, new_cfg, true);
951 	}
952 
953 	/* return of ICE_DCB_HW_CHG_RST indicates new cfg applied
954 	 * and reset needs to be performed
955 	 */
956 	if (ret == ICE_DCB_HW_CHG_RST)
957 		ice_dcbnl_devreset(netdev);
958 
959 	/* if the change was not significant enough to actually call
960 	 * the reconfiguration flow, we still need to tell the caller that
961 	 * their request was successfully handled
962 	 */
963 	if (ret == ICE_DCB_NO_HW_CHG)
964 		ret = ICE_DCB_HW_CHG;
965 
966 delapp_out:
967 	mutex_unlock(&pf->tc_mutex);
968 	return ret;
969 }
970 
971 /**
972  * ice_dcbnl_cee_set_all - Commit CEE DCB settings to HW
973  * @netdev: the corresponding netdev
974  */
975 static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
976 {
977 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
978 	struct ice_dcbx_cfg *new_cfg;
979 	int err;
980 
981 	if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
982 	    !(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
983 		return ICE_DCB_NO_HW_CHG;
984 
985 	if (pf->lag && pf->lag->bonded) {
986 		netdev_err(netdev, "DCB changes not allowed when in a bond\n");
987 		return ICE_DCB_NO_HW_CHG;
988 	}
989 
990 	new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
991 
992 	mutex_lock(&pf->tc_mutex);
993 
994 	err = ice_pf_dcb_cfg(pf, new_cfg, true);
995 
996 	mutex_unlock(&pf->tc_mutex);
997 	return (err != ICE_DCB_HW_CHG_RST) ? ICE_DCB_NO_HW_CHG : err;
998 }
999 
1000 static const struct dcbnl_rtnl_ops dcbnl_ops = {
1001 	/* IEEE 802.1Qaz std */
1002 	.ieee_getets = ice_dcbnl_getets,
1003 	.ieee_setets = ice_dcbnl_setets,
1004 	.ieee_getpfc = ice_dcbnl_getpfc,
1005 	.ieee_setpfc = ice_dcbnl_setpfc,
1006 	.ieee_setapp = ice_dcbnl_setapp,
1007 	.ieee_delapp = ice_dcbnl_delapp,
1008 
1009 	/* CEE std */
1010 	.getstate = ice_dcbnl_getstate,
1011 	.setstate = ice_dcbnl_setstate,
1012 	.getpermhwaddr = ice_dcbnl_get_perm_hw_addr,
1013 	.setpgtccfgtx = ice_dcbnl_set_pg_tc_cfg_tx,
1014 	.setpgbwgcfgtx = ice_dcbnl_set_pg_bwg_cfg_tx,
1015 	.setpgtccfgrx = ice_dcbnl_set_pg_tc_cfg_rx,
1016 	.setpgbwgcfgrx = ice_dcbnl_set_pg_bwg_cfg_rx,
1017 	.getpgtccfgtx = ice_dcbnl_get_pg_tc_cfg_tx,
1018 	.getpgbwgcfgtx = ice_dcbnl_get_pg_bwg_cfg_tx,
1019 	.getpgtccfgrx = ice_dcbnl_get_pg_tc_cfg_rx,
1020 	.getpgbwgcfgrx = ice_dcbnl_get_pg_bwg_cfg_rx,
1021 	.setpfccfg = ice_dcbnl_set_pfc_cfg,
1022 	.getpfccfg = ice_dcbnl_get_pfc_cfg,
1023 	.setall = ice_dcbnl_cee_set_all,
1024 	.getcap = ice_dcbnl_get_cap,
1025 	.getnumtcs = ice_dcbnl_getnumtcs,
1026 	.getpfcstate = ice_dcbnl_getpfcstate,
1027 	.getapp = ice_dcbnl_getapp,
1028 
1029 	/* DCBX configuration */
1030 	.getdcbx = ice_dcbnl_getdcbx,
1031 	.setdcbx = ice_dcbnl_setdcbx,
1032 };
1033 
1034 /**
1035  * ice_dcbnl_set_all - set all the apps and ieee data from DCBX config
1036  * @vsi: pointer to VSI struct
1037  */
1038 void ice_dcbnl_set_all(struct ice_vsi *vsi)
1039 {
1040 	struct net_device *netdev = vsi->netdev;
1041 	struct ice_dcbx_cfg *dcbxcfg;
1042 	struct ice_port_info *pi;
1043 	struct dcb_app sapp;
1044 	struct ice_pf *pf;
1045 	unsigned int i;
1046 
1047 	if (!netdev)
1048 		return;
1049 
1050 	pf = ice_netdev_to_pf(netdev);
1051 	pi = pf->hw.port_info;
1052 
1053 	/* SW DCB taken care of by SW Default Config */
1054 	if (pf->dcbx_cap & DCB_CAP_DCBX_HOST)
1055 		return;
1056 
1057 	/* DCB not enabled */
1058 	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
1059 		return;
1060 
1061 	dcbxcfg = &pi->qos_cfg.local_dcbx_cfg;
1062 
1063 	for (i = 0; i < dcbxcfg->numapps; i++) {
1064 		u8 prio, tc_map;
1065 
1066 		prio = dcbxcfg->app[i].priority;
1067 		tc_map = BIT(dcbxcfg->etscfg.prio_table[prio]);
1068 
1069 		/* Add APP only if the TC is enabled for this VSI */
1070 		if (tc_map & vsi->tc_cfg.ena_tc) {
1071 			sapp.selector = dcbxcfg->app[i].selector;
1072 			sapp.protocol = dcbxcfg->app[i].prot_id;
1073 			sapp.priority = prio;
1074 			dcb_ieee_setapp(netdev, &sapp);
1075 		}
1076 	}
1077 	/* Notify user-space of the changes */
1078 	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
1079 }
1080 
1081 /**
1082  * ice_dcbnl_vsi_del_app - Delete APP on all VSIs
1083  * @vsi: pointer to the main VSI
1084  * @app: APP to delete
1085  *
1086  * Delete given APP from all the VSIs for given PF
1087  */
1088 static void
1089 ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
1090 		      struct ice_dcb_app_priority_table *app)
1091 {
1092 	struct dcb_app sapp;
1093 	int err;
1094 
1095 	sapp.selector = app->selector;
1096 	sapp.protocol = app->prot_id;
1097 	sapp.priority = app->priority;
1098 	err = ice_dcbnl_delapp(vsi->netdev, &sapp);
1099 	dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
1100 		vsi->idx, err, app->selector, app->prot_id, app->priority);
1101 }
1102 
1103 /**
1104  * ice_dcbnl_flush_apps - Delete all removed APPs
1105  * @pf: the corresponding PF
1106  * @old_cfg: old DCBX configuration data
1107  * @new_cfg: new DCBX configuration data
1108  *
1109  * Find and delete all APPS that are not present in the passed
1110  * DCB configuration
1111  */
1112 void
1113 ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
1114 		     struct ice_dcbx_cfg *new_cfg)
1115 {
1116 	struct ice_vsi *main_vsi = ice_get_main_vsi(pf);
1117 	unsigned int i;
1118 
1119 	if (!main_vsi)
1120 		return;
1121 
1122 	for (i = 0; i < old_cfg->numapps; i++) {
1123 		struct ice_dcb_app_priority_table app = old_cfg->app[i];
1124 
1125 		/* The APP is not available anymore; delete it */
1126 		if (!ice_dcbnl_find_app(new_cfg, &app))
1127 			ice_dcbnl_vsi_del_app(main_vsi, &app);
1128 	}
1129 }
1130 
1131 /**
1132  * ice_dcbnl_setup - setup DCBNL
1133  * @vsi: VSI to get associated netdev from
1134  */
1135 void ice_dcbnl_setup(struct ice_vsi *vsi)
1136 {
1137 	struct net_device *netdev = vsi->netdev;
1138 	struct ice_pf *pf;
1139 
1140 	pf = ice_netdev_to_pf(netdev);
1141 	if (!test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
1142 		return;
1143 
1144 	netdev->dcbnl_ops = &dcbnl_ops;
1145 	ice_dcbnl_set_all(vsi);
1146 }
1147