// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include "ice_dcb_lib.h"

/**
 * ice_dcb_get_ena_tc - return bitmap of enabled TCs
 * @dcbcfg: DCB config to evaluate for enabled TCs
 */
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
	u8 i, num_tc, ena_tc = 1;	/* TC0 is always enabled */

	num_tc = ice_dcb_get_num_tc(dcbcfg);

	for (i = 0; i < num_tc; i++)
		ena_tc |= BIT(i);

	return ena_tc;
}

/**
 * ice_dcb_get_num_tc - Get the number of TCs from DCBX config
 * @dcbcfg: config to retrieve number of TCs from
 */
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
	bool tc_unused = false;
	u8 num_tc = 0;
	u8 ret = 0;
	int i;

	/* Scan the ETS Config Priority Table to find traffic classes
	 * enabled and create a bitmask of enabled TCs
	 */
	for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
		num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);

	/* Scan bitmask for contiguous TCs starting with TC0 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		if (num_tc & BIT(i)) {
			if (!tc_unused) {
				ret++;
			} else {
				pr_err("Non-contiguous TCs - Disabling DCB\n");
				return 1;
			}
		} else {
			tc_unused = true;
		}
	}

	/* There is always at least 1 TC */
	if (!ret)
		ret = 1;

	return ret;
}

/**
 * ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
 * @vsi: VSI owner of rings being updated
 */
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
	struct ice_ring *tx_ring, *rx_ring;
	u16 qoffset, qcount;
	int i, n;

	if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
		/* Reset the TC information */
		for (i = 0; i < vsi->num_txq; i++) {
			tx_ring = vsi->tx_rings[i];
			tx_ring->dcb_tc = 0;
		}
		for (i = 0; i < vsi->num_rxq; i++) {
			rx_ring = vsi->rx_rings[i];
			rx_ring->dcb_tc = 0;
		}
		return;
	}

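	/* DCB is enabled: stamp every ring in each enabled TC's queue range
	 * with its TC index so the Tx/Rx paths can map a ring to its TC.
	 */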
	ice_for_each_traffic_class(n) {
		if (!(vsi->tc_cfg.ena_tc & BIT(n)))
			break;

		qoffset = vsi->tc_cfg.tc_info[n].qoffset;
		qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
		for (i = qoffset; i < (qoffset + qcount); i++) {
			tx_ring = vsi->tx_rings[i];
			rx_ring = vsi->rx_rings[i];
			tx_ring->dcb_tc = n;
			rx_ring->dcb_tc = n;
		}
	}
}

/**
 * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
 * @pf: pointer to the PF struct
 *
 * The caller is expected to have disabled all VSIs before calling this
 * function. DCB is reconfigured based on local_dcbx_cfg.
 */
static void ice_pf_dcb_recfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
	u8 tc_map = 0;
	int v, ret;

	/* Update each VSI */
	ice_for_each_vsi(pf, v) {
		if (!pf->vsi[v])
			continue;

		if (pf->vsi[v]->type == ICE_VSI_PF)
			tc_map = ice_dcb_get_ena_tc(dcbcfg);
		else
			tc_map = ICE_DFLT_TRAFFIC_CLASS;

		ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
		if (ret)
			dev_err(&pf->pdev->dev,
				"Failed to config TC for VSI index: %d\n",
				pf->vsi[v]->idx);
		else
			ice_vsi_map_rings_to_vectors(pf->vsi[v]);
	}
}

/**
 * ice_pf_dcb_cfg - Apply new DCB configuration
 * @pf: pointer to the PF struct
 * @new_cfg: DCBX config to apply
 */
static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
{
	struct ice_dcbx_cfg *old_cfg, *curr_cfg;
	struct ice_aqc_port_ets_elem buf = { 0 };
	int ret = 0;

	curr_cfg = &pf->hw.port_info->local_dcbx_cfg;

	/* Enable DCB tagging only when more than one TC */
	if (ice_dcb_get_num_tc(new_cfg) > 1) {
		dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
	} else {
		dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
		clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
	}

	if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
		return ret;
	}

	/* Store old config in case FW config fails */
	old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
	if (!old_cfg)
		return -ENOMEM;
	memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));

	/* avoid race conditions by holding the lock while disabling and
	 * re-enabling the VSI
	 */
	rtnl_lock();
	ice_pf_dis_all_vsi(pf, true);

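	/* Commit the new config to the port's local copy and mirror the ETS
	 * config into the ETS recommendation before pushing it to HW.
	 */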
	memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
	memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));

	/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
	 * the new config came from the HW in the first place.
	 */
	if (pf->hw.port_info->is_sw_lldp) {
		ret = ice_set_dcb_cfg(pf->hw.port_info);
		if (ret) {
			dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
			/* Restore previous settings to local config */
			memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
			goto out;
		}
	}

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto out;
	}

	ice_pf_dcb_recfg(pf);

out:
	ice_pf_ena_all_vsi(pf, true);
	rtnl_unlock();
	devm_kfree(&pf->pdev->dev, old_cfg);
	return ret;
}

/**
 * ice_dcb_rebuild - rebuild DCB post reset
 * @pf: physical function instance
 */
void ice_dcb_rebuild(struct ice_pf *pf)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct ice_dcbx_cfg *prev_cfg;
	enum ice_status ret;
	u8 willing;

	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto dcb_error;
	}

	/* If DCB was not enabled previously, we are done */
	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
		return;

	/* Save current willing state and force FW to unwilling */
	willing = pf->hw.port_info->local_dcbx_cfg.etscfg.willing;
	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = 0x0;
	ret = ice_set_dcb_cfg(pf->hw.port_info);
	if (ret) {
		dev_err(&pf->pdev->dev, "Failed to set DCB to unwilling\n");
		goto dcb_error;
	}

	/* Retrieve DCB config and ensure same as current in SW */
	prev_cfg = devm_kmemdup(&pf->pdev->dev,
				&pf->hw.port_info->local_dcbx_cfg,
				sizeof(*prev_cfg), GFP_KERNEL);
	if (!prev_cfg) {
		dev_err(&pf->pdev->dev, "Failed to alloc space for DCB cfg\n");
		goto dcb_error;
	}

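	/* Re-read the DCB config from FW; it must match the snapshot taken
	 * above, otherwise the replayed config did not take effect.
	 */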
	ret = ice_init_dcb(&pf->hw);
	if (ret) {
		dev_err(&pf->pdev->dev, "Failed to re-init DCB after reset\n");
		devm_kfree(&pf->pdev->dev, prev_cfg);
		goto dcb_error;
	}

	if (memcmp(prev_cfg, &pf->hw.port_info->local_dcbx_cfg,
		   sizeof(*prev_cfg))) {
		/* difference in cfg detected - disable DCB till next MIB */
		dev_err(&pf->pdev->dev, "Set local MIB not accurate\n");
		devm_kfree(&pf->pdev->dev, prev_cfg);
		goto dcb_error;
	}

	/* fetched config congruent to previous configuration */
	devm_kfree(&pf->pdev->dev, prev_cfg);

	/* Configuration replayed - reset willing state to previous */
	pf->hw.port_info->local_dcbx_cfg.etscfg.willing = willing;
	ret = ice_set_dcb_cfg(pf->hw.port_info);
	if (ret) {
		dev_err(&pf->pdev->dev, "Fail restoring prev willing state\n");
		goto dcb_error;
	}
	dev_info(&pf->pdev->dev, "DCB restored after reset\n");
	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
	if (ret) {
		dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
		goto dcb_error;
	}

	return;

dcb_error:
	dev_err(&pf->pdev->dev, "Disabling DCB until new settings occur\n");
	prev_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*prev_cfg), GFP_KERNEL);
	if (!prev_cfg)
		return;
	prev_cfg->etscfg.willing = true;
	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
	ice_pf_dcb_cfg(pf, prev_cfg);
	devm_kfree(&pf->pdev->dev, prev_cfg);
}

/**
 * ice_dcb_init_cfg - set the initial DCB config in SW
 * @pf: PF to apply config to
 */
static int ice_dcb_init_cfg(struct ice_pf *pf)
{
	struct ice_dcbx_cfg *newcfg;
	struct ice_port_info *pi;
	int ret = 0;

	pi = pf->hw.port_info;
	newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
	if (!newcfg)
		return -ENOMEM;

	memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
	memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));

	dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
	if (ice_pf_dcb_cfg(pf, newcfg))
		ret = -EINVAL;

	devm_kfree(&pf->pdev->dev, newcfg);

	return ret;
}

/**
 * ice_dcb_sw_dflt_cfg - Apply a default DCB config
 * @pf: PF to apply config to
 */
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf)
{
	struct ice_aqc_port_ets_elem buf = { 0 };
	struct ice_dcbx_cfg *dcbcfg;
	struct ice_port_info *pi;
	struct ice_hw *hw;
	int ret;

	hw = &pf->hw;
	pi = hw->port_info;
	dcbcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*dcbcfg), GFP_KERNEL);
	if (!dcbcfg)
		return -ENOMEM;

	/* devm_kzalloc() already zeroed dcbcfg; only clear the port copy */
	memset(&pi->local_dcbx_cfg, 0, sizeof(*dcbcfg));

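	/* Default config: a single TC (TC0) owning 100% of the bandwidth
	 * with ETS scheduling, PFC capable on all 8 priorities, and one FCoE
	 * APP entry at priority 3; the local config is willing, the ETS
	 * recommendation is not.
	 */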
	dcbcfg->etscfg.willing = 1;
	dcbcfg->etscfg.maxtcs = 8;
	dcbcfg->etscfg.tcbwtable[0] = 100;
	dcbcfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;

	memcpy(&dcbcfg->etsrec, &dcbcfg->etscfg,
	       sizeof(dcbcfg->etsrec));
	dcbcfg->etsrec.willing = 0;

	dcbcfg->pfc.willing = 1;
	dcbcfg->pfc.pfccap = IEEE_8021QAZ_MAX_TCS;

	dcbcfg->numapps = 1;
	dcbcfg->app[0].selector = ICE_APP_SEL_ETHTYPE;
	dcbcfg->app[0].priority = 3;
	dcbcfg->app[0].prot_id = ICE_APP_PROT_ID_FCOE;

	ret = ice_pf_dcb_cfg(pf, dcbcfg);
	devm_kfree(&pf->pdev->dev, dcbcfg);
	if (ret)
		return ret;

	return ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
}

/**
 * ice_init_pf_dcb - initialize DCB for a PF
 * @pf: PF to initialize DCB for
 */
int ice_init_pf_dcb(struct ice_pf *pf)
{
	struct device *dev = &pf->pdev->dev;
	struct ice_port_info *port_info;
	struct ice_hw *hw = &pf->hw;
	int sw_default = 0;
	int err;

	port_info = hw->port_info;

	/* check if device is DCB capable */
	if (!hw->func_caps.common_cap.dcb) {
		dev_dbg(dev, "DCB not supported\n");
		return -EOPNOTSUPP;
	}

	/* Best effort to put DCBx and LLDP into a good state */
	port_info->dcbx_status = ice_get_dcbx_status(hw);
	if (port_info->dcbx_status != ICE_DCBX_STATUS_DONE &&
	    port_info->dcbx_status != ICE_DCBX_STATUS_IN_PROGRESS) {
		bool dcbx_status;

		/* Attempt to start LLDP engine. Ignore errors
		 * as this will error if it is already started
		 */
		ice_aq_start_lldp(hw, NULL);

		/* Attempt to start DCBX. Ignore errors as this
		 * will error if it is already started
		 */
		ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
	}

	err = ice_init_dcb(hw);
	if (err) {
		/* FW LLDP not in usable state, default to SW DCBx/LLDP */
		dev_info(dev, "FW LLDP not in usable state\n");
		hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
		hw->port_info->is_sw_lldp = true;
	}

	if (port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
		dev_info(dev, "DCBX disabled\n");

	/* LLDP disabled in FW */
	if (port_info->is_sw_lldp) {
		sw_default = 1;
		dev_info(dev, "DCBx/LLDP in SW mode.\n");
	}

	if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) {
		sw_default = 1;
		dev_info(dev, "DCBX not started\n");
	}

	if (sw_default) {
		err = ice_dcb_sw_dflt_cfg(pf);
		if (err) {
			dev_err(dev, "Failed to set local DCB config %d\n",
				err);
			err = -EIO;
			goto dcb_init_err;
		}

		pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
		set_bit(ICE_FLAG_DCB_ENA, pf->flags);
		return 0;
	}

	/* DCBX in FW and LLDP enabled in FW */
	pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;

	set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);

	err = ice_dcb_init_cfg(pf);
	if (err)
		goto dcb_init_err;

	dev_info(dev, "DCBX offload supported\n");
	return err;

dcb_init_err:
	dev_err(dev, "DCB init failed\n");
	return err;
}

/**
 * ice_update_dcb_stats - Update DCB stats counters
 * @pf: PF whose stats need to be updated
 */
void ice_update_dcb_stats(struct ice_pf *pf)
{
	struct ice_hw_port_stats *prev_ps, *cur_ps;
	struct ice_hw *hw = &pf->hw;
	u8 pf_id = hw->pf_id;
	int i;

	prev_ps = &pf->stats_prev;
	cur_ps = &pf->stats;

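	/* Priority flow control counters are maintained per user priority;
	 * update the XOFF/XON Rx/Tx and XON-to-XOFF counters for each of
	 * the eight priorities.
	 */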
	for (i = 0; i < 8; i++) {
		ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xoff_rx[i],
				  &cur_ps->priority_xoff_rx[i]);
		ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_rx[i],
				  &cur_ps->priority_xon_rx[i]);
		ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_tx[i],
				  &cur_ps->priority_xon_tx[i]);
		ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xoff_tx[i],
				  &cur_ps->priority_xoff_tx[i]);
		ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
				  pf->stat_prev_loaded,
				  &prev_ps->priority_xon_2_xoff[i],
				  &cur_ps->priority_xon_2_xoff[i]);
	}
}

/**
 * ice_tx_prepare_vlan_flags_dcb - prepare VLAN tagging for DCB
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 */
int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
			      struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	if (!test_bit(ICE_FLAG_DCB_ENA, tx_ring->vsi->back->flags))
		return 0;

	/* Insert 802.1p priority into VLAN header */
	if ((first->tx_flags & (ICE_TX_FLAGS_HW_VLAN | ICE_TX_FLAGS_SW_VLAN)) ||
	    skb->priority != TC_PRIO_CONTROL) {
		first->tx_flags &= ~ICE_TX_FLAGS_VLAN_PR_M;
		/* Mask the lower 3 bits to set the 802.1p priority */
		first->tx_flags |= (skb->priority & 0x7) <<
				   ICE_TX_FLAGS_VLAN_PR_S;
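		/* For a SW VLAN the updated TCI must be written back into the
		 * packet's VLAN header; with HW VLAN offload the priority is
		 * carried in tx_flags and inserted by the hardware.
		 */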
		if (first->tx_flags & ICE_TX_FLAGS_SW_VLAN) {
			struct vlan_ethhdr *vhdr;
			int rc;

			rc = skb_cow_head(skb, 0);
			if (rc < 0)
				return rc;
			vhdr = (struct vlan_ethhdr *)skb->data;
			vhdr->h_vlan_TCI = htons(first->tx_flags >>
						 ICE_TX_FLAGS_VLAN_S);
		} else {
			first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
		}
	}

	return 0;
}

/**
 * ice_dcb_process_lldp_set_mib_change - Process MIB change
 * @pf: ptr to ice_pf
 * @event: pointer to the admin queue receive event
 */
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
				    struct ice_rq_event_info *event)
{
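	/* In FW-managed (LLD_MANAGED) mode, parse the MIB from the event and
	 * apply it as the new DCB config; in host mode the change is only
	 * logged.
	 */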
	if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
		struct ice_dcbx_cfg *dcbcfg, *prev_cfg;
		int err;

		prev_cfg = &pf->hw.port_info->local_dcbx_cfg;
		dcbcfg = devm_kmemdup(&pf->pdev->dev, prev_cfg,
				      sizeof(*dcbcfg), GFP_KERNEL);
		if (!dcbcfg)
			return;

		err = ice_lldp_to_dcb_cfg(event->msg_buf, dcbcfg);
		if (!err)
			ice_pf_dcb_cfg(pf, dcbcfg);

		devm_kfree(&pf->pdev->dev, dcbcfg);

		/* Get updated DCBx data from firmware */
		err = ice_get_dcb_cfg(pf->hw.port_info);
		if (err)
			dev_err(&pf->pdev->dev,
				"Failed to get DCB config\n");
	} else {
		dev_dbg(&pf->pdev->dev,
			"MIB Change Event in HOST mode\n");
	}
}