// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_base.h"
#include "ice_flow.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"

/**
 * ice_vsi_type_str - maps VSI type enum to string equivalents
 * @type: VSI type enum
 */
const char *ice_vsi_type_str(enum ice_vsi_type type)
{
	switch (type) {
	case ICE_VSI_PF:
		return "ICE_VSI_PF";
	case ICE_VSI_VF:
		return "ICE_VSI_VF";
	case ICE_VSI_LB:
		return "ICE_VSI_LB";
	default:
		return "unknown";
	}
}

/**
 * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @ena: start or stop the Rx rings
 */
static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
{
	int i, ret = 0;

	for (i = 0; i < vsi->num_rxq; i++) {
		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
		if (ret)
			break;
	}

	return ret;
}

/**
 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
 * @vsi: VSI pointer
 *
 * On error: returns error code (negative)
 * On success: returns 0
 */
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* allocate memory for both Tx and Rx ring pointers */
	vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
				     sizeof(*vsi->tx_rings), GFP_KERNEL);
	if (!vsi->tx_rings)
		return -ENOMEM;

	vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
				     sizeof(*vsi->rx_rings), GFP_KERNEL);
	if (!vsi->rx_rings)
		goto err_rings;

	/* XDP will have vsi->alloc_txq Tx queues as well, so double the size */
	vsi->txq_map = devm_kcalloc(dev, (2 * vsi->alloc_txq),
				    sizeof(*vsi->txq_map), GFP_KERNEL);
	if (!vsi->txq_map)
		goto err_txq_map;

	vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
				    sizeof(*vsi->rxq_map), GFP_KERNEL);
	if (!vsi->rxq_map)
		goto err_rxq_map;

	/* There is no need to allocate q_vectors for a loopback VSI. */
	if (vsi->type == ICE_VSI_LB)
		return 0;

	/* allocate memory for q_vector pointers */
	vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
				      sizeof(*vsi->q_vectors), GFP_KERNEL);
	if (!vsi->q_vectors)
		goto err_vectors;

	return 0;

err_vectors:
	devm_kfree(dev, vsi->rxq_map);
err_rxq_map:
	devm_kfree(dev, vsi->txq_map);
err_txq_map:
	devm_kfree(dev, vsi->rx_rings);
err_rings:
	devm_kfree(dev, vsi->tx_rings);
	return -ENOMEM;
}

/**
 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* fall through */
	case ICE_VSI_LB:
		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
		break;
	default:
		dev_dbg(&vsi->back->pdev->dev,
			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
			vsi->type);
		break;
	}
}

/**
 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
 * @vsi: the VSI being configured
 * @vf_id: ID of the VF being configured
 */
static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vf *vf = NULL;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	switch (vsi->type) {
	case ICE_VSI_PF:
		vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf),
				       num_online_cpus());
		if (vsi->req_txq) {
			vsi->alloc_txq = vsi->req_txq;
			vsi->num_txq = vsi->req_txq;
		}

		pf->num_lan_tx = vsi->alloc_txq;

		/* only 1 Rx queue unless RSS is enabled */
		if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			vsi->alloc_rxq = 1;
		} else {
			vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf),
					       num_online_cpus());
			if (vsi->req_rxq) {
				vsi->alloc_rxq = vsi->req_rxq;
				vsi->num_rxq = vsi->req_rxq;
			}
		}

		pf->num_lan_rx = vsi->alloc_rxq;

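		/* one q_vector can service both a Tx and an Rx ring, so only
		 * max(alloc_txq, alloc_rxq) interrupt vectors are needed
		 */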
		vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq);
		break;
	case ICE_VSI_VF:
		vf = &pf->vf[vsi->vf_id];
		vsi->alloc_txq = vf->num_vf_qs;
		vsi->alloc_rxq = vf->num_vf_qs;
		/* pf->num_vf_msix includes the VF miscellaneous vector and
		 * the data queue interrupts. Since vsi->num_q_vectors is the
		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF)
		 * from the original vector count
		 */
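		 * (e.g. 5 MSI-X vectors per VF leave 4 for the data queues)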
		 */
		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
		break;
	case ICE_VSI_LB:
		vsi->alloc_txq = 1;
		vsi->alloc_rxq = 1;
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi->type);
		break;
	}

	ice_vsi_set_num_desc(vsi);
}

/**
 * ice_get_free_slot - get the next free (NULL) location index in the array
 * @array: array to search
 * @size: size of the array
 * @curr: last known occupied index to be used as a search hint
 *
 * void * is being used to keep the functionality generic. This lets us use
 * this function on any array of pointers.
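 * e.g. for an array {p0, p1, NULL, p2} with curr = 1, index 2 is returned.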
 */
static int ice_get_free_slot(void *array, int size, int curr)
{
	int **tmp_array = (int **)array;
	int next;

	if (curr < (size - 1) && !tmp_array[curr + 1]) {
		next = curr + 1;
	} else {
		int i = 0;

		while ((i < size) && (tmp_array[i]))
			i++;
		if (i == size)
			next = ICE_NO_VSI;
		else
			next = i;
	}
	return next;
}

/**
 * ice_vsi_delete - delete a VSI from the switch
 * @vsi: pointer to VSI being removed
 */
void ice_vsi_delete(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return;

	if (vsi->type == ICE_VSI_VF)
		ctxt->vf_num = vsi->vf_id;
	ctxt->vsi_num = vsi->vsi_num;

	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));

	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
	if (status)
		dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
			vsi->vsi_num, status);

	kfree(ctxt);
}

/**
 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
 * @vsi: pointer to VSI being cleared
 */
static void ice_vsi_free_arrays(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	/* free the ring and vector containers */
	if (vsi->q_vectors) {
		devm_kfree(dev, vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	if (vsi->tx_rings) {
		devm_kfree(dev, vsi->tx_rings);
		vsi->tx_rings = NULL;
	}
	if (vsi->rx_rings) {
		devm_kfree(dev, vsi->rx_rings);
		vsi->rx_rings = NULL;
	}
	if (vsi->txq_map) {
		devm_kfree(dev, vsi->txq_map);
		vsi->txq_map = NULL;
	}
	if (vsi->rxq_map) {
		devm_kfree(dev, vsi->rxq_map);
		vsi->rxq_map = NULL;
	}
}

/**
 * ice_vsi_clear - clean up and deallocate the provided VSI
 * @vsi: pointer to VSI being cleared
 *
 * This deallocates the VSI's queue resources, removes it from the PF's
 * VSI array if necessary, and deallocates the VSI
 *
 * Returns 0 on success, negative on failure
 */
int ice_vsi_clear(struct ice_vsi *vsi)
{
	struct ice_pf *pf = NULL;
	struct device *dev;

	if (!vsi)
		return 0;

	if (!vsi->back)
		return -EINVAL;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
		dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
		return -EINVAL;
	}

	mutex_lock(&pf->sw_mutex);

	/* update the PF for this cleared VSI */
	pf->vsi[vsi->idx] = NULL;
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

	ice_vsi_free_arrays(vsi);
	mutex_unlock(&pf->sw_mutex);
	devm_kfree(dev, vsi);

	return 0;
}

/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
 * @pf: board private structure
 * @type: type of VSI
 * @vf_id: ID of the VF being configured
 *
 * returns a pointer to a VSI on success, NULL on failure.
 */
static struct ice_vsi *
ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vsi *vsi = NULL;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->sw_mutex);

	/* If we have already allocated our maximum number of VSIs,
	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
	 * is available to be populated
	 */
	if (pf->next_vsi == ICE_NO_VSI) {
		dev_dbg(dev, "out of VSI slots!\n");
		goto unlock_pf;
	}

	vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
	if (!vsi)
		goto unlock_pf;

	vsi->type = type;
	vsi->back = pf;
	set_bit(__ICE_DOWN, vsi->state);

	vsi->idx = pf->next_vsi;

	if (type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	switch (vsi->type) {
	case ICE_VSI_PF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	case ICE_VSI_VF:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	case ICE_VSI_LB:
		if (ice_vsi_alloc_arrays(vsi))
			goto err_rings;
		break;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		goto unlock_pf;
	}

	/* fill VSI slot in the PF struct */
	pf->vsi[pf->next_vsi] = vsi;

	/* prepare pf->next_vsi for next use */
	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
					 pf->next_vsi);
	goto unlock_pf;

err_rings:
	devm_kfree(dev, vsi);
	vsi = NULL;
unlock_pf:
	mutex_unlock(&pf->sw_mutex);
	return vsi;
}

/**
 * ice_vsi_get_qs - Assign queues from PF to VSI
 * @vsi: the VSI to assign queues to
 *
 * Returns 0 on success and a negative value on error
 */
static int ice_vsi_get_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_qs_cfg tx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_txqs,
		.pf_map_size = pf->max_pf_txqs,
		.q_count = vsi->alloc_txq,
		.scatter_count = ICE_MAX_SCATTER_TXQS,
		.vsi_map = vsi->txq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	struct ice_qs_cfg rx_qs_cfg = {
		.qs_mutex = &pf->avail_q_mutex,
		.pf_map = pf->avail_rxqs,
		.pf_map_size = pf->max_pf_rxqs,
		.q_count = vsi->alloc_rxq,
		.scatter_count = ICE_MAX_SCATTER_RXQS,
		.vsi_map = vsi->rxq_map,
		.vsi_map_offset = 0,
		.mapping_mode = ICE_VSI_MAP_CONTIG
	};
	int ret;

	/* the initializers above run before the assignments below, so set the
	 * mapping mode directly in the cfg structs instead of reading the
	 * not-yet-initialized vsi->tx/rx_mapping_mode fields
	 */
	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;

	ret = __ice_vsi_get_qs(&tx_qs_cfg);
	if (!ret)
		ret = __ice_vsi_get_qs(&rx_qs_cfg);

	return ret;
}

/**
 * ice_vsi_put_qs - Release queues from VSI to PF
 * @vsi: the VSI that is going to release queues
 */
void ice_vsi_put_qs(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int i;

	mutex_lock(&pf->avail_q_mutex);

	for (i = 0; i < vsi->alloc_txq; i++) {
		clear_bit(vsi->txq_map[i], pf->avail_txqs);
		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
	}

	mutex_unlock(&pf->avail_q_mutex);
}

/**
 * ice_is_safe_mode
 * @pf: pointer to the PF struct
 *
 * returns true if driver is in safe mode, false otherwise
 */
bool ice_is_safe_mode(struct ice_pf *pf)
{
	return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
}

/**
 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
 * @vsi: the VSI being cleaned up
 *
 * This function deletes RSS input set for all flows that were configured
 * for this VSI
 */
static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;

	if (ice_is_safe_mode(pf))
		return;

	status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
	if (status)
		dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_rss_clean - Delete RSS related VSI structures and configuration
 * @vsi: the VSI being removed
 */
static void ice_rss_clean(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	if (vsi->rss_hkey_user)
		devm_kfree(dev, vsi->rss_hkey_user);
	if (vsi->rss_lut_user)
		devm_kfree(dev, vsi->rss_lut_user);

	ice_vsi_clean_rss_flow_fld(vsi);
	/* remove RSS replay list */
	if (!ice_is_safe_mode(pf))
		ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
}

/**
 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
 * @vsi: the VSI being configured
 */
static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
	struct ice_hw_common_caps *cap;
	struct ice_pf *pf = vsi->back;

	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		vsi->rss_size = 1;
		return;
	}

	cap = &pf->hw.func_caps.common_cap;
	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		vsi->rss_table_size = cap->rss_table_size;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table.
		 * For VSI_LUT, LUT size should be set to 64 bytes
		 */
		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
		vsi->rss_size = min_t(int, num_online_cpus(),
				      BIT(cap->rss_table_entry_width));
		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
		break;
	case ICE_VSI_LB:
		break;
	default:
		dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n",
			 vsi->type);
		break;
	}
}

/**
 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
 * @ctxt: the VSI context being set
 *
 * This initializes a default VSI context for all sections except the Queues.
 */
static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
{
	u32 table = 0;

	memset(&ctxt->info, 0, sizeof(ctxt->info));
	/* VSIs should be allocated from the shared pool */
	ctxt->alloc_from_pool = true;
	/* Src pruning enabled by default */
	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
	/* Traffic from VSI can be sent to LAN */
	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
	/* By default, bits 3 and 4 in vlan_flags are 0, which results in
	 * legacy behavior (show VLAN, DEI, and UP) in the descriptor. Also,
	 * allow all packets untagged/tagged.
	 */
	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
				  ICE_AQ_VSI_VLAN_MODE_M) >>
				 ICE_AQ_VSI_VLAN_MODE_S);
	/* Have 1:1 UP mapping for both ingress/egress tables */
	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
	ctxt->info.ingress_table = cpu_to_le32(table);
	ctxt->info.egress_table = cpu_to_le32(table);
	/* Have 1:1 UP mapping for outer to inner UP table */
	ctxt->info.outer_up_table = cpu_to_le32(table);
	/* No outer tag support, so outer_tag_flags remains zero */
}

/**
 * ice_vsi_setup_q_map - Setup a VSI queue map
 * @vsi: the VSI being configured
 * @ctxt: VSI context structure
 */
static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
{
	u16 offset = 0, qmap = 0, tx_count = 0;
	u16 qcount_tx = vsi->alloc_txq;
	u16 qcount_rx = vsi->alloc_rxq;
	u16 tx_numq_tc, rx_numq_tc;
	u16 pow = 0, max_rss = 0;
	bool ena_tc0 = false;
	u8 netdev_tc = 0;
	int i;

	/* at least TC0 should be enabled by default */
	if (vsi->tc_cfg.numtc) {
		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
			ena_tc0 = true;
	} else {
		ena_tc0 = true;
	}

	if (ena_tc0) {
		vsi->tc_cfg.numtc++;
		vsi->tc_cfg.ena_tc |= 1;
	}

	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
	if (!rx_numq_tc)
		rx_numq_tc = 1;
	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
	if (!tx_numq_tc)
		tx_numq_tc = 1;

	/* TC mapping is a function of the number of Rx queues assigned to the
	 * VSI for each traffic class and the offset of these queues.
	 * The first 10 bits are for the queue offset for TC0, and the next
	 * 4 bits for the number of queues allocated to TC0. The number of
	 * queues is a power-of-2.
	 *
	 * If a TC is not enabled, the queue offset is set to 0 and one queue
	 * is allocated; this way, traffic for the given TC will be sent to
	 * the default queue.
	 *
	 * Setup number and offset of Rx queues for all TCs for the VSI
	 */

	qcount_rx = rx_numq_tc;

	/* qcount will change if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
			if (vsi->type == ICE_VSI_PF)
				max_rss = ICE_MAX_LG_RSS_QS;
			else
				max_rss = ICE_MAX_SMALL_RSS_QS;
			qcount_rx = min_t(int, rx_numq_tc, max_rss);
			if (!vsi->req_rxq)
				qcount_rx = min_t(int, qcount_rx,
						  vsi->rss_size);
		}
	}

	/* find the (rounded up) power-of-2 of qcount */
	pow = order_base_2(qcount_rx);
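	/* e.g. qcount_rx = 5 gives pow = 3; the queue-count field of the qmap
	 * stores this exponent, so the TC advertises 2^3 = 8 queues
	 */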

	ice_for_each_traffic_class(i) {
		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
			/* TC is not enabled */
			vsi->tc_cfg.tc_info[i].qoffset = 0;
			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
			ctxt->info.tc_mapping[i] = 0;
			continue;
		}

		/* TC is enabled */
		vsi->tc_cfg.tc_info[i].qoffset = offset;
		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;

		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
			ICE_AQ_VSI_TC_Q_OFFSET_M) |
			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
			 ICE_AQ_VSI_TC_Q_NUM_M);
		offset += qcount_rx;
		tx_count += tx_numq_tc;
		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
	}

	/* If offset is non-zero, it was accumulated over the enabled TCs and
	 * is the total number of Rx queues for this VSI. Otherwise fall back
	 * to qcount_rx, which is always correct and non-zero because it is
	 * derived from the VSI's allocated Rx queues, which is at least 1
	 * (hence qcount_tx will be at least 1 as well).
	 */
	if (offset)
		vsi->num_rxq = offset;
	else
		vsi->num_rxq = qcount_rx;

	vsi->num_txq = tx_count;

	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
		/* since there is a chance that num_rxq could have been changed
		 * in the above for loop, make num_txq equal to num_rxq.
		 */
		vsi->num_txq = vsi->num_rxq;
	}

	/* Rx queue mapping */
	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
	/* q_mapping buffer holds the info for the first queue allocated for
	 * this VSI in the PF space and also the number of queues associated
	 * with this VSI.
	 */
	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
}

/**
 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
 * @ctxt: the VSI context being set
 * @vsi: the VSI being configured
 */
static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
{
	u8 lut_type, hash_type;
	struct device *dev;
	struct ice_pf *pf;

	pf = vsi->back;
	dev = ice_pf_to_dev(pf);

	switch (vsi->type) {
	case ICE_VSI_PF:
		/* PF VSI will inherit RSS instance of PF */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_VF:
		/* VF VSI gets a small RSS table, which is a VSI LUT type */
		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
		break;
	case ICE_VSI_LB:
		dev_dbg(dev, "Unsupported VSI type %s\n",
			ice_vsi_type_str(vsi->type));
		return;
	default:
		dev_warn(dev, "Unknown VSI type %d\n", vsi->type);
		return;
	}

	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}

/**
 * ice_vsi_init - Create and initialize a VSI
 * @vsi: the VSI being configured
 * @init_vsi: is this call creating a VSI
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command to create a new VSI.
 */
static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_vsi_ctx *ctxt;
	struct device *dev;
	int ret = 0;

	dev = ice_pf_to_dev(pf);
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;
	switch (vsi->type) {
	case ICE_VSI_LB:
		/* fall through */
	case ICE_VSI_PF:
		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
		break;
	case ICE_VSI_VF:
		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
		/* VF number here is the absolute VF number (0-255) */
		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	ice_set_dflt_vsi_ctx(ctxt);
	/* if the switch is in VEB mode, allow VSI loopback */
	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;

	/* Set LUT type and HASH type if RSS is enabled */
	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
		ice_set_rss_vsi_ctx(ctxt, vsi);
		/* if updating the VSI context, set valid_sections to
		 * indicate which section of the VSI context is being updated
		 */
		if (!init_vsi)
			ctxt->info.valid_sections |=
				cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
	}

	ctxt->info.sw_id = vsi->port_info->sw_id;
	ice_vsi_setup_q_map(vsi, ctxt);
	if (!init_vsi) /* means VSI being updated */
		/* must indicate which sections of the VSI context are
		 * being modified
		 */
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);

	/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
	 * respectively
	 */
	if (vsi->type == ICE_VSI_VF) {
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt->info.sec_flags |=
				ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
		} else {
			ctxt->info.sec_flags &=
				~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
				  (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
				   ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
		}
	}

	/* Allow control frames out of main VSI */
	if (vsi->type == ICE_VSI_PF) {
		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
		ctxt->info.valid_sections |=
			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
	}

	if (init_vsi) {
		ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Add VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	} else {
		ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
		if (ret) {
			dev_err(dev, "Update VSI failed, err %d\n", ret);
			ret = -EIO;
			goto out;
		}
	}

	/* keep context for update VSI operations */
	vsi->info = ctxt->info;

	/* record VSI number returned */
	vsi->vsi_num = ctxt->vsi_num;

out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after ice_vsi_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 */
static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	u16 num_q_vectors;
	int base;

	dev = ice_pf_to_dev(pf);
	/* SRIOV doesn't grab irq_tracker entries for each VSI */
	if (vsi->type == ICE_VSI_VF)
		return 0;

	if (vsi->base_vector) {
		dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
			vsi->vsi_num, vsi->base_vector);
		return -EEXIST;
	}

	num_q_vectors = vsi->num_q_vectors;
	/* reserve slots from OS requested IRQs; keep the result in a signed
	 * local so a negative error code from ice_get_res() is detected
	 * before it is stored in base_vector
	 */
	base = ice_get_res(pf, pf->irq_tracker, num_q_vectors, vsi->idx);
	if (base < 0) {
		dev_err(dev,
			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
			num_q_vectors, vsi->vsi_num, base);
		return -ENOENT;
	}
	vsi->base_vector = (u16)base;
	pf->num_avail_sw_msix -= num_q_vectors;

	return 0;
}

/**
 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
 * @vsi: the VSI having rings deallocated
 */
static void ice_vsi_clear_rings(struct ice_vsi *vsi)
{
	int i;

	if (vsi->tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++) {
			if (vsi->tx_rings[i]) {
				kfree_rcu(vsi->tx_rings[i], rcu);
				vsi->tx_rings[i] = NULL;
			}
		}
	}
	if (vsi->rx_rings) {
		for (i = 0; i < vsi->alloc_rxq; i++) {
			if (vsi->rx_rings[i]) {
				kfree_rcu(vsi->rx_rings[i], rcu);
				vsi->rx_rings[i] = NULL;
			}
		}
	}
}

/**
 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
 * @vsi: VSI which is having rings allocated
 */
static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	int i;

	dev = ice_pf_to_dev(pf);
	/* Allocate Tx rings */
	for (i = 0; i < vsi->alloc_txq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->txq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->dev = dev;
		ring->count = vsi->num_tx_desc;
		vsi->tx_rings[i] = ring;
	}

	/* Allocate Rx rings */
	for (i = 0; i < vsi->alloc_rxq; i++) {
		struct ice_ring *ring;

		/* allocate with kzalloc(), free with kfree_rcu() */
		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
		if (!ring)
			goto err_out;

		ring->q_index = i;
		ring->reg_idx = vsi->rxq_map[i];
		ring->ring_active = false;
		ring->vsi = vsi;
		ring->netdev = vsi->netdev;
		ring->dev = dev;
		ring->count = vsi->num_rx_desc;
		vsi->rx_rings[i] = ring;
	}

	return 0;

err_out:
	ice_vsi_clear_rings(vsi);
	return -ENOMEM;
}

/**
 * ice_vsi_manage_rss_lut - disable/enable RSS
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 *
 * On a disable request, this function zeroes out the RSS LUT; on an enable
 * request, it reconfigures the LUT.
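 * A zeroed LUT steers all traffic to Rx queue 0, which effectively disables
 * RSS.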
 */
int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
{
	int err = 0;
	u8 *lut;

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (ena) {
		if (vsi->rss_lut_user)
			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
		else
			ice_fill_rss_lut(lut, vsi->rss_table_size,
					 vsi->rss_size);
	}

	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
	kfree(lut);
	return err;
}

/**
 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
 * @vsi: VSI to be configured
 */
static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
	struct ice_aqc_get_set_rss_keys *key;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int err = 0;
	u8 *lut;

	dev = ice_pf_to_dev(pf);
	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);

	lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
	if (!lut)
		return -ENOMEM;

	if (vsi->rss_lut_user)
		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
	else
		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);

	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
				    vsi->rss_table_size);
	if (status) {
		dev_err(dev, "set_rss_lut failed, error %d\n", status);
		err = -EIO;
		goto ice_vsi_cfg_rss_exit;
	}

	key = kzalloc(sizeof(*key), GFP_KERNEL);
	if (!key) {
		err = -ENOMEM;
		goto ice_vsi_cfg_rss_exit;
	}

	if (vsi->rss_hkey_user)
		memcpy(key,
		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
	else
		netdev_rss_key_fill((void *)key,
				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);

	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
	if (status) {
		dev_err(dev, "set_rss_key failed, error %d\n", status);
		err = -EIO;
	}

	kfree(key);
ice_vsi_cfg_rss_exit:
	kfree(lut);
	return err;
}

/**
 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called during the VF VSI setup. Upon successful
 * completion of package download, this function will configure default RSS
 * input sets for VF VSI.
 */
static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi->vsi_num);
		return;
	}

	status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
	if (status)
		dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
			vsi->vsi_num, status);
}

/**
 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
 * @vsi: VSI to be configured
 *
 * This function will only be called after successful download package call
 * during initialization of PF. Since the downloaded package will erase the
 * RSS section, this function will configure RSS input sets for different
 * flow types. The last profile added has the highest priority, therefore 2
 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
 * (i.e. IPv4 src/dst TCP src/dst port).
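 * This ordering means TCP/UDP flows hash on the 4 tuple profiles, while
 * other IPv4/IPv6 traffic falls back to the 2 tuple profiles.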
 */
static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
{
	u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	if (ice_is_safe_mode(pf)) {
		dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
			vsi_num);
		return;
	}
	/* configure RSS for IPv4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for IPv6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp4 with input set IP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
				 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
				 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);

	/* configure RSS for sctp6 with input set IPv6 src/dst */
	status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
				 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
	if (status)
		dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
			vsi_num, status);
}

/**
 * ice_add_mac_to_list - Add a MAC address filter entry to the list
 * @vsi: the VSI to be forwarded to
 * @add_list: pointer to the list which contains MAC filter entries
 * @macaddr: the MAC address to be added.
 *
 * Adds MAC address filter entry to the temp list
 *
 * Returns 0 on success or -ENOMEM on failure.
 */
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
			const u8 *macaddr)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;

	tmp = devm_kzalloc(ice_pf_to_dev(pf), sizeof(*tmp), GFP_ATOMIC);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, add_list);

	return 0;
}

/**
 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
 * @vsi: the VSI to be updated
 */
void ice_update_eth_stats(struct ice_vsi *vsi)
{
	struct ice_eth_stats *prev_es, *cur_es;
	struct ice_hw *hw = &vsi->back->hw;
	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */

	prev_es = &vsi->eth_stats_prev;
	cur_es = &vsi->eth_stats;

	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_bytes, &cur_es->rx_bytes);

	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_unicast, &cur_es->rx_unicast);

	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_multicast, &cur_es->rx_multicast);

	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);

	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->rx_discards, &cur_es->rx_discards);

	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_bytes, &cur_es->tx_bytes);

	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_unicast, &cur_es->tx_unicast);

	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_multicast, &cur_es->tx_multicast);

	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);

	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
			  &prev_es->tx_errors, &cur_es->tx_errors);

	vsi->stat_offsets_loaded = true;
}

/**
 * ice_free_fltr_list - free filter lists helper
 * @dev: pointer to the device struct
 * @h: pointer to the list head to be freed
 *
 * Helper function to free filter lists previously created using
 * ice_add_mac_to_list
 */
void ice_free_fltr_list(struct device *dev, struct list_head *h)
{
	struct ice_fltr_list_entry *e, *tmp;

	list_for_each_entry_safe(e, tmp, h, list_entry) {
		list_del(&e->list_entry);
		devm_kfree(dev, e);
	}
}

/**
 * ice_vsi_add_vlan - Add VSI membership for given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be added
 */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *tmp;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);
	tmp = devm_kzalloc(dev, sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	tmp->fltr_info.flag = ICE_FLTR_TX;
	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
	tmp->fltr_info.vsi_handle = vsi->idx;
	tmp->fltr_info.l_data.vlan.vlan_id = vid;

	INIT_LIST_HEAD(&tmp->list_entry);
	list_add(&tmp->list_entry, &tmp_add_list);

	status = ice_add_vlan(&pf->hw, &tmp_add_list);
	if (status) {
		err = -ENODEV;
		dev_err(dev, "Failure Adding VLAN %d on VSI %i\n", vid,
			vsi->vsi_num);
	}

	ice_free_fltr_list(dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
 * @vsi: the VSI being configured
 * @vid: VLAN ID to be removed
 *
 * Returns 0 on success and negative on failure
 */
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	struct device *dev;
	int err = 0;

	dev = ice_pf_to_dev(pf);
	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return -ENOMEM;

	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
	list->fltr_info.l_data.vlan.vlan_id = vid;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
	if (status == ICE_ERR_DOES_NOT_EXIST) {
		dev_dbg(dev,
			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
			vid, vsi->vsi_num, status);
	} else if (status) {
		dev_err(dev,
			"Error removing VLAN %d on vsi %i error: %d\n",
			vid, vsi->vsi_num, status);
		err = -EIO;
	}

	ice_free_fltr_list(dev, &tmp_add_list);
	return err;
}

/**
 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
 * @vsi: VSI
 */
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
{
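	/* Three cases: with no netdev or with legacy-rx enabled, use
	 * 2048-byte buffers and the max frame size; on small-page systems
	 * with a standard MTU, a 1536-byte buffer is enough; otherwise use
	 * 3072-byte (small pages) or 2048-byte (large pages) buffers.
	 */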
	if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
		vsi->rx_buf_len = ICE_RXBUF_2048;
#if (PAGE_SIZE < 8192)
	} else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
		   (vsi->netdev->mtu <= ETH_DATA_LEN)) {
		vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
		vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
#endif
	} else {
		vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
#if (PAGE_SIZE < 8192)
		vsi->rx_buf_len = ICE_RXBUF_3072;
#else
		vsi->rx_buf_len = ICE_RXBUF_2048;
#endif
	}
}

/**
 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Rx VSI for operation.
 */
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
{
	u16 i;

	if (vsi->type == ICE_VSI_VF)
		goto setup_rings;

	ice_vsi_cfg_frame_size(vsi);
setup_rings:
	/* set up individual rings */
	for (i = 0; i < vsi->num_rxq; i++) {
		int err;

		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
		if (err) {
			dev_err(&vsi->back->pdev->dev,
				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
				i, err);
			return err;
		}
	}

	return 0;
}

/**
 * ice_vsi_cfg_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 * @rings: Tx ring array to be configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	u16 q_idx = 0;
	int err = 0;

	qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
		if (err)
			goto err_cfg_txqs;
	}

err_cfg_txqs:
	kfree(qg_buf);
	return err;
}

/**
 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx VSI for operation.
 */
int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
{
	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings);
}

/**
 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
 * @vsi: the VSI being configured
 *
 * Return 0 on success and a negative value on error
 * Configure the Tx queues dedicated for XDP in given VSI for operation.
 */
int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
{
	int ret;
	int i;

	ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings);
	if (ret)
		return ret;

	for (i = 0; i < vsi->num_xdp_txq; i++)
		vsi->xdp_rings[i]->xsk_umem = ice_xsk_umem(vsi->xdp_rings[i]);

	return ret;
}

/**
 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
 * @intrl: interrupt rate limit in usecs
 * @gran: interrupt rate limit granularity in usecs
 *
 * This function converts a decimal interrupt rate limit in usecs to the format
 * expected by firmware.
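 * For example, intrl = 8 us with gran = 2 us yields 4, OR'd with the
 * rate-limit enable bit.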
 */
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
{
	u32 val = intrl / gran;

	if (val)
		return val | GLINT_RATE_INTRL_ENA_M;
	return 0;
}

/**
 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * This configures MSIX mode interrupts for the PF VSI, and should not be used
 * for the VF VSI.
 */
void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0, rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		ice_cfg_itr(hw, q_vector);

		wr32(hw, GLINT_RATE(reg_idx),
		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

		/* Both the Transmit Queue Interrupt Cause Control register
		 * and the Receive Queue Interrupt Cause Control register
		 * expect the MSIX_INDX field to be the vector index
		 * within the function space and not the absolute
		 * vector index across the PF or across the device.
		 * For SR-IOV VF VSIs, the queue vector index always starts
		 * at 1, since the first vector index (0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
					      q_vector->tx.itr_idx);
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
					      q_vector->rx.itr_idx);
			rxq++;
		}
	}
}

/**
 * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
 * @vsi: the VSI being changed
 */
int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring the VSI to let the driver add VLAN tags by
	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
	 * insertion happens in the Tx hot path, in ice_tx_map.
	 */
	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;

	/* Preserve existing VLAN strip setting */
	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
				  ICE_AQ_VSI_VLAN_EMOD_M);

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
			status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
 * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
 */
int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
{
	struct ice_hw *hw = &vsi->back->hw;
	struct ice_vsi_ctx *ctxt;
	enum ice_status status;
	int ret = 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	/* Here we are configuring what the VSI should do with the VLAN tag in
	 * the Rx packet. We can either leave the tag in the packet or put it in
	 * the Rx descriptor.
	 */
	if (ena)
		/* Strip VLAN tag from Rx packet and put it in the desc */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
	else
		/* Disable stripping. Leave tag in packet */
		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;

	/* Allow all packets untagged/tagged */
	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;

	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);

	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
	if (status) {
		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
			ena, status, hw->adminq.sq_last_status);
		ret = -EIO;
		goto out;
	}

	vsi->info.vlan_flags = ctxt->info.vlan_flags;
out:
	kfree(ctxt);
	return ret;
}

/**
 * ice_vsi_start_rx_rings - start VSI's Rx rings
 * @vsi: the VSI whose rings are to be started
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, true);
}

/**
 * ice_vsi_stop_rx_rings - stop VSI's Rx rings
 * @vsi: the VSI
 *
 * Returns 0 on success and a negative value on error
 */
int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_ctrl_rx_rings(vsi, false);
}

/**
 * ice_vsi_stop_tx_rings - Disable Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 * @rings: Tx ring array to be stopped
 */
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
		      u16 rel_vmvf_num, struct ice_ring **rings)
{
	u16 q_idx;

	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
		return -EINVAL;

	for (q_idx = 0; q_idx < vsi->num_txq; q_idx++) {
		struct ice_txq_meta txq_meta = { };
		int status;

		if (!rings || !rings[q_idx])
			return -EINVAL;

		ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
		status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
					      rings[q_idx], &txq_meta);
		if (status)
			return status;
	}

	return 0;
}

/**
 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
 * @vsi: the VSI being configured
 * @rst_src: reset source
 * @rel_vmvf_num: Relative ID of VF/VM
 */
int
ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
			  u16 rel_vmvf_num)
{
	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
}

/**
 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
 * @vsi: the VSI being configured
 */
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
{
	return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings);
}

/**
 * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
 * @vsi: VSI to enable or disable VLAN pruning on
 * @ena: set to true to enable VLAN pruning and false to disable it
 * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
 *
 * returns 0 if VSI is updated, negative otherwise
 */
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
{
	struct ice_vsi_ctx *ctxt;
	struct ice_pf *pf;
	int status;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		return -ENOMEM;

	ctxt->info = vsi->info;

	if (ena)
		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
	else
		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;

	if (!vlan_promisc)
		ctxt->info.valid_sections =
			cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);

	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
	if (status) {
		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
			   pf->hw.adminq.sq_last_status);
		goto err_out;
	}

	vsi->info.sw_flags2 = ctxt->info.sw_flags2;

	kfree(ctxt);
	return 0;

err_out:
	kfree(ctxt);
	return -EIO;
}

static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;

	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}

/**
 * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
 * @vsi: VSI to set the q_vectors register index on
 */
static int
ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
{
	u16 i;

	if (!vsi || !vsi->q_vectors)
		return -EINVAL;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (!q_vector) {
			dev_err(&vsi->back->pdev->dev,
				"Failed to set reg_idx on q_vector %d VSI %d\n",
				i, vsi->vsi_num);
			goto clear_reg_idx;
		}

		if (vsi->type == ICE_VSI_VF) {
			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];

			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
		} else {
			q_vector->reg_idx =
				q_vector->v_idx + vsi->base_vector;
		}
	}

	return 0;

clear_reg_idx:
	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		if (q_vector)
			q_vector->reg_idx = 0;
	}

	return -EINVAL;
}

/**
 * ice_vsi_add_rem_eth_mac - Program VSI ethertype based filter with rule
 * @vsi: the VSI being configured
 * @add_rule: boolean value to add or remove ethertype filter rule
 */
static void
ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
{
	struct ice_fltr_list_entry *list;
	struct ice_pf *pf = vsi->back;
	LIST_HEAD(tmp_add_list);
	enum ice_status status;
	struct device *dev;

	dev = ice_pf_to_dev(pf);
	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
	if (!list)
		return;

	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
	list->fltr_info.fltr_act = ICE_DROP_PACKET;
	list->fltr_info.flag = ICE_FLTR_TX;
	list->fltr_info.src_id = ICE_SRC_ID_VSI;
	list->fltr_info.vsi_handle = vsi->idx;
	list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;

	INIT_LIST_HEAD(&list->list_entry);
	list_add(&list->list_entry, &tmp_add_list);

	if (add_rule)
		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
	else
		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

	if (status)
		dev_err(dev,
			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
			vsi->vsi_num, status);

	ice_free_fltr_list(dev, &tmp_add_list);
}
1907 
1908 /**
1909  * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
1910  * @vsi: the VSI being configured
1911  * @tx: bool to determine Tx or Rx rule
1912  * @create: bool to determine create or remove Rule
1913  */
1914 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
1915 {
1916 	struct ice_fltr_list_entry *list;
1917 	struct ice_pf *pf = vsi->back;
1918 	LIST_HEAD(tmp_add_list);
1919 	enum ice_status status;
1920 	struct device *dev;
1921 
1922 	dev = ice_pf_to_dev(pf);
1923 	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
1924 	if (!list)
1925 		return;
1926 
1927 	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
1928 	list->fltr_info.vsi_handle = vsi->idx;
1929 	list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
1930 
1931 	if (tx) {
1932 		list->fltr_info.fltr_act = ICE_DROP_PACKET;
1933 		list->fltr_info.flag = ICE_FLTR_TX;
1934 		list->fltr_info.src_id = ICE_SRC_ID_VSI;
1935 	} else {
1936 		list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1937 		list->fltr_info.flag = ICE_FLTR_RX;
1938 		list->fltr_info.src_id = ICE_SRC_ID_LPORT;
1939 	}
1940 
1941 	INIT_LIST_HEAD(&list->list_entry);
1942 	list_add(&list->list_entry, &tmp_add_list);
1943 
1944 	if (create)
1945 		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
1946 	else
1947 		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
1948 
1949 	if (status)
1950 		dev_err(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
1951 			create ? "adding" : "removing", tx ? "TX" : "RX",
1952 			vsi->vsi_num, status);
1953 
1954 	ice_free_fltr_list(dev, &tmp_add_list);
1955 }
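
/* Illustrative pairing (these call sites are sketches): the PF VSI
 * installs a Tx drop rule so VFs cannot send LLDP, and an Rx forward rule
 * when the FW LLDP agent is stopped so the SW agent receives the frames:
 *
 *	ice_cfg_sw_lldp(vsi, true, true);	Tx: drop LLDP from VSIs
 *	ice_cfg_sw_lldp(vsi, false, true);	Rx: forward LLDP to this VSI
 */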

/**
 * ice_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @pi: pointer to the port_info instance
 * @type: VSI type
 * @vf_id: defines the VF ID to which this VSI connects. This field is meant
 *         to be used only for the ICE_VSI_VF VSI type. For other VSI types,
 *         pass ICE_INVAL_VFID.
 *
 * This allocates the sw VSI structure and its queue resources.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct on
 * success, NULL on failure.
 */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
	      enum ice_vsi_type type, u16 vf_id)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct device *dev = ice_pf_to_dev(pf);
	enum ice_status status;
	struct ice_vsi *vsi;
	int ret, i;

	if (type == ICE_VSI_VF)
		vsi = ice_vsi_alloc(pf, type, vf_id);
	else
		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);

	if (!vsi) {
		dev_err(dev, "could not allocate VSI\n");
		return NULL;
	}

	vsi->port_info = pi;
	vsi->vsw = pf->first_sw;
	if (vsi->type == ICE_VSI_PF)
		vsi->ethtype = ETH_P_PAUSE;

	if (vsi->type == ICE_VSI_VF)
		vsi->vf_id = vf_id;

	if (ice_vsi_get_qs(vsi)) {
		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
			vsi->idx);
		goto unroll_get_qs;
	}

	/* set RSS capabilities */
	ice_vsi_set_rss_params(vsi);

	/* set TC configuration */
	ice_vsi_set_tc_cfg(vsi);

	/* create the VSI */
	ret = ice_vsi_init(vsi, true);
	if (ret)
		goto unroll_get_qs;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vector_base;

		ice_vsi_map_rings_to_vectors(vsi);

		/* Do not exit if configuring RSS had an issue; at worst,
		 * traffic is still received on the first queue, so the
		 * return value is intentionally not captured.
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_VF:
		/* The VF driver creates the netdev for this VSI type and maps
		 * queues to vectors through virtchnl; the PF driver only
		 * creates the VSI and the corresponding bookkeeping
		 * structures.
		 */
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto unroll_vsi_init;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_alloc_q_vector;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto unroll_vector_base;

		/* Do not exit if configuring RSS had an issue; at worst,
		 * traffic is still received on the first queue, so the
		 * return value is intentionally not captured.
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
			ice_vsi_cfg_rss_lut_key(vsi);
			ice_vsi_set_vf_rss_flow_fld(vsi);
		}
		break;
	case ICE_VSI_LB:
		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto unroll_vsi_init;
		break;
	default:
		/* clean up the resources and exit */
		goto unroll_vsi_init;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++)
		max_txqs[i] = vsi->alloc_txq;

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(dev, "VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		goto unroll_vector_base;
	}

	/* Add a switch rule to drop all Tx flow control frames (lookup type
	 * ETHERTYPE) coming from VSIs, to restrict a malicious VF from
	 * sending out PAUSE or PFC frames. If enabled, FW can still send FC
	 * frames. The rule is added once for the PF VSI in order to create
	 * the appropriate recipe, since the VSI/VSI list is ignored with a
	 * drop action.
	 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
	 * be dropped so that VFs cannot send LLDP packets to reconfigure DCB
	 * settings in the HW.
	 */
	if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
		ice_vsi_add_rem_eth_mac(vsi, true);

		/* Tx LLDP packets */
		ice_cfg_sw_lldp(vsi, true, true);
	}

	return vsi;

unroll_vector_base:
	/* reclaim SW interrupts back to the common pool */
	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
	pf->num_avail_sw_msix += vsi->num_q_vectors;
unroll_alloc_q_vector:
	ice_vsi_free_q_vectors(vsi);
unroll_vsi_init:
	ice_vsi_delete(vsi);
unroll_get_qs:
	ice_vsi_put_qs(vsi);
	ice_vsi_clear(vsi);

	return NULL;
}
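
/* A minimal creation sketch (caller context assumed, teardown elided):
 * setting up the PF VSI on the device's port:
 *
 *	struct ice_vsi *vsi;
 *
 *	vsi = ice_vsi_setup(pf, pf->hw.port_info, ICE_VSI_PF,
 *			    ICE_INVAL_VFID);
 *	if (!vsi)
 *		return -ENOMEM;
 */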

/**
 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
 * @vsi: the VSI being cleaned up
 */
static void ice_vsi_release_msix(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 txq = 0;
	u32 rxq = 0;
	int i, q;

	for (i = 0; i < vsi->num_q_vectors; i++) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];
		u16 reg_idx = q_vector->reg_idx;

		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
		for (q = 0; q < q_vector->num_ring_tx; q++) {
			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
			if (ice_is_xdp_ena_vsi(vsi)) {
				u32 xdp_txq = txq + vsi->num_xdp_txq;

				wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
			}
			txq++;
		}

		for (q = 0; q < q_vector->num_ring_rx; q++) {
			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
			rxq++;
		}
	}

	ice_flush(hw);
}

/**
 * ice_vsi_free_irq - Free the IRQ association with the OS
 * @vsi: the VSI being configured
 */
void ice_vsi_free_irq(struct ice_vsi *vsi)
{
	struct ice_pf *pf = vsi->back;
	int base = vsi->base_vector;
	int i;

	if (!vsi->q_vectors || !vsi->irqs_ready)
		return;

	ice_vsi_release_msix(vsi);
	if (vsi->type == ICE_VSI_VF)
		return;

	vsi->irqs_ready = false;
	ice_for_each_q_vector(vsi, i) {
		u16 vector = i + base;
		int irq_num;

		irq_num = pf->msix_entries[vector].vector;

		/* free only the irqs that were actually requested */
		if (!vsi->q_vectors[i] ||
		    !(vsi->q_vectors[i]->num_ring_tx ||
		      vsi->q_vectors[i]->num_ring_rx))
			continue;

		/* clear the affinity notifier in the IRQ descriptor */
		irq_set_affinity_notifier(irq_num, NULL);

		/* clear the affinity_mask in the IRQ descriptor */
		irq_set_affinity_hint(irq_num, NULL);
		synchronize_irq(irq_num);
		devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
	}
}

/**
 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->tx_rings)
		return;

	ice_for_each_txq(vsi, i)
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
			ice_free_tx_ring(vsi->tx_rings[i]);
}

/**
 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
 * @vsi: the VSI having resources freed
 */
void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->rx_rings)
		return;

	ice_for_each_rxq(vsi, i)
		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
			ice_free_rx_ring(vsi->rx_rings[i]);
}

/**
 * ice_vsi_close - Shut down a VSI
 * @vsi: the VSI being shut down
 */
void ice_vsi_close(struct ice_vsi *vsi)
{
	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
		ice_down(vsi);

	ice_vsi_free_irq(vsi);
	ice_vsi_free_tx_rings(vsi);
	ice_vsi_free_rx_rings(vsi);
}

/**
 * ice_ena_vsi - resume a VSI
 * @vsi: the VSI being resumed
 * @locked: is the rtnl_lock already held
 */
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
	int err = 0;

	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
		return 0;

	clear_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			err = ice_open(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		}
	}

	return err;
}

/**
 * ice_dis_vsi - pause a VSI
 * @vsi: the VSI being paused
 * @locked: is the rtnl_lock already held
 */
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
	if (test_bit(__ICE_DOWN, vsi->state))
		return;

	set_bit(__ICE_NEEDS_RESTART, vsi->state);

	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
		if (netif_running(vsi->netdev)) {
			if (!locked)
				rtnl_lock();

			ice_stop(vsi->netdev);

			if (!locked)
				rtnl_unlock();
		} else {
			ice_vsi_close(vsi);
		}
	}
}

/**
 * ice_free_res - free a block of resources
 * @res: pointer to the resource
 * @index: starting index previously returned by ice_get_res
 * @id: identifier to track owner
 *
 * Returns the number of resources freed, or -EINVAL on a bad parameter
 */
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
{
	int count = 0;
	int i;

	if (!res || index >= res->end)
		return -EINVAL;

	id |= ICE_RES_VALID_BIT;
	for (i = index; i < res->end && res->list[i] == id; i++) {
		res->list[i] = 0;
		count++;
	}

	return count;
}

/**
 * ice_search_res - Search the tracker for a block of resources
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or -ENOMEM for error
 */
static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
{
	int start = 0, end = 0;

	if (needed > res->end)
		return -ENOMEM;

	id |= ICE_RES_VALID_BIT;

	do {
		/* skip already allocated entries */
		if (res->list[end++] & ICE_RES_VALID_BIT) {
			start = end;
			if ((start + needed) > res->end)
				break;
		}

		if (end == (start + needed)) {
			int i = start;

			/* there was enough, so assign it to the requestor */
			while (i != end)
				res->list[i++] = id;

			return start;
		}
	} while (end < res->end);

	return -ENOMEM;
}
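
/* Worked example of the first-fit scan above: given a tracker list of
 * [used, used, free, free, free] and needed = 2, 'start' advances past
 * index 1, 'end' reaches start + needed at index 4, entries 2 and 3 are
 * stamped with the owner ID, and 2 is returned as the base index.
 */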

/**
 * ice_get_res - get a block of resources
 * @pf: board private structure
 * @res: pointer to the resource
 * @needed: size of the block needed
 * @id: identifier to track owner
 *
 * Returns the base item index of the block, or negative for error
 */
int
ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
{
	if (!res || !pf)
		return -EINVAL;

	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
		dev_err(ice_pf_to_dev(pf),
			"param err: needed=%d, num_entries=%d, id=0x%04x\n",
			needed, res->num_entries, id);
		return -EINVAL;
	}

	return ice_search_res(res, needed, id);
}
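
/* Typical allocate/release pairing (sketch; the call site is
 * illustrative, error handling elided):
 *
 *	base = ice_get_res(pf, pf->irq_tracker, vsi->num_q_vectors,
 *			   vsi->idx);
 *	if (base < 0)
 *		return base;
 *	...
 *	ice_free_res(pf->irq_tracker, (u16)base, vsi->idx);
 */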

/**
 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 */
void ice_vsi_dis_irq(struct ice_vsi *vsi)
{
	int base = vsi->base_vector;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	u32 val;
	int i;

	/* disable interrupt causation from each queue */
	if (vsi->tx_rings) {
		ice_for_each_txq(vsi, i) {
			if (vsi->tx_rings[i]) {
				u16 reg;

				reg = vsi->tx_rings[i]->reg_idx;
				val = rd32(hw, QINT_TQCTL(reg));
				val &= ~QINT_TQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_TQCTL(reg), val);
			}
		}
	}

	if (vsi->rx_rings) {
		ice_for_each_rxq(vsi, i) {
			if (vsi->rx_rings[i]) {
				u16 reg;

				reg = vsi->rx_rings[i]->reg_idx;
				val = rd32(hw, QINT_RQCTL(reg));
				val &= ~QINT_RQCTL_CAUSE_ENA_M;
				wr32(hw, QINT_RQCTL(reg), val);
			}
		}
	}

	/* disable each interrupt */
	ice_for_each_q_vector(vsi, i) {
		if (!vsi->q_vectors[i])
			continue;
		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
	}

	ice_flush(hw);

	/* don't call synchronize_irq() for VF's from the host */
	if (vsi->type == ICE_VSI_VF)
		return;

	ice_for_each_q_vector(vsi, i)
		synchronize_irq(pf->msix_entries[i + base].vector);
}

/**
 * ice_napi_del - Remove NAPI handler for the VSI
 * @vsi: VSI for which NAPI handler is to be removed
 */
void ice_napi_del(struct ice_vsi *vsi)
{
	int v_idx;

	if (!vsi->netdev)
		return;

	ice_for_each_q_vector(vsi, v_idx)
		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}

/**
 * ice_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Returns 0 on success or < 0 on error
 */
int ice_vsi_release(struct ice_vsi *vsi)
{
	struct ice_pf *pf;

	if (!vsi->back)
		return -ENODEV;
	pf = vsi->back;

	/* Do not unregister while the driver is in the reset recovery pending
	 * state. Since reset/rebuild happens through the PF service task
	 * workqueue, it's not a good idea to unregister a netdev associated
	 * with a PF that is currently running the work queue items. This
	 * avoids a check_flush_dependency() warning on that workqueue.
	 */
	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
		unregister_netdev(vsi->netdev);

	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
		ice_rss_clean(vsi);

	/* Disable VSI and free resources */
	if (vsi->type != ICE_VSI_LB)
		ice_vsi_dis_irq(vsi);
	ice_vsi_close(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
	}

	if (!ice_is_safe_mode(pf)) {
		if (vsi->type == ICE_VSI_PF) {
			ice_vsi_add_rem_eth_mac(vsi, false);
			ice_cfg_sw_lldp(vsi, true, false);
			/* The Rx rule will only exist to remove if the LLDP FW
			 * engine is currently stopped
			 */
			if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
				ice_cfg_sw_lldp(vsi, false, false);
		}
	}

	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_delete(vsi);
	ice_vsi_free_q_vectors(vsi);

	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}

	ice_vsi_clear_rings(vsi);

	ice_vsi_put_qs(vsi);

	/* retain the SW VSI data structure since it is needed to unregister
	 * and free the VSI netdev when the PF is not in the reset recovery
	 * pending state, e.g. during rmmod
	 */
	if (!ice_is_reset_in_progress(pf->state))
		ice_vsi_clear(vsi);

	return 0;
}

/**
 * ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
 * @q_vector: pointer to q_vector which is being updated
 * @coalesce: pointer to array of struct with stored coalesce
 *
 * Set coalesce param in q_vector and update these parameters in HW.
 */
static void
ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
				struct ice_coalesce_stored *coalesce)
{
	struct ice_ring_container *rx_rc = &q_vector->rx;
	struct ice_ring_container *tx_rc = &q_vector->tx;
	struct ice_hw *hw = &q_vector->vsi->back->hw;

	tx_rc->itr_setting = coalesce->itr_tx;
	rx_rc->itr_setting = coalesce->itr_rx;

	/* dynamic ITR values will be updated during Tx/Rx */
	if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
		wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(tx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);
	if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
		wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
		     ITR_REG_ALIGN(rx_rc->itr_setting) >>
		     ICE_ITR_GRAN_S);

	q_vector->intrl = coalesce->intrl;
	wr32(hw, GLINT_RATE(q_vector->reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
}
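
/* Example of the ITR write above (values illustrative): assuming the
 * device's 2-usec ITR granularity, a stored itr_setting of 50 usecs is
 * masked to register alignment by ITR_REG_ALIGN() and shifted right by
 * ICE_ITR_GRAN_S, so GLINT_ITR is programmed with 25 granularity units.
 */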

/**
 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
 * @vsi: VSI connected with q_vectors
 * @coalesce: array of struct with stored coalesce
 *
 * Returns the size of the array, i.e. the number of q_vectors saved.
 */
static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce)
{
	int i;

	ice_for_each_q_vector(vsi, i) {
		struct ice_q_vector *q_vector = vsi->q_vectors[i];

		coalesce[i].itr_tx = q_vector->tx.itr_setting;
		coalesce[i].itr_rx = q_vector->rx.itr_setting;
		coalesce[i].intrl = q_vector->intrl;
	}

	return vsi->num_q_vectors;
}

/**
 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
 * @vsi: VSI connected with q_vectors
 * @coalesce: pointer to array of struct with stored coalesce
 * @size: size of coalesce array
 *
 * Before calling this function, ice_vsi_rebuild_get_coalesce should be called
 * to save the ITR params in the array. If size is 0, or a q_vector has no
 * stored entry, its coalesce settings are reset to the defaults.
 */
static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
			     struct ice_coalesce_stored *coalesce, int size)
{
	int i;

	if ((size && !coalesce) || !vsi)
		return;

	for (i = 0; i < size && i < vsi->num_q_vectors; i++)
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce[i]);

	for (; i < vsi->num_q_vectors; i++) {
		struct ice_coalesce_stored coalesce_dflt = {
			.itr_tx = ICE_DFLT_TX_ITR,
			.itr_rx = ICE_DFLT_RX_ITR,
			.intrl = 0
		};
		ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
						&coalesce_dflt);
	}
}

/**
 * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
 * @init_vsi: true for an initialization, false for a reconfiguration
 *
 * Returns 0 on success and negative value on failure
 */
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_coalesce_stored *coalesce;
	int prev_num_q_vectors = 0;
	struct ice_vf *vf = NULL;
	enum ice_status status;
	struct ice_pf *pf;
	int ret, i;

	if (!vsi)
		return -EINVAL;

	pf = vsi->back;
	if (vsi->type == ICE_VSI_VF)
		vf = &pf->vf[vsi->vf_id];

	coalesce = kcalloc(vsi->num_q_vectors,
			   sizeof(struct ice_coalesce_stored), GFP_KERNEL);
	if (coalesce)
		prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
								  coalesce);
	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
	ice_vsi_free_q_vectors(vsi);

	/* SR-IOV determines needed MSIX resources all at once instead of per
	 * VSI since when VFs are spawned we know how many VFs there are and how
	 * many interrupts each VF needs. SR-IOV MSIX resources are also
	 * cleared in the same manner.
	 */
	if (vsi->type != ICE_VSI_VF) {
		/* reclaim SW interrupts back to the common pool */
		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
		pf->num_avail_sw_msix += vsi->num_q_vectors;
		vsi->base_vector = 0;
	}

	if (ice_is_xdp_ena_vsi(vsi))
		/* the return value check can be skipped here;
		 * ice_destroy_xdp_rings() always returns 0 while a reset is
		 * in progress
		 */
		ice_destroy_xdp_rings(vsi);
	ice_vsi_put_qs(vsi);
	ice_vsi_clear_rings(vsi);
	ice_vsi_free_arrays(vsi);
	ice_dev_onetime_setup(&pf->hw);
	if (vsi->type == ICE_VSI_VF)
		ice_vsi_set_num_qs(vsi, vf->vf_id);
	else
		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);

	ret = ice_vsi_alloc_arrays(vsi);
	if (ret < 0)
		goto err_vsi;

	ice_vsi_get_qs(vsi);
	ice_vsi_set_tc_cfg(vsi);

	/* Initialize VSI struct elements and create VSI in FW */
	ret = ice_vsi_init(vsi, init_vsi);
	if (ret < 0)
		goto err_vsi;

	switch (vsi->type) {
	case ICE_VSI_PF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_setup_vector_base(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		ice_vsi_map_rings_to_vectors(vsi);
		if (ice_is_xdp_ena_vsi(vsi)) {
			vsi->num_xdp_txq = vsi->alloc_txq;
			ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
			if (ret)
				goto err_vectors;
		}
		/* Do not exit if configuring RSS had an issue; at worst,
		 * traffic is still received on the first queue, so the
		 * return value is intentionally not captured.
		 */
		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
			ice_vsi_cfg_rss_lut_key(vsi);
		break;
	case ICE_VSI_VF:
		ret = ice_vsi_alloc_q_vectors(vsi);
		if (ret)
			goto err_rings;

		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
		if (ret)
			goto err_vectors;

		ret = ice_vsi_alloc_rings(vsi);
		if (ret)
			goto err_vectors;

		break;
	default:
		break;
	}

	/* configure VSI nodes based on number of queues and TC's */
	for (i = 0; i < vsi->tc_cfg.numtc; i++) {
		max_txqs[i] = vsi->alloc_txq;

		if (ice_is_xdp_ena_vsi(vsi))
			max_txqs[i] += vsi->num_xdp_txq;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);
	if (status) {
		dev_err(ice_pf_to_dev(pf),
			"VSI %d failed lan queue config, error %d\n",
			vsi->vsi_num, status);
		if (init_vsi) {
			ret = -EIO;
			goto err_vectors;
		} else {
			return ice_schedule_reset(pf, ICE_RESET_PFR);
		}
	}
	ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
	kfree(coalesce);

	return 0;

err_vectors:
	ice_vsi_free_q_vectors(vsi);
err_rings:
	if (vsi->netdev) {
		vsi->current_netdev_flags = 0;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_vsi:
	ice_vsi_clear(vsi);
	set_bit(__ICE_RESET_FAILED, pf->state);
	kfree(coalesce);
	return ret;
}
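
/* Rebuild sketch (this reset-recovery call site is hypothetical): a hard
 * reset path re-creates the VSI in FW (init_vsi = true), while a soft
 * reconfiguration reuses the existing one:
 *
 *	err = ice_vsi_rebuild(vsi, true);
 *	if (err)
 *		dev_err(ice_pf_to_dev(pf), "VSI %d rebuild failed\n",
 *			vsi->vsi_num);
 */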

/**
 * ice_is_reset_in_progress - check for a reset in progress
 * @state: PF state field
 */
bool ice_is_reset_in_progress(unsigned long *state)
{
	return test_bit(__ICE_RESET_OICR_RECV, state) ||
	       test_bit(__ICE_DCBNL_DEVRESET, state) ||
	       test_bit(__ICE_PFR_REQ, state) ||
	       test_bit(__ICE_CORER_REQ, state) ||
	       test_bit(__ICE_GLOBR_REQ, state);
}

#ifdef CONFIG_DCB
/**
 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
 * @vsi: VSI being configured
 * @ctx: the context buffer returned from AQ VSI update command
 */
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
	vsi->info.mapping_flags = ctx->info.mapping_flags;
	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
	       sizeof(vsi->info.q_mapping));
	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
	       sizeof(vsi->info.tc_mapping));
}

/**
 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
 * @vsi: VSI to be configured
 * @ena_tc: TC bitmap
 *
 * VSI queues are expected to be quiesced before calling this function
 */
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
	struct ice_vsi_ctx *ctx;
	struct ice_pf *pf = vsi->back;
	enum ice_status status;
	struct device *dev;
	int i, ret = 0;
	u8 num_tc = 0;

	dev = ice_pf_to_dev(pf);

	ice_for_each_traffic_class(i) {
		/* build bitmap of enabled TCs */
		if (ena_tc & BIT(i))
			num_tc++;
		/* populate max_txqs per TC */
		max_txqs[i] = vsi->alloc_txq;
	}

	vsi->tc_cfg.ena_tc = ena_tc;
	vsi->tc_cfg.numtc = num_tc;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->vf_num = 0;
	ctx->info = vsi->info;

	ice_vsi_setup_q_map(vsi, ctx);

	/* must indicate which sections of the VSI context are being modified */
	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
	if (status) {
		dev_info(dev, "Failed to update VSI for TC config\n");
		ret = -EIO;
		goto out;
	}

	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
				 max_txqs);

	if (status) {
		dev_err(dev, "VSI %d failed TC config, error %d\n",
			vsi->vsi_num, status);
		ret = -EIO;
		goto out;
	}
	ice_vsi_update_q_map(vsi, ctx);
	vsi->info.valid_sections = 0;

	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
	kfree(ctx);
	return ret;
}
#endif /* CONFIG_DCB */

/**
 * ice_nvm_version_str - format the NVM version string
 * @hw: ptr to the hardware info
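 *
 * Note: the version is formatted into a static buffer, so the result is
 * overwritten by the next call and is not safe for concurrent callers.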
 */
char *ice_nvm_version_str(struct ice_hw *hw)
{
	u8 oem_ver, oem_patch, ver_hi, ver_lo;
	static char buf[ICE_NVM_VER_LEN];
	u16 oem_build;

	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
			    &ver_lo);

	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);

	return buf;
}

/**
 * ice_update_ring_stats - Update ring statistics
 * @ring: ring to update
 * @cont: used to increment per-vector counters
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 *
 * This function assumes that the caller has entered a u64_stats_sync write
 * section via u64_stats_update_begin().
 */
static void
ice_update_ring_stats(struct ice_ring *ring, struct ice_ring_container *cont,
		      u64 pkts, u64 bytes)
{
	ring->stats.bytes += bytes;
	ring->stats.pkts += pkts;
	cont->total_bytes += bytes;
	cont->total_pkts += pkts;
}

/**
 * ice_update_tx_ring_stats - Update Tx ring specific counters
 * @tx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&tx_ring->syncp);
	ice_update_ring_stats(tx_ring, &tx_ring->q_vector->tx, pkts, bytes);
	u64_stats_update_end(&tx_ring->syncp);
}

/**
 * ice_update_rx_ring_stats - Update Rx ring specific counters
 * @rx_ring: ring to update
 * @pkts: number of processed packets
 * @bytes: number of processed bytes
 */
void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&rx_ring->syncp);
	ice_update_ring_stats(rx_ring, &rx_ring->q_vector->rx, pkts, bytes);
	u64_stats_update_end(&rx_ring->syncp);
}
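
/* Reader-side counterpart (sketch; assumes the generic u64_stats API):
 * consumers snapshot both counters under the same seqcount so the pair
 * stays consistent even on 32-bit kernels:
 *
 *	unsigned int start;
 *	u64 pkts, bytes;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&ring->syncp);
 *		pkts = ring->stats.pkts;
 *		bytes = ring->stats.bytes;
 *	} while (u64_stats_fetch_retry(&ring->syncp, start));
 */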

/**
 * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
 * @vsi: the VSI whose MAC filter is being configured
 * @macaddr: the MAC address to be added or removed
 * @set: true to add the MAC filter, false to delete it
 *
 * Adds or removes MAC address filter entry for VF VSI
 */
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
{
	LIST_HEAD(tmp_add_list);
	enum ice_status status;

	/* Update MAC filter list to be added or removed for a VSI */
	if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
		status = ICE_ERR_NO_MEMORY;
		goto cfg_mac_fltr_exit;
	}

	if (set)
		status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
	else
		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);

cfg_mac_fltr_exit:
	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
	return status;
}

/**
 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
 * @sw: switch to check if its default forwarding VSI is free
 *
 * Returns true if the default forwarding VSI is already in use, otherwise
 * returns false, signalling that it is available.
 */
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
{
	return (sw->dflt_vsi && sw->dflt_vsi_ena);
}

/**
 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
 * @sw: switch for the default forwarding VSI to compare against
 * @vsi: VSI to compare against default forwarding VSI
 *
 * If the VSI passed in is the default forwarding VSI then return true, else
 * return false
 */
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
}

/**
 * ice_set_dflt_vsi - set the default forwarding VSI
 * @sw: switch used to assign the default forwarding VSI
 * @vsi: VSI getting set as the default forwarding VSI on the switch
 *
 * If the VSI passed in is already the default VSI and it's enabled, just
 * return success.
 *
 * If there is already a default VSI on the switch and it's enabled, then
 * return -EEXIST since there can only be one default VSI per switch.
 *
 * Otherwise try to set the VSI passed in as the switch's default VSI and
 * return the result.
 */
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
	enum ice_status status;
	struct device *dev;

	if (!sw || !vsi)
		return -EINVAL;

	dev = ice_pf_to_dev(vsi->back);

	/* the VSI passed in is already the default VSI */
	if (ice_is_vsi_dflt_vsi(sw, vsi)) {
		dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
			vsi->vsi_num);
		return 0;
	}

	/* another VSI is already the default VSI for this switch */
	if (ice_is_dflt_vsi_in_use(sw)) {
		dev_err(dev,
			"Default forwarding VSI %d already in use, disable it and try again\n",
			sw->dflt_vsi->vsi_num);
		return -EEXIST;
	}

	status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
	if (status) {
		dev_err(dev,
			"Failed to set VSI %d as the default forwarding VSI, error %d\n",
			vsi->vsi_num, status);
		return -EIO;
	}

	sw->dflt_vsi = vsi;
	sw->dflt_vsi_ena = true;

	return 0;
}

/**
 * ice_clear_dflt_vsi - clear the default forwarding VSI
 * @sw: switch used to clear the default VSI
 *
 * If the switch has no default VSI or it's not enabled then return error.
 *
 * Otherwise try to clear the default VSI and return the result.
 */
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
	struct ice_vsi *dflt_vsi;
	enum ice_status status;
	struct device *dev;

	if (!sw)
		return -EINVAL;

	dev = ice_pf_to_dev(sw->pf);

	dflt_vsi = sw->dflt_vsi;

	/* there is no default VSI configured */
	if (!ice_is_dflt_vsi_in_use(sw))
		return -ENODEV;

	status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
				  ICE_FLTR_RX);
	if (status) {
		dev_err(dev,
			"Failed to clear the default forwarding VSI %d, error %d\n",
			dflt_vsi->vsi_num, status);
		return -EIO;
	}

	sw->dflt_vsi = NULL;
	sw->dflt_vsi_ena = false;

	return 0;
}
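
/* Illustrative claim/release pairing (this call site is hypothetical),
 * e.g. for promiscuous mode handling:
 *
 *	if (!ice_is_vsi_dflt_vsi(pf->first_sw, vsi))
 *		err = ice_set_dflt_vsi(pf->first_sw, vsi);
 *	...
 *	err = ice_clear_dflt_vsi(pf->first_sw);
 */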