1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3 
4 #include "ice.h"
5 #include "ice_lib.h"
6 #include "ice_dcb_lib.h"
7 
8 /**
9  * ice_setup_rx_ctx - Configure a receive ring context
10  * @ring: The Rx ring to configure
11  *
12  * Configure the Rx descriptor ring in RLAN context.
13  */
14 static int ice_setup_rx_ctx(struct ice_ring *ring)
15 {
16 	struct ice_vsi *vsi = ring->vsi;
17 	struct ice_hw *hw = &vsi->back->hw;
18 	u32 rxdid = ICE_RXDID_FLEX_NIC;
19 	struct ice_rlan_ctx rlan_ctx;
20 	u32 regval;
21 	u16 pf_q;
22 	int err;
23 
24 	/* get the absolute Rx queue number in the global space of 2K Rx queues */
25 	pf_q = vsi->rxq_map[ring->q_index];
26 
27 	/* clear the context structure first */
28 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
29 
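	/* ring base address is given to HW in 128-byte units (hence the >> 7) */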
30 	rlan_ctx.base = ring->dma >> 7;
31 
32 	rlan_ctx.qlen = ring->count;
33 
34 	/* Receive Packet Data Buffer Size.
35 	 * The Packet Data Buffer Size is defined in 128 byte units.
36 	 */
37 	rlan_ctx.dbuf = vsi->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
38 
39 	/* use 32 byte descriptors */
40 	rlan_ctx.dsize = 1;
41 
42 	/* Strip the Ethernet CRC bytes before the packet is posted to host
43 	 * memory.
44 	 */
45 	rlan_ctx.crcstrip = 1;
46 
47 	/* L2TSEL flag defines the reported L2 Tags in the receive descriptor */
48 	rlan_ctx.l2tsel = 1;
49 
50 	rlan_ctx.dtype = ICE_RX_DTYPE_NO_SPLIT;
51 	rlan_ctx.hsplit_0 = ICE_RLAN_RX_HSPLIT_0_NO_SPLIT;
52 	rlan_ctx.hsplit_1 = ICE_RLAN_RX_HSPLIT_1_NO_SPLIT;
53 
54 	/* This controls whether VLAN is stripped from inner headers.
55 	 * The VLAN in the inner L2 header is stripped to the receive
56 	 * descriptor if enabled by this flag.
57 	 */
58 	rlan_ctx.showiv = 0;
59 
60 	/* Max packet size for this queue - must not be set to a larger value
61 	 * than 5 x DBUF
62 	 */
63 	rlan_ctx.rxmax = min_t(u16, vsi->max_frame,
64 			       ICE_MAX_CHAINED_RX_BUFS * vsi->rx_buf_len);
65 
66 	/* Rx queue threshold in units of 64 */
67 	rlan_ctx.lrxqthresh = 1;
68 
69 	/* Enable Flexible Descriptors in the queue context which
70 	 * allows this driver to select a specific receive descriptor format
71 	 */
72 	if (vsi->type != ICE_VSI_VF) {
73 		regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
74 		regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
75 			QRXFLXP_CNTXT_RXDID_IDX_M;
76 
77 		/* increasing context priority to pick up profile ID;
78 		 * default is 0x01; setting to 0x03 to ensure profile
79 		 * is programmed if prev context is of same priority
80 		 */
81 		regval |= (0x03 << QRXFLXP_CNTXT_RXDID_PRIO_S) &
82 			QRXFLXP_CNTXT_RXDID_PRIO_M;
83 
84 		wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
85 	}
86 
87 	/* Absolute queue number out of 2K needs to be passed */
88 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
89 	if (err) {
90 		dev_err(&vsi->back->pdev->dev,
91 			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
92 			pf_q, err);
93 		return -EIO;
94 	}
95 
96 	if (vsi->type == ICE_VSI_VF)
97 		return 0;
98 
99 	/* init queue specific tail register */
100 	ring->tail = hw->hw_addr + QRX_TAIL(pf_q);
101 	writel(0, ring->tail);
102 	ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
103 
104 	return 0;
105 }
106 
107 /**
108  * ice_setup_tx_ctx - setup a struct ice_tlan_ctx instance
109  * @ring: The Tx ring to configure
110  * @tlan_ctx: Pointer to the Tx LAN queue context structure to be initialized
111  * @pf_q: queue index in the PF space
112  *
113  * Configure the Tx descriptor ring in TLAN context.
114  */
115 static void
116 ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
117 {
118 	struct ice_vsi *vsi = ring->vsi;
119 	struct ice_hw *hw = &vsi->back->hw;
120 
121 	tlan_ctx->base = ring->dma >> ICE_TLAN_CTX_BASE_S;
122 
123 	tlan_ctx->port_num = vsi->port_info->lport;
124 
125 	/* Transmit Queue Length */
126 	tlan_ctx->qlen = ring->count;
127 
128 	ice_set_cgd_num(tlan_ctx, ring);
129 
130 	/* PF number */
131 	tlan_ctx->pf_num = hw->pf_id;
132 
133 	/* queue belongs to a specific VSI type
134 	 * VF / VM index should be programmed per vmvf_type setting:
135 	 * for vmvf_type = VF, it is VF number between 0-256
136 	 * for vmvf_type = VM, it is VM number between 0-767
137 	 * for PF or EMP this field should be set to zero
138 	 */
139 	switch (vsi->type) {
140 	case ICE_VSI_LB:
141 		/* fall through */
142 	case ICE_VSI_PF:
143 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
144 		break;
145 	case ICE_VSI_VF:
146 		/* Firmware expects vmvf_num to be absolute VF ID */
147 		tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
148 		tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
149 		break;
150 	default:
151 		return;
152 	}
153 
154 	/* make sure the context is associated with the right VSI */
155 	tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
156 
157 	tlan_ctx->tso_ena = ICE_TX_LEGACY;
158 	tlan_ctx->tso_qnum = pf_q;
159 
160 	/* Legacy or Advanced Host Interface:
161 	 * 0: Advanced Host Interface
162 	 * 1: Legacy Host Interface
163 	 */
164 	tlan_ctx->legacy_int = ICE_TX_LEGACY;
165 }
166 
167 /**
168  * ice_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
169  * @pf: the PF being configured
170  * @pf_q: the PF queue
171  * @ena: enable or disable state of the queue
172  *
173  * This routine will wait for the given Rx queue of the PF to reach the
174  * enabled or disabled state.
175  * Returns -ETIMEDOUT if the queue fails to reach the requested state after
176  * multiple retries; otherwise returns 0.
177  */
178 static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
179 {
180 	int i;
181 
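	/* poll QRX_CTRL until the QENA_STAT bit matches the requested state */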
182 	for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) {
183 		if (ena == !!(rd32(&pf->hw, QRX_CTRL(pf_q)) &
184 			      QRX_CTRL_QENA_STAT_M))
185 			return 0;
186 
187 		usleep_range(20, 40);
188 	}
189 
190 	return -ETIMEDOUT;
191 }
192 
193 /**
194  * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
195  * @vsi: the VSI being configured
196  * @ena: start or stop the Rx rings
197  * @rxq_idx: Rx queue index
198  */
199 #ifndef CONFIG_PCI_IOV
200 static
201 #endif /* !CONFIG_PCI_IOV */
202 int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
203 {
204 	int pf_q = vsi->rxq_map[rxq_idx];
205 	struct ice_pf *pf = vsi->back;
206 	struct ice_hw *hw = &pf->hw;
207 	int ret = 0;
208 	u32 rx_reg;
209 
210 	rx_reg = rd32(hw, QRX_CTRL(pf_q));
211 
212 	/* Skip if the queue is already in the requested state */
213 	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
214 		return 0;
215 
216 	/* turn on/off the queue */
217 	if (ena)
218 		rx_reg |= QRX_CTRL_QENA_REQ_M;
219 	else
220 		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
221 	wr32(hw, QRX_CTRL(pf_q), rx_reg);
222 
223 	/* wait for the change to finish */
224 	ret = ice_pf_rxq_wait(pf, pf_q, ena);
225 	if (ret)
226 		dev_err(&pf->pdev->dev,
227 			"VSI idx %d Rx ring %d %sable timeout\n",
228 			vsi->idx, pf_q, (ena ? "en" : "dis"));
229 
230 	return ret;
231 }
232 
233 /**
234  * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
235  * @vsi: the VSI being configured
236  * @ena: start or stop the Rx rings
237  */
238 static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
239 {
240 	int i, ret = 0;
241 
242 	for (i = 0; i < vsi->num_rxq; i++) {
243 		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
244 		if (ret)
245 			break;
246 	}
247 
248 	return ret;
249 }
250 
251 /**
252  * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
253  * @vsi: VSI pointer
254  *
255  * On error: returns error code (negative)
256  * On success: returns 0
257  */
258 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
259 {
260 	struct ice_pf *pf = vsi->back;
261 
262 	/* allocate memory for both Tx and Rx ring pointers */
263 	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
264 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
265 	if (!vsi->tx_rings)
266 		return -ENOMEM;
267 
268 	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
269 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
270 	if (!vsi->rx_rings)
271 		goto err_rings;
272 
273 	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
274 				    sizeof(*vsi->txq_map), GFP_KERNEL);
275 
276 	if (!vsi->txq_map)
277 		goto err_txq_map;
278 
279 	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
280 				    sizeof(*vsi->rxq_map), GFP_KERNEL);
281 	if (!vsi->rxq_map)
282 		goto err_rxq_map;
283
285 	/* There is no need to allocate q_vectors for a loopback VSI. */
286 	if (vsi->type == ICE_VSI_LB)
287 		return 0;
288 
289 	/* allocate memory for q_vector pointers */
290 	vsi->q_vectors = devm_kcalloc(&pf->pdev->dev, vsi->num_q_vectors,
291 				      sizeof(*vsi->q_vectors), GFP_KERNEL);
292 	if (!vsi->q_vectors)
293 		goto err_vectors;
294 
295 	return 0;
296 
297 err_vectors:
298 	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
299 err_rxq_map:
300 	devm_kfree(&pf->pdev->dev, vsi->txq_map);
301 err_txq_map:
302 	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
303 err_rings:
304 	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
305 	return -ENOMEM;
306 }
307 
308 /**
309  * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
310  * @vsi: the VSI being configured
311  */
312 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
313 {
314 	switch (vsi->type) {
315 	case ICE_VSI_PF:
316 		/* fall through */
317 	case ICE_VSI_LB:
318 		vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
319 		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
320 		break;
321 	default:
322 		dev_dbg(&vsi->back->pdev->dev,
323 			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
324 			vsi->type);
325 		break;
326 	}
327 }
328 
329 /**
330  * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
331  * @vsi: the VSI being configured
332  * @vf_id: ID of the VF being configured
333  */
336 static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
337 {
338 	struct ice_pf *pf = vsi->back;
339 	struct ice_vf *vf = NULL;
340 
341 	if (vsi->type == ICE_VSI_VF)
342 		vsi->vf_id = vf_id;
343 
344 	switch (vsi->type) {
345 	case ICE_VSI_PF:
346 		vsi->alloc_txq = pf->num_lan_tx;
347 		vsi->alloc_rxq = pf->num_lan_rx;
348 		vsi->num_q_vectors = max_t(int, pf->num_lan_rx, pf->num_lan_tx);
349 		break;
350 	case ICE_VSI_VF:
351 		vf = &pf->vf[vsi->vf_id];
352 		vsi->alloc_txq = vf->num_vf_qs;
353 		vsi->alloc_rxq = vf->num_vf_qs;
354 		/* pf->num_vf_msix includes (VF miscellaneous vector +
355 		 * data queue interrupts). Since vsi->num_q_vectors is the
356 		 * number of queue vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
357 		 * original vector count
358 		 */
359 		vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
360 		break;
361 	case ICE_VSI_LB:
362 		vsi->alloc_txq = 1;
363 		vsi->alloc_rxq = 1;
364 		break;
365 	default:
366 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
367 		break;
368 	}
369 
370 	ice_vsi_set_num_desc(vsi);
371 }
372 
373 /**
374  * ice_get_free_slot - get the next free (NULL) location index in array
375  * @array: array to search
376  * @size: size of the array
377  * @curr: last known occupied index to be used as a search hint
378  *
379  * void * is being used to keep the functionality generic. This lets us use this
380  * function on any array of pointers.
381  */
382 static int ice_get_free_slot(void *array, int size, int curr)
383 {
384 	int **tmp_array = (int **)array;
385 	int next;
386 
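	/* try the slot right after the hint first; otherwise scan the array */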
387 	if (curr < (size - 1) && !tmp_array[curr + 1]) {
388 		next = curr + 1;
389 	} else {
390 		int i = 0;
391 
392 		while ((i < size) && (tmp_array[i]))
393 			i++;
394 		if (i == size)
395 			next = ICE_NO_VSI;
396 		else
397 			next = i;
398 	}
399 	return next;
400 }
401 
402 /**
403  * ice_vsi_delete - delete a VSI from the switch
404  * @vsi: pointer to VSI being removed
405  */
406 void ice_vsi_delete(struct ice_vsi *vsi)
407 {
408 	struct ice_pf *pf = vsi->back;
409 	struct ice_vsi_ctx *ctxt;
410 	enum ice_status status;
411 
412 	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
413 	if (!ctxt)
414 		return;
415 
416 	if (vsi->type == ICE_VSI_VF)
417 		ctxt->vf_num = vsi->vf_id;
418 	ctxt->vsi_num = vsi->vsi_num;
419 
420 	memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
421 
422 	status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
423 	if (status)
424 		dev_err(&pf->pdev->dev, "Failed to delete VSI %i in FW\n",
425 			vsi->vsi_num);
426 
427 	devm_kfree(&pf->pdev->dev, ctxt);
428 }
429 
430 /**
431  * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
432  * @vsi: pointer to VSI being cleared
433  */
434 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
435 {
436 	struct ice_pf *pf = vsi->back;
437 
438 	/* free the ring and vector containers */
439 	if (vsi->q_vectors) {
440 		devm_kfree(&pf->pdev->dev, vsi->q_vectors);
441 		vsi->q_vectors = NULL;
442 	}
443 	if (vsi->tx_rings) {
444 		devm_kfree(&pf->pdev->dev, vsi->tx_rings);
445 		vsi->tx_rings = NULL;
446 	}
447 	if (vsi->rx_rings) {
448 		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
449 		vsi->rx_rings = NULL;
450 	}
451 	if (vsi->txq_map) {
452 		devm_kfree(&pf->pdev->dev, vsi->txq_map);
453 		vsi->txq_map = NULL;
454 	}
455 	if (vsi->rxq_map) {
456 		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
457 		vsi->rxq_map = NULL;
458 	}
459 }
460 
461 /**
462  * ice_vsi_clear - clean up and deallocate the provided VSI
463  * @vsi: pointer to VSI being cleared
464  *
465  * This deallocates the VSI's queue resources, removes it from the PF's
466  * VSI array if necessary, and deallocates the VSI
467  *
468  * Returns 0 on success, negative on failure
469  */
470 int ice_vsi_clear(struct ice_vsi *vsi)
471 {
472 	struct ice_pf *pf = NULL;
473 
474 	if (!vsi)
475 		return 0;
476 
477 	if (!vsi->back)
478 		return -EINVAL;
479 
480 	pf = vsi->back;
481 
482 	if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
483 		dev_dbg(&pf->pdev->dev, "vsi does not exist at pf->vsi[%d]\n",
484 			vsi->idx);
485 		return -EINVAL;
486 	}
487 
488 	mutex_lock(&pf->sw_mutex);
489 	/* updates the PF for this cleared VSI */
490 
491 	pf->vsi[vsi->idx] = NULL;
492 	if (vsi->idx < pf->next_vsi)
493 		pf->next_vsi = vsi->idx;
494 
495 	ice_vsi_free_arrays(vsi);
496 	mutex_unlock(&pf->sw_mutex);
497 	devm_kfree(&pf->pdev->dev, vsi);
498 
499 	return 0;
500 }
501 
502 /**
503  * ice_msix_clean_rings - MSI-X mode interrupt handler
504  * @irq: interrupt number
505  * @data: pointer to a q_vector
506  */
507 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
508 {
509 	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
510 
511 	if (!q_vector->tx.ring && !q_vector->rx.ring)
512 		return IRQ_HANDLED;
513 
514 	napi_schedule(&q_vector->napi);
515 
516 	return IRQ_HANDLED;
517 }
518 
519 /**
520  * ice_vsi_alloc - Allocates the next available struct VSI in the PF
521  * @pf: board private structure
522  * @type: type of VSI
523  * @vf_id: ID of the VF being configured
524  *
525  * returns a pointer to a VSI on success, NULL on failure.
526  */
527 static struct ice_vsi *
528 ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type, u16 vf_id)
529 {
530 	struct ice_vsi *vsi = NULL;
531 
532 	/* Need to protect the allocation of the VSIs at the PF level */
533 	mutex_lock(&pf->sw_mutex);
534 
535 	/* If we have already allocated our maximum number of VSIs,
536 	 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
537 	 * is available to be populated
538 	 */
539 	if (pf->next_vsi == ICE_NO_VSI) {
540 		dev_dbg(&pf->pdev->dev, "out of VSI slots!\n");
541 		goto unlock_pf;
542 	}
543 
544 	vsi = devm_kzalloc(&pf->pdev->dev, sizeof(*vsi), GFP_KERNEL);
545 	if (!vsi)
546 		goto unlock_pf;
547 
548 	vsi->type = type;
549 	vsi->back = pf;
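	/* a newly allocated VSI starts in the down state */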
550 	set_bit(__ICE_DOWN, vsi->state);
551 	vsi->idx = pf->next_vsi;
552 	vsi->work_lmt = ICE_DFLT_IRQ_WORK;
553 
554 	if (type == ICE_VSI_VF)
555 		ice_vsi_set_num_qs(vsi, vf_id);
556 	else
557 		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
558 
559 	switch (vsi->type) {
560 	case ICE_VSI_PF:
561 		if (ice_vsi_alloc_arrays(vsi))
562 			goto err_rings;
563 
564 		/* Setup default MSIX irq handler for VSI */
565 		vsi->irq_handler = ice_msix_clean_rings;
566 		break;
567 	case ICE_VSI_VF:
568 		if (ice_vsi_alloc_arrays(vsi))
569 			goto err_rings;
570 		break;
571 	case ICE_VSI_LB:
572 		if (ice_vsi_alloc_arrays(vsi))
573 			goto err_rings;
574 		break;
575 	default:
576 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
577 		goto unlock_pf;
578 	}
579 
580 	/* fill VSI slot in the PF struct */
581 	pf->vsi[pf->next_vsi] = vsi;
582 
583 	/* prepare pf->next_vsi for next use */
584 	pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
585 					 pf->next_vsi);
586 	goto unlock_pf;
587 
588 err_rings:
589 	devm_kfree(&pf->pdev->dev, vsi);
590 	vsi = NULL;
591 unlock_pf:
592 	mutex_unlock(&pf->sw_mutex);
593 	return vsi;
594 }
595 
596 /**
597  * __ice_vsi_get_qs_contig - Assign a contiguous chunk of queues to VSI
598  * @qs_cfg: gathered variables needed for PF->VSI queues assignment
599  *
600  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
601  */
602 static int __ice_vsi_get_qs_contig(struct ice_qs_cfg *qs_cfg)
603 {
604 	int offset, i;
605 
606 	mutex_lock(qs_cfg->qs_mutex);
607 	offset = bitmap_find_next_zero_area(qs_cfg->pf_map, qs_cfg->pf_map_size,
608 					    0, qs_cfg->q_count, 0);
609 	if (offset >= qs_cfg->pf_map_size) {
610 		mutex_unlock(qs_cfg->qs_mutex);
611 		return -ENOMEM;
612 	}
613 
614 	bitmap_set(qs_cfg->pf_map, offset, qs_cfg->q_count);
615 	for (i = 0; i < qs_cfg->q_count; i++)
616 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = i + offset;
617 	mutex_unlock(qs_cfg->qs_mutex);
618 
619 	return 0;
620 }
621 
622 /**
623  * __ice_vsi_get_qs_sc - Assign scattered queues from PF to VSI
624  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
625  *
626  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
627  */
628 static int __ice_vsi_get_qs_sc(struct ice_qs_cfg *qs_cfg)
629 {
630 	int i, index = 0;
631 
632 	mutex_lock(qs_cfg->qs_mutex);
633 	for (i = 0; i < qs_cfg->q_count; i++) {
634 		index = find_next_zero_bit(qs_cfg->pf_map,
635 					   qs_cfg->pf_map_size, index);
636 		if (index >= qs_cfg->pf_map_size)
637 			goto err_scatter;
638 		set_bit(index, qs_cfg->pf_map);
639 		qs_cfg->vsi_map[i + qs_cfg->vsi_map_offset] = index;
640 	}
641 	mutex_unlock(qs_cfg->qs_mutex);
642 
643 	return 0;
644 err_scatter:
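	/* roll back any queue assignments made before the failure */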
645 	for (index = 0; index < i; index++) {
646 		clear_bit(qs_cfg->vsi_map[index], qs_cfg->pf_map);
647 		qs_cfg->vsi_map[index + qs_cfg->vsi_map_offset] = 0;
648 	}
649 	mutex_unlock(qs_cfg->qs_mutex);
650 
651 	return -ENOMEM;
652 }
653 
654 /**
655  * __ice_vsi_get_qs - helper function for assigning queues from PF to VSI
656  * @qs_cfg: gathered variables needed for pf->vsi queues assignment
657  *
658  * This function first tries to find contiguous space. If it is not successful,
659  * it tries with the scatter approach.
660  *
661  * Return 0 on success and -ENOMEM if no space is left in the PF queue bitmap
662  */
663 static int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg)
664 {
665 	int ret = 0;
666 
667 	ret = __ice_vsi_get_qs_contig(qs_cfg);
668 	if (ret) {
669 		/* contig failed, so try with scatter approach */
670 		qs_cfg->mapping_mode = ICE_VSI_MAP_SCATTER;
671 		qs_cfg->q_count = min_t(u16, qs_cfg->q_count,
672 					qs_cfg->scatter_count);
673 		ret = __ice_vsi_get_qs_sc(qs_cfg);
674 	}
675 	return ret;
676 }
677 
678 /**
679  * ice_vsi_get_qs - Assign queues from PF to VSI
680  * @vsi: the VSI to assign queues to
681  *
682  * Returns 0 on success and a negative value on error
683  */
684 static int ice_vsi_get_qs(struct ice_vsi *vsi)
685 {
686 	struct ice_pf *pf = vsi->back;
687 	struct ice_qs_cfg tx_qs_cfg = {
688 		.qs_mutex = &pf->avail_q_mutex,
689 		.pf_map = pf->avail_txqs,
690 		.pf_map_size = pf->max_pf_txqs,
691 		.q_count = vsi->alloc_txq,
692 		.scatter_count = ICE_MAX_SCATTER_TXQS,
693 		.vsi_map = vsi->txq_map,
694 		.vsi_map_offset = 0,
695 		.mapping_mode = vsi->tx_mapping_mode
696 	};
697 	struct ice_qs_cfg rx_qs_cfg = {
698 		.qs_mutex = &pf->avail_q_mutex,
699 		.pf_map = pf->avail_rxqs,
700 		.pf_map_size = pf->max_pf_rxqs,
701 		.q_count = vsi->alloc_rxq,
702 		.scatter_count = ICE_MAX_SCATTER_RXQS,
703 		.vsi_map = vsi->rxq_map,
704 		.vsi_map_offset = 0,
705 		.mapping_mode = vsi->rx_mapping_mode
706 	};
707 	int ret = 0;
708 
709 	vsi->tx_mapping_mode = ICE_VSI_MAP_CONTIG;
710 	vsi->rx_mapping_mode = ICE_VSI_MAP_CONTIG;
711 
712 	ret = __ice_vsi_get_qs(&tx_qs_cfg);
713 	if (!ret)
714 		ret = __ice_vsi_get_qs(&rx_qs_cfg);
715 
716 	return ret;
717 }
718 
719 /**
720  * ice_vsi_put_qs - Release queues from VSI to PF
721  * @vsi: the VSI that is going to release queues
722  */
723 void ice_vsi_put_qs(struct ice_vsi *vsi)
724 {
725 	struct ice_pf *pf = vsi->back;
726 	int i;
727 
728 	mutex_lock(&pf->avail_q_mutex);
729 
730 	for (i = 0; i < vsi->alloc_txq; i++) {
731 		clear_bit(vsi->txq_map[i], pf->avail_txqs);
732 		vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
733 	}
734 
735 	for (i = 0; i < vsi->alloc_rxq; i++) {
736 		clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
737 		vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
738 	}
739 
740 	mutex_unlock(&pf->avail_q_mutex);
741 }
742 
743 /**
744  * ice_rss_clean - Delete RSS related VSI structures that hold user inputs
745  * @vsi: the VSI being removed
746  */
747 static void ice_rss_clean(struct ice_vsi *vsi)
748 {
749 	struct ice_pf *pf;
750 
751 	pf = vsi->back;
752 
753 	if (vsi->rss_hkey_user)
754 		devm_kfree(&pf->pdev->dev, vsi->rss_hkey_user);
755 	if (vsi->rss_lut_user)
756 		devm_kfree(&pf->pdev->dev, vsi->rss_lut_user);
757 }
758 
759 /**
760  * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
761  * @vsi: the VSI being configured
762  */
763 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
764 {
765 	struct ice_hw_common_caps *cap;
766 	struct ice_pf *pf = vsi->back;
767 
768 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
769 		vsi->rss_size = 1;
770 		return;
771 	}
772 
773 	cap = &pf->hw.func_caps.common_cap;
774 	switch (vsi->type) {
775 	case ICE_VSI_PF:
776 		/* PF VSI will inherit RSS instance of PF */
777 		vsi->rss_table_size = cap->rss_table_size;
778 		vsi->rss_size = min_t(int, num_online_cpus(),
779 				      BIT(cap->rss_table_entry_width));
780 		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
781 		break;
782 	case ICE_VSI_VF:
783 		/* VF VSI will get a small RSS table.
784 		 * For VSI_LUT, LUT size should be set to 64 bytes.
785 		 */
786 		vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
787 		vsi->rss_size = min_t(int, num_online_cpus(),
788 				      BIT(cap->rss_table_entry_width));
789 		vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
790 		break;
791 	case ICE_VSI_LB:
792 		break;
793 	default:
794 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n",
795 			 vsi->type);
796 		break;
797 	}
798 }
799 
800 /**
801  * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
802  * @ctxt: the VSI context being set
803  *
804  * This initializes a default VSI context for all sections except the Queues.
805  */
806 static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
807 {
808 	u32 table = 0;
809 
810 	memset(&ctxt->info, 0, sizeof(ctxt->info));
811 	/* VSIs should be allocated from the shared pool */
812 	ctxt->alloc_from_pool = true;
813 	/* Src pruning enabled by default */
814 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
815 	/* Traffic from VSI can be sent to LAN */
816 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
817 	/* By default, bits 3 and 4 in vlan_flags are 0s, which results in legacy
818 	 * behavior (show VLAN, DEI, and UP) in the descriptor. Also, allow all
819 	 * packets untagged/tagged.
820 	 */
821 	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
822 				  ICE_AQ_VSI_VLAN_MODE_M) >>
823 				 ICE_AQ_VSI_VLAN_MODE_S);
824 	/* Have 1:1 UP mapping for both ingress/egress tables */
825 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
826 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
827 	table |= ICE_UP_TABLE_TRANSLATE(2, 2);
828 	table |= ICE_UP_TABLE_TRANSLATE(3, 3);
829 	table |= ICE_UP_TABLE_TRANSLATE(4, 4);
830 	table |= ICE_UP_TABLE_TRANSLATE(5, 5);
831 	table |= ICE_UP_TABLE_TRANSLATE(6, 6);
832 	table |= ICE_UP_TABLE_TRANSLATE(7, 7);
833 	ctxt->info.ingress_table = cpu_to_le32(table);
834 	ctxt->info.egress_table = cpu_to_le32(table);
835 	/* Have 1:1 UP mapping for outer to inner UP table */
836 	ctxt->info.outer_up_table = cpu_to_le32(table);
837 	/* No outer tag support; outer_tag_flags remains zero */
838 }
839 
840 /**
841  * ice_vsi_setup_q_map - Setup a VSI queue map
842  * @vsi: the VSI being configured
843  * @ctxt: VSI context structure
844  */
845 static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
846 {
847 	u16 offset = 0, qmap = 0, tx_count = 0;
848 	u16 qcount_tx = vsi->alloc_txq;
849 	u16 qcount_rx = vsi->alloc_rxq;
850 	u16 tx_numq_tc, rx_numq_tc;
851 	u16 pow = 0, max_rss = 0;
852 	bool ena_tc0 = false;
853 	u8 netdev_tc = 0;
854 	int i;
855 
856 	/* at least TC0 should be enabled by default */
857 	if (vsi->tc_cfg.numtc) {
858 		if (!(vsi->tc_cfg.ena_tc & BIT(0)))
859 			ena_tc0 = true;
860 	} else {
861 		ena_tc0 = true;
862 	}
863 
864 	if (ena_tc0) {
865 		vsi->tc_cfg.numtc++;
866 		vsi->tc_cfg.ena_tc |= 1;
867 	}
868 
869 	rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc;
870 	if (!rx_numq_tc)
871 		rx_numq_tc = 1;
872 	tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc;
873 	if (!tx_numq_tc)
874 		tx_numq_tc = 1;
875 
876 	/* TC mapping is a function of the number of Rx queues assigned to the
877 	 * VSI for each traffic class and the offset of these queues.
878 	 * The first 10 bits hold the queue offset for TC0, the next 4 bits
879 	 * the number of queues allocated to TC0. The number of queues is a
880 	 * power-of-2.
881 	 *
882 	 * If a TC is not enabled, the queue offset is set to 0 and one queue
883 	 * is allocated; this way traffic for that TC goes to the default queue.
884 	 *
885 	 * Setup number and offset of Rx queues for all TCs for the VSI
886 	 */
887 
888 	qcount_rx = rx_numq_tc;
889 
890 	/* qcount will change if RSS is enabled */
891 	if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) {
892 		if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) {
893 			if (vsi->type == ICE_VSI_PF)
894 				max_rss = ICE_MAX_LG_RSS_QS;
895 			else
896 				max_rss = ICE_MAX_SMALL_RSS_QS;
897 			qcount_rx = min_t(int, rx_numq_tc, max_rss);
898 			qcount_rx = min_t(int, qcount_rx, vsi->rss_size);
899 		}
900 	}
901 
902 	/* find the (rounded up) power-of-2 of qcount */
903 	pow = order_base_2(qcount_rx);
904 
905 	ice_for_each_traffic_class(i) {
906 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
907 			/* TC is not enabled */
908 			vsi->tc_cfg.tc_info[i].qoffset = 0;
909 			vsi->tc_cfg.tc_info[i].qcount_rx = 1;
910 			vsi->tc_cfg.tc_info[i].qcount_tx = 1;
911 			vsi->tc_cfg.tc_info[i].netdev_tc = 0;
912 			ctxt->info.tc_mapping[i] = 0;
913 			continue;
914 		}
915 
916 		/* TC is enabled */
917 		vsi->tc_cfg.tc_info[i].qoffset = offset;
918 		vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
919 		vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc;
920 		vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
921 
922 		qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
923 			ICE_AQ_VSI_TC_Q_OFFSET_M) |
924 			((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
925 			 ICE_AQ_VSI_TC_Q_NUM_M);
926 		offset += qcount_rx;
927 		tx_count += tx_numq_tc;
928 		ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
929 	}
930 
931 	/* if offset is non-zero, it was calculated correctly based on the
932 	 * enabled TCs for this VSI; otherwise fall back to qcount_rx, which
933 	 * is always correct and non-zero because it is based on the VSI's
934 	 * allocated Rx queues, which is at least 1 (hence num_txq will also
935 	 * be at least 1)
936 	 */
937 	if (offset)
938 		vsi->num_rxq = offset;
939 	else
940 		vsi->num_rxq = qcount_rx;
941 
942 	vsi->num_txq = tx_count;
943 
944 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
945 		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
946 		/* since there is a chance that num_rxq could have been changed
947 		 * in the above for loop, make num_txq equal to num_rxq.
948 		 */
949 		vsi->num_txq = vsi->num_rxq;
950 	}
951 
952 	/* Rx queue mapping */
953 	ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
954 	/* q_mapping buffer holds the info for the first queue allocated for
955 	 * this VSI in the PF space and also the number of queues associated
956 	 * with this VSI.
957 	 */
958 	ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
959 	ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
960 }
961 
962 /**
963  * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
964  * @ctxt: the VSI context being set
965  * @vsi: the VSI being configured
966  */
967 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
968 {
969 	u8 lut_type, hash_type;
970 	struct ice_pf *pf;
971 
972 	pf = vsi->back;
973 
974 	switch (vsi->type) {
975 	case ICE_VSI_PF:
976 		/* PF VSI will inherit RSS instance of PF */
977 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
978 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
979 		break;
980 	case ICE_VSI_VF:
981 		/* VF VSI gets a small RSS table which is a VSI LUT type */
982 		lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
983 		hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
984 		break;
985 	case ICE_VSI_LB:
986 		dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
987 		return;
988 	default:
989 		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
990 		return;
991 	}
992 
993 	ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
994 				ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
995 				((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
996 				 ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
997 }
998 
999 /**
1000  * ice_vsi_init - Create and initialize a VSI
1001  * @vsi: the VSI being configured
1002  *
1003  * This initializes a VSI context depending on the VSI type to be added and
1004  * passes it down to the add_vsi aq command to create a new VSI.
1005  */
1006 static int ice_vsi_init(struct ice_vsi *vsi)
1007 {
1008 	struct ice_pf *pf = vsi->back;
1009 	struct ice_hw *hw = &pf->hw;
1010 	struct ice_vsi_ctx *ctxt;
1011 	int ret = 0;
1012 
1013 	ctxt = devm_kzalloc(&pf->pdev->dev, sizeof(*ctxt), GFP_KERNEL);
1014 	if (!ctxt)
1015 		return -ENOMEM;
1016 
1017 	ctxt->info = vsi->info;
1018 	switch (vsi->type) {
1019 	case ICE_VSI_LB:
1020 		/* fall through */
1021 	case ICE_VSI_PF:
1022 		ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1023 		break;
1024 	case ICE_VSI_VF:
1025 		ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1026 		/* VF number here is the absolute VF number (0-255) */
1027 		ctxt->vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
1028 		break;
1029 	default:
		/* free the context to avoid leaking it on this error path */
		devm_kfree(&pf->pdev->dev, ctxt);
1030 		return -ENODEV;
1031 	}
1032 
1033 	ice_set_dflt_vsi_ctx(ctxt);
1034 	/* if the switch is in VEB mode, allow VSI loopback */
1035 	if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1036 		ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1037 
1038 	/* Set LUT type and HASH type if RSS is enabled */
1039 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
1040 		ice_set_rss_vsi_ctx(ctxt, vsi);
1041 
1042 	ctxt->info.sw_id = vsi->port_info->sw_id;
1043 	ice_vsi_setup_q_map(vsi, ctxt);
1044 
1045 	/* Enable MAC Antispoof with new VSI being initialized or updated */
1046 	if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
1047 		ctxt->info.valid_sections |=
1048 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1049 		ctxt->info.sec_flags |=
1050 			ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
1051 	}
1052 
1053 	/* Allow control frames out of main VSI */
1054 	if (vsi->type == ICE_VSI_PF) {
1055 		ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1056 		ctxt->info.valid_sections |=
1057 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1058 	}
1059 
1060 	ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1061 	if (ret) {
1062 		dev_err(&pf->pdev->dev,
1063 			"Add VSI failed, err %d\n", ret);
		/* free the context to avoid leaking it on this error path */
		devm_kfree(&pf->pdev->dev, ctxt);
1064 		return -EIO;
1065 	}
1066 
1067 	/* keep context for update VSI operations */
1068 	vsi->info = ctxt->info;
1069 
1070 	/* record VSI number returned */
1071 	vsi->vsi_num = ctxt->vsi_num;
1072 
1073 	devm_kfree(&pf->pdev->dev, ctxt);
1074 	return ret;
1075 }
1076 
1077 /**
1078  * ice_free_q_vector - Free memory allocated for a specific interrupt vector
1079  * @vsi: VSI having the memory freed
1080  * @v_idx: index of the vector to be freed
1081  */
1082 static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
1083 {
1084 	struct ice_q_vector *q_vector;
1085 	struct ice_pf *pf = vsi->back;
1086 	struct ice_ring *ring;
1087 
1088 	if (!vsi->q_vectors[v_idx]) {
1089 		dev_dbg(&pf->pdev->dev, "Queue vector at index %d not found\n",
1090 			v_idx);
1091 		return;
1092 	}
1093 	q_vector = vsi->q_vectors[v_idx];
1094 
1095 	ice_for_each_ring(ring, q_vector->tx)
1096 		ring->q_vector = NULL;
1097 	ice_for_each_ring(ring, q_vector->rx)
1098 		ring->q_vector = NULL;
1099 
1100 	/* only VSIs with an associated netdev are set up with NAPI */
1101 	if (vsi->netdev)
1102 		netif_napi_del(&q_vector->napi);
1103 
1104 	devm_kfree(&pf->pdev->dev, q_vector);
1105 	vsi->q_vectors[v_idx] = NULL;
1106 }
1107 
1108 /**
1109  * ice_vsi_free_q_vectors - Free memory allocated for interrupt vectors
1110  * @vsi: the VSI having memory freed
1111  */
1112 void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
1113 {
1114 	int v_idx;
1115 
1116 	ice_for_each_q_vector(vsi, v_idx)
1117 		ice_free_q_vector(vsi, v_idx);
1118 }
1119 
1120 /**
1121  * ice_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
1122  * @vsi: the VSI being configured
1123  * @v_idx: index of the vector in the VSI struct
1124  *
1125  * We allocate one q_vector. If allocation fails we return -ENOMEM.
1126  */
1127 static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
1128 {
1129 	struct ice_pf *pf = vsi->back;
1130 	struct ice_q_vector *q_vector;
1131 
1132 	/* allocate q_vector */
1133 	q_vector = devm_kzalloc(&pf->pdev->dev, sizeof(*q_vector), GFP_KERNEL);
1134 	if (!q_vector)
1135 		return -ENOMEM;
1136 
1137 	q_vector->vsi = vsi;
1138 	q_vector->v_idx = v_idx;
1139 	if (vsi->type == ICE_VSI_VF)
1140 		goto out;
1141 	/* only set affinity_mask if the CPU is online */
1142 	if (cpu_online(v_idx))
1143 		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
1144 
1145 	/* This will not be called in the driver load path because the netdev
1146 	 * will not be created yet. All other cases will register the NAPI
1147 	 * handler here (i.e. resume, reset/rebuild, etc.)
1148 	 */
1149 	if (vsi->netdev)
1150 		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
1151 			       NAPI_POLL_WEIGHT);
1152 
1153 out:
1154 	/* tie q_vector and VSI together */
1155 	vsi->q_vectors[v_idx] = q_vector;
1156 
1157 	return 0;
1158 }
1159 
1160 /**
1161  * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
1162  * @vsi: the VSI being configured
1163  *
1164  * We allocate one q_vector per queue interrupt. If allocation fails we
1165  * return -ENOMEM.
1166  */
1167 static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
1168 {
1169 	struct ice_pf *pf = vsi->back;
1170 	int v_idx = 0, num_q_vectors;
1171 	int err;
1172 
1173 	if (vsi->q_vectors[0]) {
1174 		dev_dbg(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
1175 			vsi->vsi_num);
1176 		return -EEXIST;
1177 	}
1178 
1179 	num_q_vectors = vsi->num_q_vectors;
1180 
1181 	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
1182 		err = ice_vsi_alloc_q_vector(vsi, v_idx);
1183 		if (err)
1184 			goto err_out;
1185 	}
1186 
1187 	return 0;
1188 
1189 err_out:
1190 	while (v_idx--)
1191 		ice_free_q_vector(vsi, v_idx);
1192 
1193 	dev_err(&pf->pdev->dev,
1194 		"Failed to allocate %d q_vector for VSI %d, ret=%d\n",
1195 		vsi->num_q_vectors, vsi->vsi_num, err);
1196 	vsi->num_q_vectors = 0;
1197 	return err;
1198 }
1199 
1200 /**
1201  * ice_vsi_setup_vector_base - Set up the base vector for the given VSI
1202  * @vsi: ptr to the VSI
1203  *
1204  * This should only be called after ice_vsi_alloc() which allocates the
1205  * corresponding SW VSI structure and initializes num_queue_pairs for the
1206  * newly allocated VSI.
1207  *
1208  * Returns 0 on success or negative on failure
1209  */
1210 static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
1211 {
1212 	struct ice_pf *pf = vsi->back;
1213 	u16 num_q_vectors;
1214 
1215 	/* SRIOV doesn't grab irq_tracker entries for each VSI */
1216 	if (vsi->type == ICE_VSI_VF)
1217 		return 0;
1218 
1219 	if (vsi->base_vector) {
1220 		dev_dbg(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
1221 			vsi->vsi_num, vsi->base_vector);
1222 		return -EEXIST;
1223 	}
1224 
1225 	num_q_vectors = vsi->num_q_vectors;
1226 	/* reserve slots from OS requested IRQs */
1227 	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
1228 				       vsi->idx);
1229 	if (vsi->base_vector < 0) {
1230 		dev_err(&pf->pdev->dev,
1231 			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
1232 			num_q_vectors, vsi->vsi_num, vsi->base_vector);
1233 		return -ENOENT;
1234 	}
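	/* account for the vectors just reserved from the SW IRQ tracker */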
1235 	pf->num_avail_sw_msix -= num_q_vectors;
1236 
1237 	return 0;
1238 }
1239 
1240 /**
1241  * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1242  * @vsi: the VSI having rings deallocated
1243  */
1244 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1245 {
1246 	int i;
1247 
1248 	if (vsi->tx_rings) {
1249 		for (i = 0; i < vsi->alloc_txq; i++) {
1250 			if (vsi->tx_rings[i]) {
1251 				kfree_rcu(vsi->tx_rings[i], rcu);
1252 				vsi->tx_rings[i] = NULL;
1253 			}
1254 		}
1255 	}
1256 	if (vsi->rx_rings) {
1257 		for (i = 0; i < vsi->alloc_rxq; i++) {
1258 			if (vsi->rx_rings[i]) {
1259 				kfree_rcu(vsi->rx_rings[i], rcu);
1260 				vsi->rx_rings[i] = NULL;
1261 			}
1262 		}
1263 	}
1264 }
1265 
1266 /**
1267  * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1268  * @vsi: VSI which is having rings allocated
1269  */
1270 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1271 {
1272 	struct ice_pf *pf = vsi->back;
1273 	int i;
1274 
1275 	/* Allocate Tx rings */
1276 	for (i = 0; i < vsi->alloc_txq; i++) {
1277 		struct ice_ring *ring;
1278 
1279 		/* allocate with kzalloc(), free with kfree_rcu() */
1280 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1281 
1282 		if (!ring)
1283 			goto err_out;
1284 
1285 		ring->q_index = i;
1286 		ring->reg_idx = vsi->txq_map[i];
1287 		ring->ring_active = false;
1288 		ring->vsi = vsi;
1289 		ring->dev = &pf->pdev->dev;
1290 		ring->count = vsi->num_tx_desc;
1291 		vsi->tx_rings[i] = ring;
1292 	}
1293 
1294 	/* Allocate Rx rings */
1295 	for (i = 0; i < vsi->alloc_rxq; i++) {
1296 		struct ice_ring *ring;
1297 
1298 		/* allocate with kzalloc(), free with kfree_rcu() */
1299 		ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1300 		if (!ring)
1301 			goto err_out;
1302 
1303 		ring->q_index = i;
1304 		ring->reg_idx = vsi->rxq_map[i];
1305 		ring->ring_active = false;
1306 		ring->vsi = vsi;
1307 		ring->netdev = vsi->netdev;
1308 		ring->dev = &pf->pdev->dev;
1309 		ring->count = vsi->num_rx_desc;
1310 		vsi->rx_rings[i] = ring;
1311 	}
1312 
1313 	return 0;
1314 
1315 err_out:
1316 	ice_vsi_clear_rings(vsi);
1317 	return -ENOMEM;
1318 }
1319 
1320 /**
1321  * ice_vsi_map_rings_to_vectors - Map VSI rings to interrupt vectors
1322  * @vsi: the VSI being configured
1323  *
1324  * This function maps descriptor rings to the queue-specific vectors allotted
1325  * through the MSI-X enabling code. On a constrained vector budget, we map Tx
1326  * and Rx rings to the vector as "efficiently" as possible.
1327  */
1328 #ifdef CONFIG_DCB
1329 void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1330 #else
1331 static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
1332 #endif /* CONFIG_DCB */
1333 {
1334 	int q_vectors = vsi->num_q_vectors;
1335 	int tx_rings_rem, rx_rings_rem;
1336 	int v_id;
1337 
1338 	/* start with all of the VSI's Tx/Rx queues still to be assigned */
1339 	tx_rings_rem = vsi->num_txq;
1340 	rx_rings_rem = vsi->num_rxq;
1341 
1342 	for (v_id = 0; v_id < q_vectors; v_id++) {
1343 		struct ice_q_vector *q_vector = vsi->q_vectors[v_id];
1344 		int tx_rings_per_v, rx_rings_per_v, q_id, q_base;
1345 
1346 		/* Tx rings mapping to vector */
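		/* rings are split evenly; earlier vectors absorb any remainder */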
1347 		tx_rings_per_v = DIV_ROUND_UP(tx_rings_rem, q_vectors - v_id);
1348 		q_vector->num_ring_tx = tx_rings_per_v;
1349 		q_vector->tx.ring = NULL;
1350 		q_vector->tx.itr_idx = ICE_TX_ITR;
1351 		q_base = vsi->num_txq - tx_rings_rem;
1352 
1353 		for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
1354 			struct ice_ring *tx_ring = vsi->tx_rings[q_id];
1355 
1356 			tx_ring->q_vector = q_vector;
1357 			tx_ring->next = q_vector->tx.ring;
1358 			q_vector->tx.ring = tx_ring;
1359 		}
1360 		tx_rings_rem -= tx_rings_per_v;
1361 
1362 		/* Rx rings mapping to vector */
1363 		rx_rings_per_v = DIV_ROUND_UP(rx_rings_rem, q_vectors - v_id);
1364 		q_vector->num_ring_rx = rx_rings_per_v;
1365 		q_vector->rx.ring = NULL;
1366 		q_vector->rx.itr_idx = ICE_RX_ITR;
1367 		q_base = vsi->num_rxq - rx_rings_rem;
1368 
1369 		for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
1370 			struct ice_ring *rx_ring = vsi->rx_rings[q_id];
1371 
1372 			rx_ring->q_vector = q_vector;
1373 			rx_ring->next = q_vector->rx.ring;
1374 			q_vector->rx.ring = rx_ring;
1375 		}
1376 		rx_rings_rem -= rx_rings_per_v;
1377 	}
1378 }
1379 
1380 /**
1381  * ice_vsi_manage_rss_lut - disable/enable RSS
1382  * @vsi: the VSI being changed
1383  * @ena: boolean value indicating if this is an enable or disable request
1384  *
1385  * For a disable request, this function zeroes out the RSS LUT; for an
1386  * enable request, it reconfigures the RSS LUT from the user-provided LUT
1387  * (if any) or the default fill.
1388  */
1389 int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1390 {
1391 	int err = 0;
1392 	u8 *lut;
1393 
1394 	lut = devm_kzalloc(&vsi->back->pdev->dev, vsi->rss_table_size,
1395 			   GFP_KERNEL);
1396 	if (!lut)
1397 		return -ENOMEM;
1398 
1399 	if (ena) {
1400 		if (vsi->rss_lut_user)
1401 			memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1402 		else
1403 			ice_fill_rss_lut(lut, vsi->rss_table_size,
1404 					 vsi->rss_size);
1405 	}
1406 
1407 	err = ice_set_rss(vsi, NULL, lut, vsi->rss_table_size);
1408 	devm_kfree(&vsi->back->pdev->dev, lut);
1409 	return err;
1410 }
1411 
1412 /**
1413  * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1414  * @vsi: VSI to be configured
1415  */
1416 static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1417 {
1418 	struct ice_aqc_get_set_rss_keys *key;
1419 	struct ice_pf *pf = vsi->back;
1420 	enum ice_status status;
1421 	int err = 0;
1422 	u8 *lut;
1423 
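	/* RSS cannot spread traffic across more queues than the VSI has */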
1424 	vsi->rss_size = min_t(int, vsi->rss_size, vsi->num_rxq);
1425 
1426 	lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
1427 	if (!lut)
1428 		return -ENOMEM;
1429 
1430 	if (vsi->rss_lut_user)
1431 		memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1432 	else
1433 		ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1434 
1435 	status = ice_aq_set_rss_lut(&pf->hw, vsi->idx, vsi->rss_lut_type, lut,
1436 				    vsi->rss_table_size);
1437 
1438 	if (status) {
1439 		dev_err(&pf->pdev->dev,
1440 			"set_rss_lut failed, error %d\n", status);
1441 		err = -EIO;
1442 		goto ice_vsi_cfg_rss_exit;
1443 	}
1444 
1445 	key = devm_kzalloc(&pf->pdev->dev, sizeof(*key), GFP_KERNEL);
1446 	if (!key) {
1447 		err = -ENOMEM;
1448 		goto ice_vsi_cfg_rss_exit;
1449 	}
1450 
1451 	if (vsi->rss_hkey_user)
1452 		memcpy(key,
1453 		       (struct ice_aqc_get_set_rss_keys *)vsi->rss_hkey_user,
1454 		       ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1455 	else
1456 		netdev_rss_key_fill((void *)key,
1457 				    ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1458 
1459 	status = ice_aq_set_rss_key(&pf->hw, vsi->idx, key);
1460 
1461 	if (status) {
1462 		dev_err(&pf->pdev->dev, "set_rss_key failed, error %d\n",
1463 			status);
1464 		err = -EIO;
1465 	}
1466 
1467 	devm_kfree(&pf->pdev->dev, key);
1468 ice_vsi_cfg_rss_exit:
1469 	devm_kfree(&pf->pdev->dev, lut);
1470 	return err;
1471 }
1472 
1473 /**
1474  * ice_add_mac_to_list - Add a MAC address filter entry to the list
1475  * @vsi: the VSI to be forwarded to
1476  * @add_list: pointer to the list which contains MAC filter entries
1477  * @macaddr: the MAC address to be added.
1478  *
1479  * Adds MAC address filter entry to the temp list
1480  *
1481  * Returns 0 on success or -ENOMEM on failure.
1482  */
1483 int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
1484 			const u8 *macaddr)
1485 {
1486 	struct ice_fltr_list_entry *tmp;
1487 	struct ice_pf *pf = vsi->back;
1488 
1489 	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_ATOMIC);
1490 	if (!tmp)
1491 		return -ENOMEM;
1492 
1493 	tmp->fltr_info.flag = ICE_FLTR_TX;
1494 	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
1495 	tmp->fltr_info.lkup_type = ICE_SW_LKUP_MAC;
1496 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1497 	tmp->fltr_info.vsi_handle = vsi->idx;
1498 	ether_addr_copy(tmp->fltr_info.l_data.mac.mac_addr, macaddr);
1499 
1500 	INIT_LIST_HEAD(&tmp->list_entry);
1501 	list_add(&tmp->list_entry, add_list);
1502 
1503 	return 0;
1504 }
1505 
1506 /**
1507  * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1508  * @vsi: the VSI to be updated
1509  */
1510 void ice_update_eth_stats(struct ice_vsi *vsi)
1511 {
1512 	struct ice_eth_stats *prev_es, *cur_es;
1513 	struct ice_hw *hw = &vsi->back->hw;
1514 	u16 vsi_num = vsi->vsi_num;    /* HW absolute index of a VSI */
1515 
1516 	prev_es = &vsi->eth_stats_prev;
1517 	cur_es = &vsi->eth_stats;
1518 
1519 	ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1520 			  &prev_es->rx_bytes, &cur_es->rx_bytes);
1521 
1522 	ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1523 			  &prev_es->rx_unicast, &cur_es->rx_unicast);
1524 
1525 	ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1526 			  &prev_es->rx_multicast, &cur_es->rx_multicast);
1527 
1528 	ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1529 			  &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1530 
1531 	ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1532 			  &prev_es->rx_discards, &cur_es->rx_discards);
1533 
1534 	ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1535 			  &prev_es->tx_bytes, &cur_es->tx_bytes);
1536 
1537 	ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1538 			  &prev_es->tx_unicast, &cur_es->tx_unicast);
1539 
1540 	ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1541 			  &prev_es->tx_multicast, &cur_es->tx_multicast);
1542 
1543 	ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1544 			  &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1545 
1546 	ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1547 			  &prev_es->tx_errors, &cur_es->tx_errors);
1548 
1549 	vsi->stat_offsets_loaded = true;
1550 }
1551 
1552 /**
1553  * ice_free_fltr_list - free filter lists helper
1554  * @dev: pointer to the device struct
1555  * @h: pointer to the list head to be freed
1556  *
1557  * Helper function to free filter lists previously created using
1558  * ice_add_mac_to_list
1559  */
1560 void ice_free_fltr_list(struct device *dev, struct list_head *h)
1561 {
1562 	struct ice_fltr_list_entry *e, *tmp;
1563 
1564 	list_for_each_entry_safe(e, tmp, h, list_entry) {
1565 		list_del(&e->list_entry);
1566 		devm_kfree(dev, e);
1567 	}
1568 }
1569 
1570 /**
1571  * ice_vsi_add_vlan - Add VSI membership for given VLAN
1572  * @vsi: the VSI being configured
1573  * @vid: VLAN ID to be added
1574  */
1575 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid)
1576 {
1577 	struct ice_fltr_list_entry *tmp;
1578 	struct ice_pf *pf = vsi->back;
1579 	LIST_HEAD(tmp_add_list);
1580 	enum ice_status status;
1581 	int err = 0;
1582 
1583 	tmp = devm_kzalloc(&pf->pdev->dev, sizeof(*tmp), GFP_KERNEL);
1584 	if (!tmp)
1585 		return -ENOMEM;
1586 
1587 	tmp->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1588 	tmp->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1589 	tmp->fltr_info.flag = ICE_FLTR_TX;
1590 	tmp->fltr_info.src_id = ICE_SRC_ID_VSI;
1591 	tmp->fltr_info.vsi_handle = vsi->idx;
1592 	tmp->fltr_info.l_data.vlan.vlan_id = vid;
1593 
1594 	INIT_LIST_HEAD(&tmp->list_entry);
1595 	list_add(&tmp->list_entry, &tmp_add_list);
1596 
1597 	status = ice_add_vlan(&pf->hw, &tmp_add_list);
1598 	if (status) {
1599 		err = -ENODEV;
1600 		dev_err(&pf->pdev->dev, "Failure Adding VLAN %d on VSI %i\n",
1601 			vid, vsi->vsi_num);
1602 	}
1603 
1604 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1605 	return err;
1606 }
1607 
1608 /**
1609  * ice_vsi_kill_vlan - Remove VSI membership for a given VLAN
1610  * @vsi: the VSI being configured
1611  * @vid: VLAN ID to be removed
1612  *
1613  * Returns 0 on success and negative on failure
1614  */
1615 int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
1616 {
1617 	struct ice_fltr_list_entry *list;
1618 	struct ice_pf *pf = vsi->back;
1619 	LIST_HEAD(tmp_add_list);
1620 	enum ice_status status;
1621 	int err = 0;
1622 
1623 	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
1624 	if (!list)
1625 		return -ENOMEM;
1626 
1627 	list->fltr_info.lkup_type = ICE_SW_LKUP_VLAN;
1628 	list->fltr_info.vsi_handle = vsi->idx;
1629 	list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
1630 	list->fltr_info.l_data.vlan.vlan_id = vid;
1631 	list->fltr_info.flag = ICE_FLTR_TX;
1632 	list->fltr_info.src_id = ICE_SRC_ID_VSI;
1633 
1634 	INIT_LIST_HEAD(&list->list_entry);
1635 	list_add(&list->list_entry, &tmp_add_list);
1636 
1637 	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
1638 	if (status == ICE_ERR_DOES_NOT_EXIST) {
1639 		dev_dbg(&pf->pdev->dev,
1640 			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
1641 			vid, vsi->vsi_num, status);
1642 	} else if (status) {
1643 		dev_err(&pf->pdev->dev,
1644 			"Error removing VLAN %d on vsi %i error: %d\n",
1645 			vid, vsi->vsi_num, status);
1646 		err = -EIO;
1647 	}
1648 
1649 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
1650 	return err;
1651 }
1652 
1653 /**
1654  * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1655  * @vsi: the VSI being configured
1656  *
1657  * Return 0 on success and a negative value on error
1658  * Configure the Rx VSI for operation.
1659  */
1660 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
1661 {
1662 	u16 i;
1663 
1664 	if (vsi->type == ICE_VSI_VF)
1665 		goto setup_rings;
1666 
1667 	if (vsi->netdev && vsi->netdev->mtu > ETH_DATA_LEN)
1668 		vsi->max_frame = vsi->netdev->mtu +
1669 			ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
1670 	else
1671 		vsi->max_frame = ICE_RXBUF_2048;
1672 
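	/* all Rx queues currently use 2048-byte data buffers */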
1673 	vsi->rx_buf_len = ICE_RXBUF_2048;
1674 setup_rings:
1675 	/* set up individual rings */
1676 	for (i = 0; i < vsi->num_rxq; i++) {
1677 		int err;
1678 
1679 		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
1680 		if (err) {
1681 			dev_err(&vsi->back->pdev->dev,
1682 				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
1683 				i, err);
1684 			return err;
1685 		}
1686 	}
1687 
1688 	return 0;
1689 }
1690 
1691 /**
1692  * ice_vsi_cfg_txq - Configure single Tx queue
1693  * @vsi: the VSI that queue belongs to
1694  * @ring: Tx ring to be configured
1695  * @tc_q_idx: queue index within given TC
1696  * @qg_buf: queue group buffer
1697  * @tc: TC that Tx ring belongs to
1698  */
1699 static int
1700 ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
1701 		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
1702 {
1703 	struct ice_tlan_ctx tlan_ctx = { 0 };
1704 	struct ice_aqc_add_txqs_perq *txq;
1705 	struct ice_pf *pf = vsi->back;
1706 	u8 buf_len = sizeof(*qg_buf);
1707 	enum ice_status status;
1708 	u16 pf_q;
1709 
1710 	pf_q = ring->reg_idx;
1711 	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
1712 	/* copy context contents into the qg_buf */
1713 	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
1714 	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
1715 		    ice_tlan_ctx_info);
1716 
1717 	/* init queue specific tail reg. It is referred to as the
1718 	 * transmit comm scheduler queue doorbell.
1719 	 */
1720 	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
1721 
1722 	/* Add unique software queue handle of the Tx queue per
1723 	 * TC into the VSI Tx ring
1724 	 */
1725 	ring->q_handle = tc_q_idx;
1726 
1727 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1728 				 1, qg_buf, buf_len, NULL);
1729 	if (status) {
1730 		dev_err(&pf->pdev->dev,
1731 			"Failed to set LAN Tx queue context, error: %d\n",
1732 			status);
1733 		return -ENODEV;
1734 	}
1735 
1736 	/* Add Tx Queue TEID into the VSI Tx ring from the
1737 	 * response. This will complete configuring and
1738 	 * enabling the queue.
1739 	 */
1740 	txq = &qg_buf->txqs[0];
1741 	if (pf_q == le16_to_cpu(txq->txq_id))
1742 		ring->txq_teid = le32_to_cpu(txq->q_teid);
1743 
1744 	return 0;
1745 }
1746 
1747 /**
1748  * ice_vsi_cfg_txqs - Configure the VSI for Tx
1749  * @vsi: the VSI being configured
1750  * @rings: Tx ring array to be configured
1751  * @offset: offset within vsi->txq_map
1752  *
1753  * Return 0 on success and a negative value on error
1754  * Configure the Tx VSI for operation.
1755  */
1756 static int
1757 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
1758 {
1759 	struct ice_aqc_add_tx_qgrp *qg_buf;
1760 	struct ice_pf *pf = vsi->back;
1761 	u16 q_idx = 0, i;
1762 	int err = 0;
1763 	u8 tc;
1764 
1765 	qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
1766 	if (!qg_buf)
1767 		return -ENOMEM;
1768 
1769 	qg_buf->num_txqs = 1;
1770 
1771 	/* set up and configure the Tx queues for each enabled TC */
1772 	ice_for_each_traffic_class(tc) {
1773 		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
1774 			break;
1775 
1776 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
1777 			err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
1778 					      qg_buf, tc);
1779 			if (err)
1780 				goto err_cfg_txqs;
1781 
1782 			q_idx++;
1783 		}
1784 	}
1785 err_cfg_txqs:
1786 	devm_kfree(&pf->pdev->dev, qg_buf);
1787 	return err;
1788 }
1789 
1790 /**
1791  * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1792  * @vsi: the VSI being configured
1793  *
1794  * Return 0 on success and a negative value on error
1795  * Configure the Tx VSI for operation.
1796  */
1797 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1798 {
1799 	return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, 0);
1800 }
1801 
1802 /**
1803  * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1804  * @intrl: interrupt rate limit in usecs
1805  * @gran: interrupt rate limit granularity in usecs
1806  *
1807  * This function converts a decimal interrupt rate limit in usecs to the format
1808  * expected by firmware.
1809  */
1810 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1811 {
1812 	u32 val = intrl / gran;
1813 
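	/* zero leaves rate limiting disabled; otherwise set the enable bit */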
1814 	if (val)
1815 		return val | GLINT_RATE_INTRL_ENA_M;
1816 	return 0;
1817 }
1818 
1819 /**
1820  * ice_cfg_itr_gran - set the ITR granularity to 2 usecs if not already set
1821  * @hw: board specific structure
1822  */
1823 static void ice_cfg_itr_gran(struct ice_hw *hw)
1824 {
1825 	u32 regval = rd32(hw, GLINT_CTL);
1826 
1827 	/* no need to update global register if ITR gran is already set */
1828 	if (!(regval & GLINT_CTL_DIS_AUTOMASK_M) &&
1829 	    (((regval & GLINT_CTL_ITR_GRAN_200_M) >>
1830 	     GLINT_CTL_ITR_GRAN_200_S) == ICE_ITR_GRAN_US) &&
1831 	    (((regval & GLINT_CTL_ITR_GRAN_100_M) >>
1832 	     GLINT_CTL_ITR_GRAN_100_S) == ICE_ITR_GRAN_US) &&
1833 	    (((regval & GLINT_CTL_ITR_GRAN_50_M) >>
1834 	     GLINT_CTL_ITR_GRAN_50_S) == ICE_ITR_GRAN_US) &&
1835 	    (((regval & GLINT_CTL_ITR_GRAN_25_M) >>
1836 	      GLINT_CTL_ITR_GRAN_25_S) == ICE_ITR_GRAN_US))
1837 		return;
1838 
1839 	regval = ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_200_S) &
1840 		  GLINT_CTL_ITR_GRAN_200_M) |
1841 		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_100_S) &
1842 		  GLINT_CTL_ITR_GRAN_100_M) |
1843 		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_50_S) &
1844 		  GLINT_CTL_ITR_GRAN_50_M) |
1845 		 ((ICE_ITR_GRAN_US << GLINT_CTL_ITR_GRAN_25_S) &
1846 		  GLINT_CTL_ITR_GRAN_25_M);
1847 	wr32(hw, GLINT_CTL, regval);
1848 }
1849 
1850 /**
1851  * ice_cfg_itr - configure the initial interrupt throttle values
1852  * @hw: pointer to the HW structure
1853  * @q_vector: interrupt vector that's being configured
1854  *
1855  * Configure interrupt throttling values for the ring containers that are
1856  * associated with the interrupt vector passed in.
1857  */
1858 static void
1859 ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
1860 {
1861 	ice_cfg_itr_gran(hw);
1862 
1863 	if (q_vector->num_ring_rx) {
1864 		struct ice_ring_container *rc = &q_vector->rx;
1865 
1866 		/* if this value is set then don't overwrite with default */
1867 		if (!rc->itr_setting)
1868 			rc->itr_setting = ICE_DFLT_RX_ITR;
1869 
1870 		rc->target_itr = ITR_TO_REG(rc->itr_setting);
1871 		rc->next_update = jiffies + 1;
1872 		rc->current_itr = rc->target_itr;
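		/* the register takes the ITR value in granularity units */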
1873 		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1874 		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1875 	}
1876 
1877 	if (q_vector->num_ring_tx) {
1878 		struct ice_ring_container *rc = &q_vector->tx;
1879 
1880 		/* if this value is set then don't overwrite with default */
1881 		if (!rc->itr_setting)
1882 			rc->itr_setting = ICE_DFLT_TX_ITR;
1883 
1884 		rc->target_itr = ITR_TO_REG(rc->itr_setting);
1885 		rc->next_update = jiffies + 1;
1886 		rc->current_itr = rc->target_itr;
1887 		wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1888 		     ITR_REG_ALIGN(rc->current_itr) >> ICE_ITR_GRAN_S);
1889 	}
1890 }
1891 
1892 /**
1893  * ice_cfg_txq_interrupt - configure interrupt on Tx queue
1894  * @vsi: the VSI being configured
1895  * @txq: Tx queue being mapped to MSI-X vector
1896  * @msix_idx: MSI-X vector index within the function
1897  * @itr_idx: ITR index of the interrupt cause
1898  *
1899  * Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
1900  * within the function space.
1901  */
1902 #ifdef CONFIG_PCI_IOV
1903 void
1904 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1905 #else
1906 static void
1907 ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
1908 #endif /* CONFIG_PCI_IOV */
1909 {
1910 	struct ice_pf *pf = vsi->back;
1911 	struct ice_hw *hw = &pf->hw;
1912 	u32 val;
1913 
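	/* enable the interrupt cause and encode the ITR and MSI-X indices */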
1914 	itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
1915 
1916 	val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
1917 	      ((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
1918 
1919 	wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
1920 }
1921 
1922 /**
1923  * ice_cfg_rxq_interrupt - configure interrupt on Rx queue
1924  * @vsi: the VSI being configured
1925  * @rxq: Rx queue being mapped to MSI-X vector
1926  * @msix_idx: MSI-X vector index within the function
1927  * @itr_idx: ITR index of the interrupt cause
1928  *
1929  * Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
1930  * within the function space.
1931  */
1932 #ifdef CONFIG_PCI_IOV
1933 void
1934 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1935 #else
1936 static void
1937 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
1938 #endif /* CONFIG_PCI_IOV */
1939 {
1940 	struct ice_pf *pf = vsi->back;
1941 	struct ice_hw *hw = &pf->hw;
1942 	u32 val;
1943 
1944 	itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
1945 
1946 	val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
1947 	      ((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
1948 
1949 	wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
1950 
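	/* flush the posted register write to hardware */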
1951 	ice_flush(hw);
1952 }
1953 
1954 /**
 * ice_vsi_cfg_msix - MSI-X mode interrupt config in the HW
1956  * @vsi: the VSI being configured
1957  *
 * This configures MSI-X mode interrupts for the PF VSI, and should not be used
1959  * for the VF VSI.
1960  */
1961 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
1962 {
1963 	struct ice_pf *pf = vsi->back;
1964 	struct ice_hw *hw = &pf->hw;
1965 	u32 txq = 0, rxq = 0;
1966 	int i, q;
1967 
1968 	for (i = 0; i < vsi->num_q_vectors; i++) {
1969 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
1970 		u16 reg_idx = q_vector->reg_idx;
1971 
1972 		ice_cfg_itr(hw, q_vector);
1973 
1974 		wr32(hw, GLINT_RATE(reg_idx),
1975 		     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
1976 
		/* Both the Transmit Queue Interrupt Cause Control register
		 * and the Receive Queue Interrupt Cause Control register
		 * expect the MSIX_INDX field to be the vector index
		 * within the function space, not the absolute
		 * vector index across the PF or across the device.
		 * For SR-IOV VF VSIs the queue vector index always starts
		 * at 1, since the first vector index (0) is used for OICR
		 * in VF space. Since VMDq and other PF VSIs are within
		 * the PF function space, use the vector index that is
		 * tracked for this PF.
		 */
1988 		for (q = 0; q < q_vector->num_ring_tx; q++) {
1989 			ice_cfg_txq_interrupt(vsi, txq, reg_idx,
1990 					      q_vector->tx.itr_idx);
1991 			txq++;
1992 		}
1993 
1994 		for (q = 0; q < q_vector->num_ring_rx; q++) {
1995 			ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
1996 					      q_vector->rx.itr_idx);
1997 			rxq++;
1998 		}
1999 	}
2000 }
2001 
2002 /**
2003  * ice_vsi_manage_vlan_insertion - Manage VLAN insertion for the VSI for Tx
2004  * @vsi: the VSI being changed
2005  */
2006 int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
2007 {
2008 	struct device *dev = &vsi->back->pdev->dev;
2009 	struct ice_hw *hw = &vsi->back->hw;
2010 	struct ice_vsi_ctx *ctxt;
2011 	enum ice_status status;
2012 	int ret = 0;
2013 
2014 	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2015 	if (!ctxt)
2016 		return -ENOMEM;
2017 
2018 	/* Here we are configuring the VSI to let the driver add VLAN tags by
2019 	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
2020 	 * insertion happens in the Tx hot path, in ice_tx_map.
2021 	 */
2022 	ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
2023 
2024 	/* Preserve existing VLAN strip setting */
2025 	ctxt->info.vlan_flags |= (vsi->info.vlan_flags &
2026 				  ICE_AQ_VSI_VLAN_EMOD_M);
2027 
2028 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
2029 
2030 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2031 	if (status) {
2032 		dev_err(dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
2033 			status, hw->adminq.sq_last_status);
2034 		ret = -EIO;
2035 		goto out;
2036 	}
2037 
2038 	vsi->info.vlan_flags = ctxt->info.vlan_flags;
2039 out:
2040 	devm_kfree(dev, ctxt);
2041 	return ret;
2042 }
2043 
2044 /**
2045  * ice_vsi_manage_vlan_stripping - Manage VLAN stripping for the VSI for Rx
2046  * @vsi: the VSI being changed
 * @ena: boolean value indicating if this is an enable or disable request
2048  */
2049 int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
2050 {
2051 	struct device *dev = &vsi->back->pdev->dev;
2052 	struct ice_hw *hw = &vsi->back->hw;
2053 	struct ice_vsi_ctx *ctxt;
2054 	enum ice_status status;
2055 	int ret = 0;
2056 
2057 	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2058 	if (!ctxt)
2059 		return -ENOMEM;
2060 
2061 	/* Here we are configuring what the VSI should do with the VLAN tag in
2062 	 * the Rx packet. We can either leave the tag in the packet or put it in
2063 	 * the Rx descriptor.
2064 	 */
2065 	if (ena)
2066 		/* Strip VLAN tag from Rx packet and put it in the desc */
2067 		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
2068 	else
2069 		/* Disable stripping. Leave tag in packet */
2070 		ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
2071 
2072 	/* Allow all packets untagged/tagged */
2073 	ctxt->info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
2074 
2075 	ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
2076 
2077 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
2078 	if (status) {
2079 		dev_err(dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
2080 			ena, status, hw->adminq.sq_last_status);
2081 		ret = -EIO;
2082 		goto out;
2083 	}
2084 
2085 	vsi->info.vlan_flags = ctxt->info.vlan_flags;
2086 out:
2087 	devm_kfree(dev, ctxt);
2088 	return ret;
2089 }
2090 
2091 /**
2092  * ice_vsi_start_rx_rings - start VSI's Rx rings
2093  * @vsi: the VSI whose rings are to be started
2094  *
2095  * Returns 0 on success and a negative value on error
2096  */
2097 int ice_vsi_start_rx_rings(struct ice_vsi *vsi)
2098 {
2099 	return ice_vsi_ctrl_rx_rings(vsi, true);
2100 }
2101 
2102 /**
2103  * ice_vsi_stop_rx_rings - stop VSI's Rx rings
 * @vsi: the VSI whose rings are to be stopped
2105  *
2106  * Returns 0 on success and a negative value on error
2107  */
2108 int ice_vsi_stop_rx_rings(struct ice_vsi *vsi)
2109 {
2110 	return ice_vsi_ctrl_rx_rings(vsi, false);
2111 }
2112 
2113 /**
2114  * ice_trigger_sw_intr - trigger a software interrupt
2115  * @hw: pointer to the HW structure
2116  * @q_vector: interrupt vector to trigger the software interrupt for
2117  */
2118 void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
2119 {
2120 	wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx),
2121 	     (ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) |
2122 	     GLINT_DYN_CTL_SWINT_TRIG_M |
2123 	     GLINT_DYN_CTL_INTENA_M);
2124 }
2125 
2126 /**
2127  * ice_vsi_stop_tx_ring - Disable single Tx ring
2128  * @vsi: the VSI being configured
2129  * @rst_src: reset source
2130  * @rel_vmvf_num: Relative ID of VF/VM
2131  * @ring: Tx ring to be stopped
 * @txq_meta: metadata of the Tx ring to be stopped
2133  */
2134 #ifndef CONFIG_PCI_IOV
2135 static
2136 #endif /* !CONFIG_PCI_IOV */
2137 int
2138 ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2139 		     u16 rel_vmvf_num, struct ice_ring *ring,
2140 		     struct ice_txq_meta *txq_meta)
2141 {
2142 	struct ice_pf *pf = vsi->back;
2143 	struct ice_q_vector *q_vector;
2144 	struct ice_hw *hw = &pf->hw;
2145 	enum ice_status status;
2146 	u32 val;
2147 
2148 	/* clear cause_ena bit for disabled queues */
2149 	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
2150 	val &= ~QINT_TQCTL_CAUSE_ENA_M;
2151 	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
2152 
2153 	/* software is expected to wait for 100 ns */
2154 	ndelay(100);
2155 
2156 	/* trigger a software interrupt for the vector
2157 	 * associated to the queue to schedule NAPI handler
2158 	 */
2159 	q_vector = ring->q_vector;
2160 	if (q_vector)
2161 		ice_trigger_sw_intr(hw, q_vector);
2162 
2163 	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
2164 				 txq_meta->tc, 1, &txq_meta->q_handle,
2165 				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
2166 				 rel_vmvf_num, NULL);
2167 
2168 	/* if the disable queue command was exercised during an
2169 	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
2170 	 * This is not an error as the reset operation disables
2171 	 * queues at the hardware level anyway.
2172 	 */
2173 	if (status == ICE_ERR_RESET_ONGOING) {
2174 		dev_dbg(&vsi->back->pdev->dev,
2175 			"Reset in progress. LAN Tx queues already disabled\n");
2176 	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
2177 		dev_dbg(&vsi->back->pdev->dev,
2178 			"LAN Tx queues do not exist, nothing to disable\n");
2179 	} else if (status) {
2180 		dev_err(&vsi->back->pdev->dev,
2181 			"Failed to disable LAN Tx queues, error: %d\n", status);
2182 		return -ENODEV;
2183 	}
2184 
2185 	return 0;
2186 }
2187 
2188 /**
2189  * ice_fill_txq_meta - Prepare the Tx queue's meta data
2190  * @vsi: VSI that ring belongs to
2191  * @ring: ring that txq_meta will be based on
2192  * @txq_meta: a helper struct that wraps Tx queue's information
2193  *
 * Set up a helper struct that contains all the fields needed to stop the
 * Tx queue
2196  */
2197 #ifndef CONFIG_PCI_IOV
2198 static
2199 #endif /* !CONFIG_PCI_IOV */
2200 void
2201 ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
2202 		  struct ice_txq_meta *txq_meta)
2203 {
2204 	u8 tc = 0;
2205 
2206 #ifdef CONFIG_DCB
2207 	tc = ring->dcb_tc;
2208 #endif /* CONFIG_DCB */
2209 	txq_meta->q_id = ring->reg_idx;
2210 	txq_meta->q_teid = ring->txq_teid;
2211 	txq_meta->q_handle = ring->q_handle;
2212 	txq_meta->vsi_idx = vsi->idx;
2213 	txq_meta->tc = tc;
2214 }
2215 
2216 /**
2217  * ice_vsi_stop_tx_rings - Disable Tx rings
2218  * @vsi: the VSI being configured
2219  * @rst_src: reset source
2220  * @rel_vmvf_num: Relative ID of VF/VM
2221  * @rings: Tx ring array to be stopped
2222  */
2223 static int
2224 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2225 		      u16 rel_vmvf_num, struct ice_ring **rings)
2226 {
2227 	u16 i, q_idx = 0;
2228 	int status;
2229 	u8 tc;
2230 
2231 	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2232 		return -EINVAL;
2233 
2234 	/* set up the Tx queue list to be disabled for each enabled TC */
2235 	ice_for_each_traffic_class(tc) {
2236 		if (!(vsi->tc_cfg.ena_tc & BIT(tc)))
2237 			break;
2238 
2239 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
2240 			struct ice_txq_meta txq_meta = { };
2241 
2242 			if (!rings || !rings[q_idx])
2243 				return -EINVAL;
2244 
2245 			ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2246 			status = ice_vsi_stop_tx_ring(vsi, rst_src,
2247 						      rel_vmvf_num,
2248 						      rings[q_idx], &txq_meta);
2249 
2250 			if (status)
2251 				return status;
2252 
2253 			q_idx++;
2254 		}
2255 	}
2256 
2257 	return 0;
2258 }
2259 
2260 /**
2261  * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2262  * @vsi: the VSI being configured
2263  * @rst_src: reset source
2264  * @rel_vmvf_num: Relative ID of VF/VM
2265  */
2266 int
2267 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2268 			  u16 rel_vmvf_num)
2269 {
2270 	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
2271 }
2272 
2273 /**
2274  * ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
2275  * @vsi: VSI to enable or disable VLAN pruning on
2276  * @ena: set to true to enable VLAN pruning and false to disable it
2277  * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
2278  *
2279  * returns 0 if VSI is updated, negative otherwise
2280  */
2281 int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
2282 {
2283 	struct ice_vsi_ctx *ctxt;
2284 	struct device *dev;
2285 	struct ice_pf *pf;
2286 	int status;
2287 
2288 	if (!vsi)
2289 		return -EINVAL;
2290 
2291 	pf = vsi->back;
2292 	dev = &pf->pdev->dev;
2293 	ctxt = devm_kzalloc(dev, sizeof(*ctxt), GFP_KERNEL);
2294 	if (!ctxt)
2295 		return -ENOMEM;
2296 
2297 	ctxt->info = vsi->info;
2298 
2299 	if (ena) {
2300 		ctxt->info.sec_flags |=
2301 			ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2302 			ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
2303 		ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2304 	} else {
2305 		ctxt->info.sec_flags &=
2306 			~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
2307 			  ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
2308 		ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
2309 	}
2310 
2311 	if (!vlan_promisc)
2312 		ctxt->info.valid_sections =
2313 			cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
2314 				    ICE_AQ_VSI_PROP_SW_VALID);
2315 
2316 	status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
2317 	if (status) {
2318 		netdev_err(vsi->netdev, "%sabling VLAN pruning on VSI handle: %d, VSI HW ID: %d failed, err = %d, aq_err = %d\n",
2319 			   ena ? "En" : "Dis", vsi->idx, vsi->vsi_num, status,
2320 			   pf->hw.adminq.sq_last_status);
2321 		goto err_out;
2322 	}
2323 
2324 	vsi->info.sec_flags = ctxt->info.sec_flags;
2325 	vsi->info.sw_flags2 = ctxt->info.sw_flags2;
2326 
2327 	devm_kfree(dev, ctxt);
2328 	return 0;
2329 
2330 err_out:
2331 	devm_kfree(dev, ctxt);
2332 	return -EIO;
2333 }
2334 
2335 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2336 {
2337 	struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
2338 
2339 	vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
2340 	vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
2341 }
2342 
2343 /**
2344  * ice_vsi_set_q_vectors_reg_idx - set the HW register index for all q_vectors
2345  * @vsi: VSI to set the q_vectors register index on
2346  */
2347 static int
2348 ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
2349 {
2350 	u16 i;
2351 
2352 	if (!vsi || !vsi->q_vectors)
2353 		return -EINVAL;
2354 
2355 	ice_for_each_q_vector(vsi, i) {
2356 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2357 
2358 		if (!q_vector) {
2359 			dev_err(&vsi->back->pdev->dev,
2360 				"Failed to set reg_idx on q_vector %d VSI %d\n",
2361 				i, vsi->vsi_num);
2362 			goto clear_reg_idx;
2363 		}
2364 
2365 		if (vsi->type == ICE_VSI_VF) {
2366 			struct ice_vf *vf = &vsi->back->vf[vsi->vf_id];
2367 
2368 			q_vector->reg_idx = ice_calc_vf_reg_idx(vf, q_vector);
2369 		} else {
2370 			q_vector->reg_idx =
2371 				q_vector->v_idx + vsi->base_vector;
2372 		}
2373 	}
2374 
2375 	return 0;
2376 
2377 clear_reg_idx:
2378 	ice_for_each_q_vector(vsi, i) {
2379 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2380 
2381 		if (q_vector)
2382 			q_vector->reg_idx = 0;
2383 	}
2384 
2385 	return -EINVAL;
2386 }
2387 
2388 /**
 * ice_vsi_add_rem_eth_mac - add or remove an ethertype-based filter rule
 * @vsi: the VSI being configured
 * @add_rule: true to add the ethertype filter rule, false to remove it
2392  */
2393 static void
2394 ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
2395 {
2396 	struct ice_fltr_list_entry *list;
2397 	struct ice_pf *pf = vsi->back;
2398 	LIST_HEAD(tmp_add_list);
2399 	enum ice_status status;
2400 
2401 	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2402 	if (!list)
2403 		return;
2404 
2405 	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2406 	list->fltr_info.fltr_act = ICE_DROP_PACKET;
2407 	list->fltr_info.flag = ICE_FLTR_TX;
2408 	list->fltr_info.src_id = ICE_SRC_ID_VSI;
2409 	list->fltr_info.vsi_handle = vsi->idx;
2410 	list->fltr_info.l_data.ethertype_mac.ethertype = vsi->ethtype;
2411 
2412 	INIT_LIST_HEAD(&list->list_entry);
2413 	list_add(&list->list_entry, &tmp_add_list);
2414 
2415 	if (add_rule)
2416 		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2417 	else
2418 		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2419 
2420 	if (status)
2421 		dev_err(&pf->pdev->dev,
2422 			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
2423 			vsi->vsi_num, status);
2424 
2425 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
2426 }
2427 
2428 /**
2429  * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2430  * @vsi: the VSI being configured
2431  * @tx: bool to determine Tx or Rx rule
 * @create: bool to determine whether to create or remove the rule
2433  */
2434 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2435 {
2436 	struct ice_fltr_list_entry *list;
2437 	struct ice_pf *pf = vsi->back;
2438 	LIST_HEAD(tmp_add_list);
2439 	enum ice_status status;
2440 
2441 	list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
2442 	if (!list)
2443 		return;
2444 
2445 	list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
2446 	list->fltr_info.vsi_handle = vsi->idx;
2447 	list->fltr_info.l_data.ethertype_mac.ethertype = ETH_P_LLDP;
2448 
2449 	if (tx) {
2450 		list->fltr_info.fltr_act = ICE_DROP_PACKET;
2451 		list->fltr_info.flag = ICE_FLTR_TX;
2452 		list->fltr_info.src_id = ICE_SRC_ID_VSI;
2453 	} else {
2454 		list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
2455 		list->fltr_info.flag = ICE_FLTR_RX;
2456 		list->fltr_info.src_id = ICE_SRC_ID_LPORT;
2457 	}
2458 
2459 	INIT_LIST_HEAD(&list->list_entry);
2460 	list_add(&list->list_entry, &tmp_add_list);
2461 
2462 	if (create)
2463 		status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
2464 	else
2465 		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
2466 
2467 	if (status)
2468 		dev_err(&pf->pdev->dev,
2469 			"Fail %s %s LLDP rule on VSI %i error: %d\n",
2470 			create ? "adding" : "removing", tx ? "TX" : "RX",
2471 			vsi->vsi_num, status);
2472 
2473 	ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
2474 }
2475 
2476 /**
2477  * ice_vsi_setup - Set up a VSI by a given type
2478  * @pf: board private structure
2479  * @pi: pointer to the port_info instance
2480  * @type: VSI type
 * @vf_id: ID of the VF to which this VSI connects. This field is used only
 *         for the ICE_VSI_VF VSI type; for other VSI types, pass
 *         ICE_INVAL_VFID.
2484  *
2485  * This allocates the sw VSI structure and its queue resources.
2486  *
 * Returns a pointer to the allocated and configured sw VSI struct on success,
 * NULL on failure.
2489  */
2490 struct ice_vsi *
2491 ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
2492 	      enum ice_vsi_type type, u16 vf_id)
2493 {
2494 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2495 	struct device *dev = &pf->pdev->dev;
2496 	enum ice_status status;
2497 	struct ice_vsi *vsi;
2498 	int ret, i;
2499 
2500 	if (type == ICE_VSI_VF)
2501 		vsi = ice_vsi_alloc(pf, type, vf_id);
2502 	else
2503 		vsi = ice_vsi_alloc(pf, type, ICE_INVAL_VFID);
2504 
2505 	if (!vsi) {
2506 		dev_err(dev, "could not allocate VSI\n");
2507 		return NULL;
2508 	}
2509 
2510 	vsi->port_info = pi;
2511 	vsi->vsw = pf->first_sw;
2512 	if (vsi->type == ICE_VSI_PF)
2513 		vsi->ethtype = ETH_P_PAUSE;
2514 
2515 	if (vsi->type == ICE_VSI_VF)
2516 		vsi->vf_id = vf_id;
2517 
2518 	if (ice_vsi_get_qs(vsi)) {
2519 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2520 			vsi->idx);
2521 		goto unroll_get_qs;
2522 	}
2523 
2524 	/* set RSS capabilities */
2525 	ice_vsi_set_rss_params(vsi);
2526 
2527 	/* set TC configuration */
2528 	ice_vsi_set_tc_cfg(vsi);
2529 
2530 	/* create the VSI */
2531 	ret = ice_vsi_init(vsi);
2532 	if (ret)
2533 		goto unroll_get_qs;
2534 
2535 	switch (vsi->type) {
2536 	case ICE_VSI_PF:
2537 		ret = ice_vsi_alloc_q_vectors(vsi);
2538 		if (ret)
2539 			goto unroll_vsi_init;
2540 
2541 		ret = ice_vsi_setup_vector_base(vsi);
2542 		if (ret)
2543 			goto unroll_alloc_q_vector;
2544 
2545 		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2546 		if (ret)
2547 			goto unroll_vector_base;
2548 
2549 		ret = ice_vsi_alloc_rings(vsi);
2550 		if (ret)
2551 			goto unroll_vector_base;
2552 
2553 		ice_vsi_map_rings_to_vectors(vsi);
2554 
2555 		/* Do not exit if configuring RSS had an issue, at least
2556 		 * receive traffic on first queue. Hence no need to capture
2557 		 * return value
2558 		 */
2559 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2560 			ice_vsi_cfg_rss_lut_key(vsi);
2561 		break;
2562 	case ICE_VSI_VF:
		/* The VF driver will take care of creating the netdev for this
		 * VSI type and of mapping queues to vectors through Virtchnl;
		 * the PF driver only creates the VSI and the corresponding
		 * structures for bookkeeping purposes.
		 */
2568 		ret = ice_vsi_alloc_q_vectors(vsi);
2569 		if (ret)
2570 			goto unroll_vsi_init;
2571 
2572 		ret = ice_vsi_alloc_rings(vsi);
2573 		if (ret)
2574 			goto unroll_alloc_q_vector;
2575 
2576 		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
2577 		if (ret)
2578 			goto unroll_vector_base;
2579 
2580 		pf->q_left_tx -= vsi->alloc_txq;
2581 		pf->q_left_rx -= vsi->alloc_rxq;
2582 
2583 		/* Do not exit if configuring RSS had an issue, at least
2584 		 * receive traffic on first queue. Hence no need to capture
2585 		 * return value
2586 		 */
2587 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2588 			ice_vsi_cfg_rss_lut_key(vsi);
2589 		break;
2590 	case ICE_VSI_LB:
2591 		ret = ice_vsi_alloc_rings(vsi);
2592 		if (ret)
2593 			goto unroll_vsi_init;
2594 		break;
2595 	default:
2596 		/* clean up the resources and exit */
2597 		goto unroll_vsi_init;
2598 	}
2599 
2600 	/* configure VSI nodes based on number of queues and TC's */
2601 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
2602 		max_txqs[i] = vsi->alloc_txq;
2603 
2604 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2605 				 max_txqs);
2606 	if (status) {
2607 		dev_err(&pf->pdev->dev,
2608 			"VSI %d failed lan queue config, error %d\n",
2609 			vsi->vsi_num, status);
2610 		goto unroll_vector_base;
2611 	}
2612 
	/* Add a switch rule to drop all Tx Flow Control Frames, of lookup
	 * type ETHERTYPE, coming from VSIs, to restrict a malicious VF from
	 * sending out PAUSE or PFC frames. If enabled, FW can still send FC
	 * frames. The rule is added once for the PF VSI in order to create
	 * the appropriate recipe, since the VSI/VSI list is ignored with the
	 * drop action.
	 * Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
	 * need to be dropped so that VFs cannot send LLDP packets to
	 * reconfigure DCB settings in the HW. Also, if the FW DCBX engine is
	 * not running then Rx LLDP packets need to be redirected up the stack.
	 */
2623 	if (vsi->type == ICE_VSI_PF) {
2624 		ice_vsi_add_rem_eth_mac(vsi, true);
2625 
2626 		/* Tx LLDP packets */
2627 		ice_cfg_sw_lldp(vsi, true, true);
2628 
2629 		/* Rx LLDP packets */
2630 		if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2631 			ice_cfg_sw_lldp(vsi, false, true);
2632 	}
2633 
2634 	return vsi;
2635 
2636 unroll_vector_base:
2637 	/* reclaim SW interrupts back to the common pool */
2638 	ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2639 	pf->num_avail_sw_msix += vsi->num_q_vectors;
2640 unroll_alloc_q_vector:
2641 	ice_vsi_free_q_vectors(vsi);
2642 unroll_vsi_init:
2643 	ice_vsi_delete(vsi);
2644 unroll_get_qs:
2645 	ice_vsi_put_qs(vsi);
2646 	pf->q_left_tx += vsi->alloc_txq;
2647 	pf->q_left_rx += vsi->alloc_rxq;
2648 	ice_vsi_clear(vsi);
2649 
2650 	return NULL;
2651 }
2652 
2653 /**
2654  * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2655  * @vsi: the VSI being cleaned up
2656  */
2657 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2658 {
2659 	struct ice_pf *pf = vsi->back;
2660 	struct ice_hw *hw = &pf->hw;
2661 	u32 txq = 0;
2662 	u32 rxq = 0;
2663 	int i, q;
2664 
2665 	for (i = 0; i < vsi->num_q_vectors; i++) {
2666 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
2667 		u16 reg_idx = q_vector->reg_idx;
2668 
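		/* zero both ITR registers and every queue's cause control
		 * register for this vector
		 */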
2669 		wr32(hw, GLINT_ITR(ICE_IDX_ITR0, reg_idx), 0);
2670 		wr32(hw, GLINT_ITR(ICE_IDX_ITR1, reg_idx), 0);
2671 		for (q = 0; q < q_vector->num_ring_tx; q++) {
2672 			wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2673 			txq++;
2674 		}
2675 
2676 		for (q = 0; q < q_vector->num_ring_rx; q++) {
2677 			wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2678 			rxq++;
2679 		}
2680 	}
2681 
2682 	ice_flush(hw);
2683 }
2684 
2685 /**
2686  * ice_vsi_free_irq - Free the IRQ association with the OS
2687  * @vsi: the VSI being configured
2688  */
2689 void ice_vsi_free_irq(struct ice_vsi *vsi)
2690 {
2691 	struct ice_pf *pf = vsi->back;
2692 	int base = vsi->base_vector;
2693 	int i;
2694 
2695 	if (!vsi->q_vectors || !vsi->irqs_ready)
2696 		return;
2697 
2698 	ice_vsi_release_msix(vsi);
2699 	if (vsi->type == ICE_VSI_VF)
2700 		return;
2701 
2702 	vsi->irqs_ready = false;
2703 	ice_for_each_q_vector(vsi, i) {
2704 		u16 vector = i + base;
2705 		int irq_num;
2706 
2707 		irq_num = pf->msix_entries[vector].vector;
2708 
2709 		/* free only the irqs that were actually requested */
2710 		if (!vsi->q_vectors[i] ||
2711 		    !(vsi->q_vectors[i]->num_ring_tx ||
2712 		      vsi->q_vectors[i]->num_ring_rx))
2713 			continue;
2714 
2715 		/* clear the affinity notifier in the IRQ descriptor */
2716 		irq_set_affinity_notifier(irq_num, NULL);
2717 
2718 		/* clear the affinity_mask in the IRQ descriptor */
2719 		irq_set_affinity_hint(irq_num, NULL);
2720 		synchronize_irq(irq_num);
2721 		devm_free_irq(&pf->pdev->dev, irq_num,
2722 			      vsi->q_vectors[i]);
2723 	}
2724 }
2725 
2726 /**
2727  * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2728  * @vsi: the VSI having resources freed
2729  */
2730 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2731 {
2732 	int i;
2733 
2734 	if (!vsi->tx_rings)
2735 		return;
2736 
2737 	ice_for_each_txq(vsi, i)
2738 		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2739 			ice_free_tx_ring(vsi->tx_rings[i]);
2740 }
2741 
2742 /**
2743  * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2744  * @vsi: the VSI having resources freed
2745  */
2746 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2747 {
2748 	int i;
2749 
2750 	if (!vsi->rx_rings)
2751 		return;
2752 
2753 	ice_for_each_rxq(vsi, i)
2754 		if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2755 			ice_free_rx_ring(vsi->rx_rings[i]);
2756 }
2757 
2758 /**
2759  * ice_vsi_close - Shut down a VSI
2760  * @vsi: the VSI being shut down
2761  */
2762 void ice_vsi_close(struct ice_vsi *vsi)
2763 {
2764 	if (!test_and_set_bit(__ICE_DOWN, vsi->state))
2765 		ice_down(vsi);
2766 
2767 	ice_vsi_free_irq(vsi);
2768 	ice_vsi_free_tx_rings(vsi);
2769 	ice_vsi_free_rx_rings(vsi);
2770 }
2771 
2772 /**
2773  * ice_free_res - free a block of resources
2774  * @res: pointer to the resource
2775  * @index: starting index previously returned by ice_get_res
2776  * @id: identifier to track owner
2777  *
2778  * Returns number of resources freed
2779  */
2780 int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id)
2781 {
2782 	int count = 0;
2783 	int i;
2784 
2785 	if (!res || index >= res->end)
2786 		return -EINVAL;
2787 
2788 	id |= ICE_RES_VALID_BIT;
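	/* clear consecutive entries starting at index while they still
	 * belong to this owner
	 */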
2789 	for (i = index; i < res->end && res->list[i] == id; i++) {
2790 		res->list[i] = 0;
2791 		count++;
2792 	}
2793 
2794 	return count;
2795 }
2796 
2797 /**
2798  * ice_search_res - Search the tracker for a block of resources
2799  * @res: pointer to the resource
2800  * @needed: size of the block needed
2801  * @id: identifier to track owner
2802  *
2803  * Returns the base item index of the block, or -ENOMEM for error
2804  */
2805 static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
2806 {
2807 	int start = 0, end = 0;
2808 
2809 	if (needed > res->end)
2810 		return -ENOMEM;
2811 
2812 	id |= ICE_RES_VALID_BIT;
2813 
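	/* linearly scan for a run of 'needed' consecutive free entries; an
	 * already allocated entry restarts the candidate run
	 */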
2814 	do {
2815 		/* skip already allocated entries */
2816 		if (res->list[end++] & ICE_RES_VALID_BIT) {
2817 			start = end;
2818 			if ((start + needed) > res->end)
2819 				break;
2820 		}
2821 
2822 		if (end == (start + needed)) {
2823 			int i = start;
2824 
2825 			/* there was enough, so assign it to the requestor */
2826 			while (i != end)
2827 				res->list[i++] = id;
2828 
2829 			return start;
2830 		}
2831 	} while (end < res->end);
2832 
2833 	return -ENOMEM;
2834 }
2835 
2836 /**
2837  * ice_get_res - get a block of resources
2838  * @pf: board private structure
2839  * @res: pointer to the resource
2840  * @needed: size of the block needed
2841  * @id: identifier to track owner
2842  *
2843  * Returns the base item index of the block, or negative for error
2844  */
2845 int
2846 ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
2847 {
2848 	if (!res || !pf)
2849 		return -EINVAL;
2850 
2851 	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
2852 		dev_err(&pf->pdev->dev,
2853 			"param err: needed=%d, num_entries = %d id=0x%04x\n",
2854 			needed, res->num_entries, id);
2855 		return -EINVAL;
2856 	}
2857 
2858 	return ice_search_res(res, needed, id);
2859 }
2860 
2861 /**
2862  * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2863  * @vsi: the VSI being un-configured
2864  */
2865 void ice_vsi_dis_irq(struct ice_vsi *vsi)
2866 {
2867 	int base = vsi->base_vector;
2868 	struct ice_pf *pf = vsi->back;
2869 	struct ice_hw *hw = &pf->hw;
2870 	u32 val;
2871 	int i;
2872 
2873 	/* disable interrupt causation from each queue */
2874 	if (vsi->tx_rings) {
2875 		ice_for_each_txq(vsi, i) {
2876 			if (vsi->tx_rings[i]) {
2877 				u16 reg;
2878 
2879 				reg = vsi->tx_rings[i]->reg_idx;
2880 				val = rd32(hw, QINT_TQCTL(reg));
2881 				val &= ~QINT_TQCTL_CAUSE_ENA_M;
2882 				wr32(hw, QINT_TQCTL(reg), val);
2883 			}
2884 		}
2885 	}
2886 
2887 	if (vsi->rx_rings) {
2888 		ice_for_each_rxq(vsi, i) {
2889 			if (vsi->rx_rings[i]) {
2890 				u16 reg;
2891 
2892 				reg = vsi->rx_rings[i]->reg_idx;
2893 				val = rd32(hw, QINT_RQCTL(reg));
2894 				val &= ~QINT_RQCTL_CAUSE_ENA_M;
2895 				wr32(hw, QINT_RQCTL(reg), val);
2896 			}
2897 		}
2898 	}
2899 
2900 	/* disable each interrupt */
2901 	ice_for_each_q_vector(vsi, i)
2902 		wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2903 
2904 	ice_flush(hw);
2905 
2906 	/* don't call synchronize_irq() for VF's from the host */
2907 	if (vsi->type == ICE_VSI_VF)
2908 		return;
2909 
2910 	ice_for_each_q_vector(vsi, i)
2911 		synchronize_irq(pf->msix_entries[i + base].vector);
2912 }
2913 
2914 /**
2915  * ice_napi_del - Remove NAPI handler for the VSI
2916  * @vsi: VSI for which NAPI handler is to be removed
2917  */
2918 void ice_napi_del(struct ice_vsi *vsi)
2919 {
2920 	int v_idx;
2921 
2922 	if (!vsi->netdev)
2923 		return;
2924 
2925 	ice_for_each_q_vector(vsi, v_idx)
2926 		netif_napi_del(&vsi->q_vectors[v_idx]->napi);
2927 }
2928 
2929 /**
2930  * ice_vsi_release - Delete a VSI and free its resources
2931  * @vsi: the VSI being removed
2932  *
2933  * Returns 0 on success or < 0 on error
2934  */
2935 int ice_vsi_release(struct ice_vsi *vsi)
2936 {
2937 	struct ice_pf *pf;
2938 
2939 	if (!vsi->back)
2940 		return -ENODEV;
2941 	pf = vsi->back;
2942 
2943 	/* do not unregister while driver is in the reset recovery pending
2944 	 * state. Since reset/rebuild happens through PF service task workqueue,
2945 	 * it's not a good idea to unregister netdev that is associated to the
2946 	 * PF that is running the work queue items currently. This is done to
2947 	 * avoid check_flush_dependency() warning on this wq
2948 	 */
2949 	if (vsi->netdev && !ice_is_reset_in_progress(pf->state))
2950 		unregister_netdev(vsi->netdev);
2951 
2952 	if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2953 		ice_rss_clean(vsi);
2954 
2955 	/* Disable VSI and free resources */
2956 	if (vsi->type != ICE_VSI_LB)
2957 		ice_vsi_dis_irq(vsi);
2958 	ice_vsi_close(vsi);
2959 
2960 	/* SR-IOV determines needed MSIX resources all at once instead of per
2961 	 * VSI since when VFs are spawned we know how many VFs there are and how
2962 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
2963 	 * cleared in the same manner.
2964 	 */
2965 	if (vsi->type != ICE_VSI_VF) {
2966 		/* reclaim SW interrupts back to the common pool */
2967 		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
2968 		pf->num_avail_sw_msix += vsi->num_q_vectors;
2969 	}
2970 
2971 	if (vsi->type == ICE_VSI_PF) {
2972 		ice_vsi_add_rem_eth_mac(vsi, false);
2973 		ice_cfg_sw_lldp(vsi, true, false);
2974 		/* The Rx rule will only exist to remove if the LLDP FW
2975 		 * engine is currently stopped
2976 		 */
2977 		if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2978 			ice_cfg_sw_lldp(vsi, false, false);
2979 	}
2980 
2981 	ice_remove_vsi_fltr(&pf->hw, vsi->idx);
2982 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2983 	ice_vsi_delete(vsi);
2984 	ice_vsi_free_q_vectors(vsi);
2985 
2986 	/* make sure unregister_netdev() was called by checking __ICE_DOWN */
2987 	if (vsi->netdev && test_bit(__ICE_DOWN, vsi->state)) {
2988 		free_netdev(vsi->netdev);
2989 		vsi->netdev = NULL;
2990 	}
2991 
2992 	ice_vsi_clear_rings(vsi);
2993 
2994 	ice_vsi_put_qs(vsi);
2995 	pf->q_left_tx += vsi->alloc_txq;
2996 	pf->q_left_rx += vsi->alloc_rxq;
2997 
	/* retain the SW VSI data structure since it is needed to unregister
	 * and free the VSI netdev when the PF is not in reset recovery
	 * pending state, for example during rmmod.
	 */
3001 	 */
3002 	if (!ice_is_reset_in_progress(pf->state))
3003 		ice_vsi_clear(vsi);
3004 
3005 	return 0;
3006 }
3007 
3008 /**
3009  * ice_vsi_rebuild - Rebuild VSI after reset
 * @vsi: VSI to be rebuilt
3011  *
3012  * Returns 0 on success and negative value on failure
3013  */
3014 int ice_vsi_rebuild(struct ice_vsi *vsi)
3015 {
3016 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3017 	struct ice_vf *vf = NULL;
3018 	enum ice_status status;
3019 	struct ice_pf *pf;
3020 	int ret, i;
3021 
3022 	if (!vsi)
3023 		return -EINVAL;
3024 
3025 	pf = vsi->back;
3026 	if (vsi->type == ICE_VSI_VF)
3027 		vf = &pf->vf[vsi->vf_id];
3028 
3029 	ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
3030 	ice_vsi_free_q_vectors(vsi);
3031 
3032 	/* SR-IOV determines needed MSIX resources all at once instead of per
3033 	 * VSI since when VFs are spawned we know how many VFs there are and how
3034 	 * many interrupts each VF needs. SR-IOV MSIX resources are also
3035 	 * cleared in the same manner.
3036 	 */
3037 	if (vsi->type != ICE_VSI_VF) {
3038 		/* reclaim SW interrupts back to the common pool */
3039 		ice_free_res(pf->irq_tracker, vsi->base_vector, vsi->idx);
3040 		pf->num_avail_sw_msix += vsi->num_q_vectors;
3041 		vsi->base_vector = 0;
3042 	}
3043 
3044 	ice_vsi_put_qs(vsi);
3045 	ice_vsi_clear_rings(vsi);
3046 	ice_vsi_free_arrays(vsi);
3047 	ice_dev_onetime_setup(&pf->hw);
3048 	if (vsi->type == ICE_VSI_VF)
3049 		ice_vsi_set_num_qs(vsi, vf->vf_id);
3050 	else
3051 		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
3052 
3053 	ret = ice_vsi_alloc_arrays(vsi);
3054 	if (ret < 0)
3055 		goto err_vsi;
3056 
3057 	ice_vsi_get_qs(vsi);
3058 	ice_vsi_set_tc_cfg(vsi);
3059 
3060 	/* Initialize VSI struct elements and create VSI in FW */
3061 	ret = ice_vsi_init(vsi);
3062 	if (ret < 0)
		goto err_vsi;

3066 	switch (vsi->type) {
3067 	case ICE_VSI_PF:
3068 		ret = ice_vsi_alloc_q_vectors(vsi);
3069 		if (ret)
3070 			goto err_rings;
3071 
3072 		ret = ice_vsi_setup_vector_base(vsi);
3073 		if (ret)
3074 			goto err_vectors;
3075 
3076 		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3077 		if (ret)
3078 			goto err_vectors;
3079 
3080 		ret = ice_vsi_alloc_rings(vsi);
3081 		if (ret)
3082 			goto err_vectors;
3083 
3084 		ice_vsi_map_rings_to_vectors(vsi);
3085 		/* Do not exit if configuring RSS had an issue, at least
3086 		 * receive traffic on first queue. Hence no need to capture
3087 		 * return value
3088 		 */
3089 		if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
3090 			ice_vsi_cfg_rss_lut_key(vsi);
3091 		break;
3092 	case ICE_VSI_VF:
3093 		ret = ice_vsi_alloc_q_vectors(vsi);
3094 		if (ret)
3095 			goto err_rings;
3096 
3097 		ret = ice_vsi_set_q_vectors_reg_idx(vsi);
3098 		if (ret)
3099 			goto err_vectors;
3100 
3101 		ret = ice_vsi_alloc_rings(vsi);
3102 		if (ret)
3103 			goto err_vectors;
3104 
3105 		pf->q_left_tx -= vsi->alloc_txq;
3106 		pf->q_left_rx -= vsi->alloc_rxq;
3107 		break;
3108 	default:
3109 		break;
3110 	}
3111 
3112 	/* configure VSI nodes based on number of queues and TC's */
3113 	for (i = 0; i < vsi->tc_cfg.numtc; i++)
3114 		max_txqs[i] = vsi->alloc_txq;
3115 
3116 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3117 				 max_txqs);
3118 	if (status) {
3119 		dev_err(&pf->pdev->dev,
3120 			"VSI %d failed lan queue config, error %d\n",
3121 			vsi->vsi_num, status);
3122 		goto err_vectors;
3123 	}
3124 	return 0;
3125 
3126 err_vectors:
3127 	ice_vsi_free_q_vectors(vsi);
3128 err_rings:
3129 	if (vsi->netdev) {
3130 		vsi->current_netdev_flags = 0;
3131 		unregister_netdev(vsi->netdev);
3132 		free_netdev(vsi->netdev);
3133 		vsi->netdev = NULL;
3134 	}
3135 err_vsi:
3136 	ice_vsi_clear(vsi);
3137 	set_bit(__ICE_RESET_FAILED, pf->state);
3138 	return ret;
3139 }
3140 
3141 /**
3142  * ice_is_reset_in_progress - check for a reset in progress
3143  * @state: PF state field
3144  */
3145 bool ice_is_reset_in_progress(unsigned long *state)
3146 {
3147 	return test_bit(__ICE_RESET_OICR_RECV, state) ||
3148 	       test_bit(__ICE_PFR_REQ, state) ||
3149 	       test_bit(__ICE_CORER_REQ, state) ||
3150 	       test_bit(__ICE_GLOBR_REQ, state);
3151 }
3152 
3153 #ifdef CONFIG_DCB
3154 /**
3155  * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3156  * @vsi: VSI being configured
3157  * @ctx: the context buffer returned from AQ VSI update command
3158  */
3159 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3160 {
3161 	vsi->info.mapping_flags = ctx->info.mapping_flags;
3162 	memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3163 	       sizeof(vsi->info.q_mapping));
3164 	memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3165 	       sizeof(vsi->info.tc_mapping));
3166 }
3167 
3168 /**
3169  * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3170  * @vsi: the VSI being configured
3171  * @ena_tc: TC map to be enabled
3172  */
3173 static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3174 {
3175 	struct net_device *netdev = vsi->netdev;
3176 	struct ice_pf *pf = vsi->back;
3177 	struct ice_dcbx_cfg *dcbcfg;
3178 	u8 netdev_tc;
3179 	int i;
3180 
3181 	if (!netdev)
3182 		return;
3183 
3184 	if (!ena_tc) {
3185 		netdev_reset_tc(netdev);
3186 		return;
3187 	}
3188 
3189 	if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
3190 		return;
3191 
3192 	dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
3193 
3194 	ice_for_each_traffic_class(i)
3195 		if (vsi->tc_cfg.ena_tc & BIT(i))
3196 			netdev_set_tc_queue(netdev,
3197 					    vsi->tc_cfg.tc_info[i].netdev_tc,
3198 					    vsi->tc_cfg.tc_info[i].qcount_tx,
3199 					    vsi->tc_cfg.tc_info[i].qoffset);
3200 
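	/* map each user priority to its netdev TC using the DCB ETS
	 * priority table
	 */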
3201 	for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3202 		u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3203 
3204 		/* Get the mapped netdev TC# for the UP */
3205 		netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3206 		netdev_set_prio_tc_map(netdev, i, netdev_tc);
3207 	}
3208 }
3209 
3210 /**
3211  * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3212  * @vsi: VSI to be configured
3213  * @ena_tc: TC bitmap
3214  *
 * VSI queues are expected to be quiesced before calling this function
3216  */
3217 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3218 {
3219 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3220 	struct ice_vsi_ctx *ctx;
3221 	struct ice_pf *pf = vsi->back;
3222 	enum ice_status status;
3223 	int i, ret = 0;
3224 	u8 num_tc = 0;
3225 
3226 	ice_for_each_traffic_class(i) {
3227 		/* build bitmap of enabled TCs */
3228 		if (ena_tc & BIT(i))
3229 			num_tc++;
3230 		/* populate max_txqs per TC */
3231 		max_txqs[i] = vsi->alloc_txq;
3232 	}
3233 
3234 	vsi->tc_cfg.ena_tc = ena_tc;
3235 	vsi->tc_cfg.numtc = num_tc;
3236 
3237 	ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
3238 	if (!ctx)
3239 		return -ENOMEM;
3240 
3241 	ctx->vf_num = 0;
3242 	ctx->info = vsi->info;
3243 
3244 	ice_vsi_setup_q_map(vsi, ctx);
3245 
	/* must indicate which sections of the VSI context are being modified */
3247 	ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3248 	status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3249 	if (status) {
3250 		dev_info(&pf->pdev->dev, "Failed VSI Update\n");
3251 		ret = -EIO;
3252 		goto out;
3253 	}
3254 
3255 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
3256 				 max_txqs);
3257 
3258 	if (status) {
3259 		dev_err(&pf->pdev->dev,
3260 			"VSI %d failed TC config, error %d\n",
3261 			vsi->vsi_num, status);
3262 		ret = -EIO;
3263 		goto out;
3264 	}
3265 	ice_vsi_update_q_map(vsi, ctx);
3266 	vsi->info.valid_sections = 0;
3267 
3268 	ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3269 out:
3270 	devm_kfree(&pf->pdev->dev, ctx);
3271 	return ret;
3272 }
3273 #endif /* CONFIG_DCB */
3274 
3275 /**
3276  * ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
 * @vsi: the VSI to configure the MAC filter on
 * @macaddr: the MAC address to be added or removed
 * @set: true to add the MAC filter, false to delete it
 *
 * Adds or removes a MAC address filter entry for a VF VSI
3282  */
3283 enum ice_status
3284 ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
3285 {
3286 	LIST_HEAD(tmp_add_list);
3287 	enum ice_status status;
3288 
3289 	 /* Update MAC filter list to be added or removed for a VSI */
3290 	if (ice_add_mac_to_list(vsi, &tmp_add_list, macaddr)) {
3291 		status = ICE_ERR_NO_MEMORY;
3292 		goto cfg_mac_fltr_exit;
3293 	}
3294 
3295 	if (set)
3296 		status = ice_add_mac(&vsi->back->hw, &tmp_add_list);
3297 	else
3298 		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
3299 
3300 cfg_mac_fltr_exit:
3301 	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
3302 	return status;
3303 }
3304