1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 #include "ice.h"
5 #include "ice_base.h"
6 #include "ice_flow.h"
7 #include "ice_lib.h"
8 #include "ice_fltr.h"
9 #include "ice_dcb_lib.h"
10 #include "ice_devlink.h"
11 #include "ice_vsi_vlan_ops.h"
12
13 /**
14 * ice_vsi_type_str - maps VSI type enum to string equivalents
15 * @vsi_type: VSI type enum
16 */
ice_vsi_type_str(enum ice_vsi_type vsi_type)17 const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
18 {
19 switch (vsi_type) {
20 case ICE_VSI_PF:
21 return "ICE_VSI_PF";
22 case ICE_VSI_VF:
23 return "ICE_VSI_VF";
24 case ICE_VSI_CTRL:
25 return "ICE_VSI_CTRL";
26 case ICE_VSI_CHNL:
27 return "ICE_VSI_CHNL";
28 case ICE_VSI_LB:
29 return "ICE_VSI_LB";
30 case ICE_VSI_SWITCHDEV_CTRL:
31 return "ICE_VSI_SWITCHDEV_CTRL";
32 default:
33 return "unknown";
34 }
35 }
36
37 /**
38 * ice_vsi_ctrl_all_rx_rings - Start or stop a VSI's Rx rings
39 * @vsi: the VSI being configured
40 * @ena: start or stop the Rx rings
41 *
42 * First enable/disable all of the Rx rings, flush any remaining writes, and
43 * then verify that they have all been enabled/disabled successfully. This will
44 * let all of the register writes complete when enabling/disabling the Rx rings
45 * before waiting for the change in hardware to complete.
46 */
ice_vsi_ctrl_all_rx_rings(struct ice_vsi * vsi,bool ena)47 static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
48 {
49 int ret = 0;
50 u16 i;
51
52 ice_for_each_rxq(vsi, i)
53 ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
54
55 ice_flush(&vsi->back->hw);
56
57 ice_for_each_rxq(vsi, i) {
58 ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
59 if (ret)
60 break;
61 }
62
63 return ret;
64 }
65
66 /**
67 * ice_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the VSI
68 * @vsi: VSI pointer
69 *
70 * On error: returns error code (negative)
71 * On success: returns 0
72 */
ice_vsi_alloc_arrays(struct ice_vsi * vsi)73 static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
74 {
75 struct ice_pf *pf = vsi->back;
76 struct device *dev;
77
78 dev = ice_pf_to_dev(pf);
79 if (vsi->type == ICE_VSI_CHNL)
80 return 0;
81
82 /* allocate memory for both Tx and Rx ring pointers */
83 vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
84 sizeof(*vsi->tx_rings), GFP_KERNEL);
85 if (!vsi->tx_rings)
86 return -ENOMEM;
87
88 vsi->rx_rings = devm_kcalloc(dev, vsi->alloc_rxq,
89 sizeof(*vsi->rx_rings), GFP_KERNEL);
90 if (!vsi->rx_rings)
91 goto err_rings;
92
93 /* txq_map needs to have enough space to track both Tx (stack) rings
94 * and XDP rings; at this point vsi->num_xdp_txq might not be set,
95 * so use num_possible_cpus() as we want to always provide XDP ring
96 * per CPU, regardless of queue count settings from user that might
97 * have come from ethtool's set_channels() callback;
98 */
99 vsi->txq_map = devm_kcalloc(dev, (vsi->alloc_txq + num_possible_cpus()),
100 sizeof(*vsi->txq_map), GFP_KERNEL);
101
102 if (!vsi->txq_map)
103 goto err_txq_map;
104
105 vsi->rxq_map = devm_kcalloc(dev, vsi->alloc_rxq,
106 sizeof(*vsi->rxq_map), GFP_KERNEL);
107 if (!vsi->rxq_map)
108 goto err_rxq_map;
109
110 /* There is no need to allocate q_vectors for a loopback VSI. */
111 if (vsi->type == ICE_VSI_LB)
112 return 0;
113
114 /* allocate memory for q_vector pointers */
115 vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
116 sizeof(*vsi->q_vectors), GFP_KERNEL);
117 if (!vsi->q_vectors)
118 goto err_vectors;
119
120 return 0;
121
122 err_vectors:
123 devm_kfree(dev, vsi->rxq_map);
124 err_rxq_map:
125 devm_kfree(dev, vsi->txq_map);
126 err_txq_map:
127 devm_kfree(dev, vsi->rx_rings);
128 err_rings:
129 devm_kfree(dev, vsi->tx_rings);
130 return -ENOMEM;
131 }
132
133 /**
134 * ice_vsi_set_num_desc - Set number of descriptors for queues on this VSI
135 * @vsi: the VSI being configured
136 */
ice_vsi_set_num_desc(struct ice_vsi * vsi)137 static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
138 {
139 switch (vsi->type) {
140 case ICE_VSI_PF:
141 case ICE_VSI_SWITCHDEV_CTRL:
142 case ICE_VSI_CTRL:
143 case ICE_VSI_LB:
144 /* a user could change the values of num_[tr]x_desc using
145 * ethtool -G so we should keep those values instead of
146 * overwriting them with the defaults.
147 */
148 if (!vsi->num_rx_desc)
149 vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
150 if (!vsi->num_tx_desc)
151 vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
152 break;
153 default:
154 dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
155 vsi->type);
156 break;
157 }
158 }
159
160 /**
161 * ice_vsi_set_num_qs - Set number of queues, descriptors and vectors for a VSI
162 * @vsi: the VSI being configured
163 *
164 * Return 0 on success and a negative value on error
165 */
ice_vsi_set_num_qs(struct ice_vsi * vsi)166 static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
167 {
168 enum ice_vsi_type vsi_type = vsi->type;
169 struct ice_pf *pf = vsi->back;
170 struct ice_vf *vf = vsi->vf;
171
172 if (WARN_ON(vsi_type == ICE_VSI_VF && !vf))
173 return;
174
175 switch (vsi_type) {
176 case ICE_VSI_PF:
177 if (vsi->req_txq) {
178 vsi->alloc_txq = vsi->req_txq;
179 vsi->num_txq = vsi->req_txq;
180 } else {
181 vsi->alloc_txq = min3(pf->num_lan_msix,
182 ice_get_avail_txq_count(pf),
183 (u16)num_online_cpus());
184 }
185
186 pf->num_lan_tx = vsi->alloc_txq;
187
188 /* only 1 Rx queue unless RSS is enabled */
189 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
190 vsi->alloc_rxq = 1;
191 } else {
192 if (vsi->req_rxq) {
193 vsi->alloc_rxq = vsi->req_rxq;
194 vsi->num_rxq = vsi->req_rxq;
195 } else {
196 vsi->alloc_rxq = min3(pf->num_lan_msix,
197 ice_get_avail_rxq_count(pf),
198 (u16)num_online_cpus());
199 }
200 }
201
202 pf->num_lan_rx = vsi->alloc_rxq;
203
204 vsi->num_q_vectors = min_t(int, pf->num_lan_msix,
205 max_t(int, vsi->alloc_rxq,
206 vsi->alloc_txq));
207 break;
208 case ICE_VSI_SWITCHDEV_CTRL:
209 /* The number of queues for ctrl VSI is equal to number of VFs.
210 * Each ring is associated to the corresponding VF_PR netdev.
211 */
212 vsi->alloc_txq = ice_get_num_vfs(pf);
213 vsi->alloc_rxq = vsi->alloc_txq;
214 vsi->num_q_vectors = 1;
215 break;
216 case ICE_VSI_VF:
217 if (vf->num_req_qs)
218 vf->num_vf_qs = vf->num_req_qs;
219 vsi->alloc_txq = vf->num_vf_qs;
220 vsi->alloc_rxq = vf->num_vf_qs;
221 /* pf->vfs.num_msix_per includes (VF miscellaneous vector +
222 * data queue interrupts). Since vsi->num_q_vectors is number
223 * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
224 * original vector count
225 */
226 vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
227 break;
228 case ICE_VSI_CTRL:
229 vsi->alloc_txq = 1;
230 vsi->alloc_rxq = 1;
231 vsi->num_q_vectors = 1;
232 break;
233 case ICE_VSI_CHNL:
234 vsi->alloc_txq = 0;
235 vsi->alloc_rxq = 0;
236 break;
237 case ICE_VSI_LB:
238 vsi->alloc_txq = 1;
239 vsi->alloc_rxq = 1;
240 break;
241 default:
242 dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
243 break;
244 }
245
246 ice_vsi_set_num_desc(vsi);
247 }
248
249 /**
250 * ice_get_free_slot - get the next non-NULL location index in array
251 * @array: array to search
252 * @size: size of the array
253 * @curr: last known occupied index to be used as a search hint
254 *
255 * void * is being used to keep the functionality generic. This lets us use this
256 * function on any array of pointers.
257 */
ice_get_free_slot(void * array,int size,int curr)258 static int ice_get_free_slot(void *array, int size, int curr)
259 {
260 int **tmp_array = (int **)array;
261 int next;
262
263 if (curr < (size - 1) && !tmp_array[curr + 1]) {
264 next = curr + 1;
265 } else {
266 int i = 0;
267
268 while ((i < size) && (tmp_array[i]))
269 i++;
270 if (i == size)
271 next = ICE_NO_VSI;
272 else
273 next = i;
274 }
275 return next;
276 }
277
278 /**
279 * ice_vsi_delete_from_hw - delete a VSI from the switch
280 * @vsi: pointer to VSI being removed
281 */
ice_vsi_delete_from_hw(struct ice_vsi * vsi)282 static void ice_vsi_delete_from_hw(struct ice_vsi *vsi)
283 {
284 struct ice_pf *pf = vsi->back;
285 struct ice_vsi_ctx *ctxt;
286 int status;
287
288 ice_fltr_remove_all(vsi);
289 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
290 if (!ctxt)
291 return;
292
293 if (vsi->type == ICE_VSI_VF)
294 ctxt->vf_num = vsi->vf->vf_id;
295 ctxt->vsi_num = vsi->vsi_num;
296
297 memcpy(&ctxt->info, &vsi->info, sizeof(ctxt->info));
298
299 status = ice_free_vsi(&pf->hw, vsi->idx, ctxt, false, NULL);
300 if (status)
301 dev_err(ice_pf_to_dev(pf), "Failed to delete VSI %i in FW - error: %d\n",
302 vsi->vsi_num, status);
303
304 kfree(ctxt);
305 }
306
307 /**
308 * ice_vsi_free_arrays - De-allocate queue and vector pointer arrays for the VSI
309 * @vsi: pointer to VSI being cleared
310 */
ice_vsi_free_arrays(struct ice_vsi * vsi)311 static void ice_vsi_free_arrays(struct ice_vsi *vsi)
312 {
313 struct ice_pf *pf = vsi->back;
314 struct device *dev;
315
316 dev = ice_pf_to_dev(pf);
317
318 /* free the ring and vector containers */
319 devm_kfree(dev, vsi->q_vectors);
320 vsi->q_vectors = NULL;
321 devm_kfree(dev, vsi->tx_rings);
322 vsi->tx_rings = NULL;
323 devm_kfree(dev, vsi->rx_rings);
324 vsi->rx_rings = NULL;
325 devm_kfree(dev, vsi->txq_map);
326 vsi->txq_map = NULL;
327 devm_kfree(dev, vsi->rxq_map);
328 vsi->rxq_map = NULL;
329 }
330
331 /**
332 * ice_vsi_free_stats - Free the ring statistics structures
333 * @vsi: VSI pointer
334 */
ice_vsi_free_stats(struct ice_vsi * vsi)335 static void ice_vsi_free_stats(struct ice_vsi *vsi)
336 {
337 struct ice_vsi_stats *vsi_stat;
338 struct ice_pf *pf = vsi->back;
339 int i;
340
341 if (vsi->type == ICE_VSI_CHNL)
342 return;
343 if (!pf->vsi_stats)
344 return;
345
346 vsi_stat = pf->vsi_stats[vsi->idx];
347 if (!vsi_stat)
348 return;
349
350 ice_for_each_alloc_txq(vsi, i) {
351 if (vsi_stat->tx_ring_stats[i]) {
352 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
353 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
354 }
355 }
356
357 ice_for_each_alloc_rxq(vsi, i) {
358 if (vsi_stat->rx_ring_stats[i]) {
359 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
360 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
361 }
362 }
363
364 kfree(vsi_stat->tx_ring_stats);
365 kfree(vsi_stat->rx_ring_stats);
366 kfree(vsi_stat);
367 pf->vsi_stats[vsi->idx] = NULL;
368 }
369
370 /**
371 * ice_vsi_alloc_ring_stats - Allocates Tx and Rx ring stats for the VSI
372 * @vsi: VSI which is having stats allocated
373 */
ice_vsi_alloc_ring_stats(struct ice_vsi * vsi)374 static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
375 {
376 struct ice_ring_stats **tx_ring_stats;
377 struct ice_ring_stats **rx_ring_stats;
378 struct ice_vsi_stats *vsi_stats;
379 struct ice_pf *pf = vsi->back;
380 u16 i;
381
382 vsi_stats = pf->vsi_stats[vsi->idx];
383 tx_ring_stats = vsi_stats->tx_ring_stats;
384 rx_ring_stats = vsi_stats->rx_ring_stats;
385
386 /* Allocate Tx ring stats */
387 ice_for_each_alloc_txq(vsi, i) {
388 struct ice_ring_stats *ring_stats;
389 struct ice_tx_ring *ring;
390
391 ring = vsi->tx_rings[i];
392 ring_stats = tx_ring_stats[i];
393
394 if (!ring_stats) {
395 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
396 if (!ring_stats)
397 goto err_out;
398
399 WRITE_ONCE(tx_ring_stats[i], ring_stats);
400 }
401
402 ring->ring_stats = ring_stats;
403 }
404
405 /* Allocate Rx ring stats */
406 ice_for_each_alloc_rxq(vsi, i) {
407 struct ice_ring_stats *ring_stats;
408 struct ice_rx_ring *ring;
409
410 ring = vsi->rx_rings[i];
411 ring_stats = rx_ring_stats[i];
412
413 if (!ring_stats) {
414 ring_stats = kzalloc(sizeof(*ring_stats), GFP_KERNEL);
415 if (!ring_stats)
416 goto err_out;
417
418 WRITE_ONCE(rx_ring_stats[i], ring_stats);
419 }
420
421 ring->ring_stats = ring_stats;
422 }
423
424 return 0;
425
426 err_out:
427 ice_vsi_free_stats(vsi);
428 return -ENOMEM;
429 }
430
431 /**
432 * ice_vsi_free - clean up and deallocate the provided VSI
433 * @vsi: pointer to VSI being cleared
434 *
435 * This deallocates the VSI's queue resources, removes it from the PF's
436 * VSI array if necessary, and deallocates the VSI
437 */
ice_vsi_free(struct ice_vsi * vsi)438 static void ice_vsi_free(struct ice_vsi *vsi)
439 {
440 struct ice_pf *pf = NULL;
441 struct device *dev;
442
443 if (!vsi || !vsi->back)
444 return;
445
446 pf = vsi->back;
447 dev = ice_pf_to_dev(pf);
448
449 if (!pf->vsi[vsi->idx] || pf->vsi[vsi->idx] != vsi) {
450 dev_dbg(dev, "vsi does not exist at pf->vsi[%d]\n", vsi->idx);
451 return;
452 }
453
454 mutex_lock(&pf->sw_mutex);
455 /* updates the PF for this cleared VSI */
456
457 pf->vsi[vsi->idx] = NULL;
458 pf->next_vsi = vsi->idx;
459
460 ice_vsi_free_stats(vsi);
461 ice_vsi_free_arrays(vsi);
462 mutex_unlock(&pf->sw_mutex);
463 devm_kfree(dev, vsi);
464 }
465
ice_vsi_delete(struct ice_vsi * vsi)466 void ice_vsi_delete(struct ice_vsi *vsi)
467 {
468 ice_vsi_delete_from_hw(vsi);
469 ice_vsi_free(vsi);
470 }
471
472 /**
473 * ice_msix_clean_ctrl_vsi - MSIX mode interrupt handler for ctrl VSI
474 * @irq: interrupt number
475 * @data: pointer to a q_vector
476 */
ice_msix_clean_ctrl_vsi(int __always_unused irq,void * data)477 static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
478 {
479 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
480
481 if (!q_vector->tx.tx_ring)
482 return IRQ_HANDLED;
483
484 #define FDIR_RX_DESC_CLEAN_BUDGET 64
485 ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
486 ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
487
488 return IRQ_HANDLED;
489 }
490
491 /**
492 * ice_msix_clean_rings - MSIX mode Interrupt Handler
493 * @irq: interrupt number
494 * @data: pointer to a q_vector
495 */
ice_msix_clean_rings(int __always_unused irq,void * data)496 static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
497 {
498 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
499
500 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
501 return IRQ_HANDLED;
502
503 q_vector->total_events++;
504
505 napi_schedule(&q_vector->napi);
506
507 return IRQ_HANDLED;
508 }
509
ice_eswitch_msix_clean_rings(int __always_unused irq,void * data)510 static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
511 {
512 struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
513 struct ice_pf *pf = q_vector->vsi->back;
514 struct ice_vf *vf;
515 unsigned int bkt;
516
517 if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
518 return IRQ_HANDLED;
519
520 rcu_read_lock();
521 ice_for_each_vf_rcu(pf, bkt, vf)
522 napi_schedule(&vf->repr->q_vector->napi);
523 rcu_read_unlock();
524
525 return IRQ_HANDLED;
526 }
527
528 /**
529 * ice_vsi_alloc_stat_arrays - Allocate statistics arrays
530 * @vsi: VSI pointer
531 */
ice_vsi_alloc_stat_arrays(struct ice_vsi * vsi)532 static int ice_vsi_alloc_stat_arrays(struct ice_vsi *vsi)
533 {
534 struct ice_vsi_stats *vsi_stat;
535 struct ice_pf *pf = vsi->back;
536
537 if (vsi->type == ICE_VSI_CHNL)
538 return 0;
539 if (!pf->vsi_stats)
540 return -ENOENT;
541
542 if (pf->vsi_stats[vsi->idx])
543 /* realloc will happen in rebuild path */
544 return 0;
545
546 vsi_stat = kzalloc(sizeof(*vsi_stat), GFP_KERNEL);
547 if (!vsi_stat)
548 return -ENOMEM;
549
550 vsi_stat->tx_ring_stats =
551 kcalloc(vsi->alloc_txq, sizeof(*vsi_stat->tx_ring_stats),
552 GFP_KERNEL);
553 if (!vsi_stat->tx_ring_stats)
554 goto err_alloc_tx;
555
556 vsi_stat->rx_ring_stats =
557 kcalloc(vsi->alloc_rxq, sizeof(*vsi_stat->rx_ring_stats),
558 GFP_KERNEL);
559 if (!vsi_stat->rx_ring_stats)
560 goto err_alloc_rx;
561
562 pf->vsi_stats[vsi->idx] = vsi_stat;
563
564 return 0;
565
566 err_alloc_rx:
567 kfree(vsi_stat->rx_ring_stats);
568 err_alloc_tx:
569 kfree(vsi_stat->tx_ring_stats);
570 kfree(vsi_stat);
571 pf->vsi_stats[vsi->idx] = NULL;
572 return -ENOMEM;
573 }
574
575 /**
576 * ice_vsi_alloc_def - set default values for already allocated VSI
577 * @vsi: ptr to VSI
578 * @ch: ptr to channel
579 */
580 static int
ice_vsi_alloc_def(struct ice_vsi * vsi,struct ice_channel * ch)581 ice_vsi_alloc_def(struct ice_vsi *vsi, struct ice_channel *ch)
582 {
583 if (vsi->type != ICE_VSI_CHNL) {
584 ice_vsi_set_num_qs(vsi);
585 if (ice_vsi_alloc_arrays(vsi))
586 return -ENOMEM;
587 }
588
589 switch (vsi->type) {
590 case ICE_VSI_SWITCHDEV_CTRL:
591 /* Setup eswitch MSIX irq handler for VSI */
592 vsi->irq_handler = ice_eswitch_msix_clean_rings;
593 break;
594 case ICE_VSI_PF:
595 /* Setup default MSIX irq handler for VSI */
596 vsi->irq_handler = ice_msix_clean_rings;
597 break;
598 case ICE_VSI_CTRL:
599 /* Setup ctrl VSI MSIX irq handler */
600 vsi->irq_handler = ice_msix_clean_ctrl_vsi;
601 break;
602 case ICE_VSI_CHNL:
603 if (!ch)
604 return -EINVAL;
605
606 vsi->num_rxq = ch->num_rxq;
607 vsi->num_txq = ch->num_txq;
608 vsi->next_base_q = ch->base_q;
609 break;
610 case ICE_VSI_VF:
611 case ICE_VSI_LB:
612 break;
613 default:
614 ice_vsi_free_arrays(vsi);
615 return -EINVAL;
616 }
617
618 return 0;
619 }
620
621 /**
622 * ice_vsi_alloc - Allocates the next available struct VSI in the PF
623 * @pf: board private structure
624 *
625 * Reserves a VSI index from the PF and allocates an empty VSI structure
626 * without a type. The VSI structure must later be initialized by calling
627 * ice_vsi_cfg().
628 *
629 * returns a pointer to a VSI on success, NULL on failure.
630 */
ice_vsi_alloc(struct ice_pf * pf)631 static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf)
632 {
633 struct device *dev = ice_pf_to_dev(pf);
634 struct ice_vsi *vsi = NULL;
635
636 /* Need to protect the allocation of the VSIs at the PF level */
637 mutex_lock(&pf->sw_mutex);
638
639 /* If we have already allocated our maximum number of VSIs,
640 * pf->next_vsi will be ICE_NO_VSI. If not, pf->next_vsi index
641 * is available to be populated
642 */
643 if (pf->next_vsi == ICE_NO_VSI) {
644 dev_dbg(dev, "out of VSI slots!\n");
645 goto unlock_pf;
646 }
647
648 vsi = devm_kzalloc(dev, sizeof(*vsi), GFP_KERNEL);
649 if (!vsi)
650 goto unlock_pf;
651
652 vsi->back = pf;
653 set_bit(ICE_VSI_DOWN, vsi->state);
654
655 /* fill slot and make note of the index */
656 vsi->idx = pf->next_vsi;
657 pf->vsi[pf->next_vsi] = vsi;
658
659 /* prepare pf->next_vsi for next use */
660 pf->next_vsi = ice_get_free_slot(pf->vsi, pf->num_alloc_vsi,
661 pf->next_vsi);
662
663 unlock_pf:
664 mutex_unlock(&pf->sw_mutex);
665 return vsi;
666 }
667
668 /**
669 * ice_alloc_fd_res - Allocate FD resource for a VSI
670 * @vsi: pointer to the ice_vsi
671 *
672 * This allocates the FD resources
673 *
674 * Returns 0 on success, -EPERM on no-op or -EIO on failure
675 */
ice_alloc_fd_res(struct ice_vsi * vsi)676 static int ice_alloc_fd_res(struct ice_vsi *vsi)
677 {
678 struct ice_pf *pf = vsi->back;
679 u32 g_val, b_val;
680
681 /* Flow Director filters are only allocated/assigned to the PF VSI or
682 * CHNL VSI which passes the traffic. The CTRL VSI is only used to
683 * add/delete filters so resources are not allocated to it
684 */
685 if (!test_bit(ICE_FLAG_FD_ENA, pf->flags))
686 return -EPERM;
687
688 if (!(vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF ||
689 vsi->type == ICE_VSI_CHNL))
690 return -EPERM;
691
692 /* FD filters from guaranteed pool per VSI */
693 g_val = pf->hw.func_caps.fd_fltr_guar;
694 if (!g_val)
695 return -EPERM;
696
697 /* FD filters from best effort pool */
698 b_val = pf->hw.func_caps.fd_fltr_best_effort;
699 if (!b_val)
700 return -EPERM;
701
702 /* PF main VSI gets only 64 FD resources from guaranteed pool
703 * when ADQ is configured.
704 */
705 #define ICE_PF_VSI_GFLTR 64
706
707 /* determine FD filter resources per VSI from shared(best effort) and
708 * dedicated pool
709 */
710 if (vsi->type == ICE_VSI_PF) {
711 vsi->num_gfltr = g_val;
712 /* if MQPRIO is configured, main VSI doesn't get all FD
713 * resources from guaranteed pool. PF VSI gets 64 FD resources
714 */
715 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
716 if (g_val < ICE_PF_VSI_GFLTR)
717 return -EPERM;
718 /* allow bare minimum entries for PF VSI */
719 vsi->num_gfltr = ICE_PF_VSI_GFLTR;
720 }
721
722 /* each VSI gets same "best_effort" quota */
723 vsi->num_bfltr = b_val;
724 } else if (vsi->type == ICE_VSI_VF) {
725 vsi->num_gfltr = 0;
726
727 /* each VSI gets same "best_effort" quota */
728 vsi->num_bfltr = b_val;
729 } else {
730 struct ice_vsi *main_vsi;
731 int numtc;
732
733 main_vsi = ice_get_main_vsi(pf);
734 if (!main_vsi)
735 return -EPERM;
736
737 if (!main_vsi->all_numtc)
738 return -EINVAL;
739
740 /* figure out ADQ numtc */
741 numtc = main_vsi->all_numtc - ICE_CHNL_START_TC;
742
743 /* only one TC but still asking resources for channels,
744 * invalid config
745 */
746 if (numtc < ICE_CHNL_START_TC)
747 return -EPERM;
748
749 g_val -= ICE_PF_VSI_GFLTR;
750 /* channel VSIs gets equal share from guaranteed pool */
751 vsi->num_gfltr = g_val / numtc;
752
753 /* each VSI gets same "best_effort" quota */
754 vsi->num_bfltr = b_val;
755 }
756
757 return 0;
758 }
759
760 /**
761 * ice_vsi_get_qs - Assign queues from PF to VSI
762 * @vsi: the VSI to assign queues to
763 *
764 * Returns 0 on success and a negative value on error
765 */
ice_vsi_get_qs(struct ice_vsi * vsi)766 static int ice_vsi_get_qs(struct ice_vsi *vsi)
767 {
768 struct ice_pf *pf = vsi->back;
769 struct ice_qs_cfg tx_qs_cfg = {
770 .qs_mutex = &pf->avail_q_mutex,
771 .pf_map = pf->avail_txqs,
772 .pf_map_size = pf->max_pf_txqs,
773 .q_count = vsi->alloc_txq,
774 .scatter_count = ICE_MAX_SCATTER_TXQS,
775 .vsi_map = vsi->txq_map,
776 .vsi_map_offset = 0,
777 .mapping_mode = ICE_VSI_MAP_CONTIG
778 };
779 struct ice_qs_cfg rx_qs_cfg = {
780 .qs_mutex = &pf->avail_q_mutex,
781 .pf_map = pf->avail_rxqs,
782 .pf_map_size = pf->max_pf_rxqs,
783 .q_count = vsi->alloc_rxq,
784 .scatter_count = ICE_MAX_SCATTER_RXQS,
785 .vsi_map = vsi->rxq_map,
786 .vsi_map_offset = 0,
787 .mapping_mode = ICE_VSI_MAP_CONTIG
788 };
789 int ret;
790
791 if (vsi->type == ICE_VSI_CHNL)
792 return 0;
793
794 ret = __ice_vsi_get_qs(&tx_qs_cfg);
795 if (ret)
796 return ret;
797 vsi->tx_mapping_mode = tx_qs_cfg.mapping_mode;
798
799 ret = __ice_vsi_get_qs(&rx_qs_cfg);
800 if (ret)
801 return ret;
802 vsi->rx_mapping_mode = rx_qs_cfg.mapping_mode;
803
804 return 0;
805 }
806
807 /**
808 * ice_vsi_put_qs - Release queues from VSI to PF
809 * @vsi: the VSI that is going to release queues
810 */
ice_vsi_put_qs(struct ice_vsi * vsi)811 static void ice_vsi_put_qs(struct ice_vsi *vsi)
812 {
813 struct ice_pf *pf = vsi->back;
814 int i;
815
816 mutex_lock(&pf->avail_q_mutex);
817
818 ice_for_each_alloc_txq(vsi, i) {
819 clear_bit(vsi->txq_map[i], pf->avail_txqs);
820 vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
821 }
822
823 ice_for_each_alloc_rxq(vsi, i) {
824 clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
825 vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
826 }
827
828 mutex_unlock(&pf->avail_q_mutex);
829 }
830
831 /**
832 * ice_is_safe_mode
833 * @pf: pointer to the PF struct
834 *
835 * returns true if driver is in safe mode, false otherwise
836 */
ice_is_safe_mode(struct ice_pf * pf)837 bool ice_is_safe_mode(struct ice_pf *pf)
838 {
839 return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
840 }
841
842 /**
843 * ice_is_rdma_ena
844 * @pf: pointer to the PF struct
845 *
846 * returns true if RDMA is currently supported, false otherwise
847 */
ice_is_rdma_ena(struct ice_pf * pf)848 bool ice_is_rdma_ena(struct ice_pf *pf)
849 {
850 return test_bit(ICE_FLAG_RDMA_ENA, pf->flags);
851 }
852
853 /**
854 * ice_vsi_clean_rss_flow_fld - Delete RSS configuration
855 * @vsi: the VSI being cleaned up
856 *
857 * This function deletes RSS input set for all flows that were configured
858 * for this VSI
859 */
ice_vsi_clean_rss_flow_fld(struct ice_vsi * vsi)860 static void ice_vsi_clean_rss_flow_fld(struct ice_vsi *vsi)
861 {
862 struct ice_pf *pf = vsi->back;
863 int status;
864
865 if (ice_is_safe_mode(pf))
866 return;
867
868 status = ice_rem_vsi_rss_cfg(&pf->hw, vsi->idx);
869 if (status)
870 dev_dbg(ice_pf_to_dev(pf), "ice_rem_vsi_rss_cfg failed for vsi = %d, error = %d\n",
871 vsi->vsi_num, status);
872 }
873
874 /**
875 * ice_rss_clean - Delete RSS related VSI structures and configuration
876 * @vsi: the VSI being removed
877 */
ice_rss_clean(struct ice_vsi * vsi)878 static void ice_rss_clean(struct ice_vsi *vsi)
879 {
880 struct ice_pf *pf = vsi->back;
881 struct device *dev;
882
883 dev = ice_pf_to_dev(pf);
884
885 devm_kfree(dev, vsi->rss_hkey_user);
886 devm_kfree(dev, vsi->rss_lut_user);
887
888 ice_vsi_clean_rss_flow_fld(vsi);
889 /* remove RSS replay list */
890 if (!ice_is_safe_mode(pf))
891 ice_rem_vsi_rss_list(&pf->hw, vsi->idx);
892 }
893
894 /**
895 * ice_vsi_set_rss_params - Setup RSS capabilities per VSI type
896 * @vsi: the VSI being configured
897 */
ice_vsi_set_rss_params(struct ice_vsi * vsi)898 static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
899 {
900 struct ice_hw_common_caps *cap;
901 struct ice_pf *pf = vsi->back;
902 u16 max_rss_size;
903
904 if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
905 vsi->rss_size = 1;
906 return;
907 }
908
909 cap = &pf->hw.func_caps.common_cap;
910 max_rss_size = BIT(cap->rss_table_entry_width);
911 switch (vsi->type) {
912 case ICE_VSI_CHNL:
913 case ICE_VSI_PF:
914 /* PF VSI will inherit RSS instance of PF */
915 vsi->rss_table_size = (u16)cap->rss_table_size;
916 if (vsi->type == ICE_VSI_CHNL)
917 vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
918 else
919 vsi->rss_size = min_t(u16, num_online_cpus(),
920 max_rss_size);
921 vsi->rss_lut_type = ICE_LUT_PF;
922 break;
923 case ICE_VSI_SWITCHDEV_CTRL:
924 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
925 vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
926 vsi->rss_lut_type = ICE_LUT_VSI;
927 break;
928 case ICE_VSI_VF:
929 /* VF VSI will get a small RSS table.
930 * For VSI_LUT, LUT size should be set to 64 bytes.
931 */
932 vsi->rss_table_size = ICE_LUT_VSI_SIZE;
933 vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
934 vsi->rss_lut_type = ICE_LUT_VSI;
935 break;
936 case ICE_VSI_LB:
937 break;
938 default:
939 dev_dbg(ice_pf_to_dev(pf), "Unsupported VSI type %s\n",
940 ice_vsi_type_str(vsi->type));
941 break;
942 }
943 }
944
945 /**
946 * ice_set_dflt_vsi_ctx - Set default VSI context before adding a VSI
947 * @hw: HW structure used to determine the VLAN mode of the device
948 * @ctxt: the VSI context being set
949 *
950 * This initializes a default VSI context for all sections except the Queues.
951 */
ice_set_dflt_vsi_ctx(struct ice_hw * hw,struct ice_vsi_ctx * ctxt)952 static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt)
953 {
954 u32 table = 0;
955
956 memset(&ctxt->info, 0, sizeof(ctxt->info));
957 /* VSI's should be allocated from shared pool */
958 ctxt->alloc_from_pool = true;
959 /* Src pruning enabled by default */
960 ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
961 /* Traffic from VSI can be sent to LAN */
962 ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
963 /* allow all untagged/tagged packets by default on Tx */
964 ctxt->info.inner_vlan_flags = ((ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL &
965 ICE_AQ_VSI_INNER_VLAN_TX_MODE_M) >>
966 ICE_AQ_VSI_INNER_VLAN_TX_MODE_S);
967 /* SVM - by default bits 3 and 4 in inner_vlan_flags are 0's which
968 * results in legacy behavior (show VLAN, DEI, and UP) in descriptor.
969 *
970 * DVM - leave inner VLAN in packet by default
971 */
972 if (ice_is_dvm_ena(hw)) {
973 ctxt->info.inner_vlan_flags |=
974 FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
975 ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
976 ctxt->info.outer_vlan_flags =
977 (ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL <<
978 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_S) &
979 ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M;
980 ctxt->info.outer_vlan_flags |=
981 (ICE_AQ_VSI_OUTER_TAG_VLAN_8100 <<
982 ICE_AQ_VSI_OUTER_TAG_TYPE_S) &
983 ICE_AQ_VSI_OUTER_TAG_TYPE_M;
984 ctxt->info.outer_vlan_flags |=
985 FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_EMODE_M,
986 ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING);
987 }
988 /* Have 1:1 UP mapping for both ingress/egress tables */
989 table |= ICE_UP_TABLE_TRANSLATE(0, 0);
990 table |= ICE_UP_TABLE_TRANSLATE(1, 1);
991 table |= ICE_UP_TABLE_TRANSLATE(2, 2);
992 table |= ICE_UP_TABLE_TRANSLATE(3, 3);
993 table |= ICE_UP_TABLE_TRANSLATE(4, 4);
994 table |= ICE_UP_TABLE_TRANSLATE(5, 5);
995 table |= ICE_UP_TABLE_TRANSLATE(6, 6);
996 table |= ICE_UP_TABLE_TRANSLATE(7, 7);
997 ctxt->info.ingress_table = cpu_to_le32(table);
998 ctxt->info.egress_table = cpu_to_le32(table);
999 /* Have 1:1 UP mapping for outer to inner UP table */
1000 ctxt->info.outer_up_table = cpu_to_le32(table);
1001 /* No Outer tag support outer_tag_flags remains to zero */
1002 }
1003
1004 /**
1005 * ice_vsi_setup_q_map - Setup a VSI queue map
1006 * @vsi: the VSI being configured
1007 * @ctxt: VSI context structure
1008 */
ice_vsi_setup_q_map(struct ice_vsi * vsi,struct ice_vsi_ctx * ctxt)1009 static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1010 {
1011 u16 offset = 0, qmap = 0, tx_count = 0, rx_count = 0, pow = 0;
1012 u16 num_txq_per_tc, num_rxq_per_tc;
1013 u16 qcount_tx = vsi->alloc_txq;
1014 u16 qcount_rx = vsi->alloc_rxq;
1015 u8 netdev_tc = 0;
1016 int i;
1017
1018 if (!vsi->tc_cfg.numtc) {
1019 /* at least TC0 should be enabled by default */
1020 vsi->tc_cfg.numtc = 1;
1021 vsi->tc_cfg.ena_tc = 1;
1022 }
1023
1024 num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
1025 if (!num_rxq_per_tc)
1026 num_rxq_per_tc = 1;
1027 num_txq_per_tc = qcount_tx / vsi->tc_cfg.numtc;
1028 if (!num_txq_per_tc)
1029 num_txq_per_tc = 1;
1030
1031 /* find the (rounded up) power-of-2 of qcount */
1032 pow = (u16)order_base_2(num_rxq_per_tc);
1033
1034 /* TC mapping is a function of the number of Rx queues assigned to the
1035 * VSI for each traffic class and the offset of these queues.
1036 * The first 10 bits are for queue offset for TC0, next 4 bits for no:of
1037 * queues allocated to TC0. No:of queues is a power-of-2.
1038 *
1039 * If TC is not enabled, the queue offset is set to 0, and allocate one
1040 * queue, this way, traffic for the given TC will be sent to the default
1041 * queue.
1042 *
1043 * Setup number and offset of Rx queues for all TCs for the VSI
1044 */
1045 ice_for_each_traffic_class(i) {
1046 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
1047 /* TC is not enabled */
1048 vsi->tc_cfg.tc_info[i].qoffset = 0;
1049 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
1050 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
1051 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
1052 ctxt->info.tc_mapping[i] = 0;
1053 continue;
1054 }
1055
1056 /* TC is enabled */
1057 vsi->tc_cfg.tc_info[i].qoffset = offset;
1058 vsi->tc_cfg.tc_info[i].qcount_rx = num_rxq_per_tc;
1059 vsi->tc_cfg.tc_info[i].qcount_tx = num_txq_per_tc;
1060 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
1061
1062 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
1063 ICE_AQ_VSI_TC_Q_OFFSET_M) |
1064 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
1065 ICE_AQ_VSI_TC_Q_NUM_M);
1066 offset += num_rxq_per_tc;
1067 tx_count += num_txq_per_tc;
1068 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1069 }
1070
1071 /* if offset is non-zero, means it is calculated correctly based on
1072 * enabled TCs for a given VSI otherwise qcount_rx will always
1073 * be correct and non-zero because it is based off - VSI's
1074 * allocated Rx queues which is at least 1 (hence qcount_tx will be
1075 * at least 1)
1076 */
1077 if (offset)
1078 rx_count = offset;
1079 else
1080 rx_count = num_rxq_per_tc;
1081
1082 if (rx_count > vsi->alloc_rxq) {
1083 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
1084 rx_count, vsi->alloc_rxq);
1085 return -EINVAL;
1086 }
1087
1088 if (tx_count > vsi->alloc_txq) {
1089 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
1090 tx_count, vsi->alloc_txq);
1091 return -EINVAL;
1092 }
1093
1094 vsi->num_txq = tx_count;
1095 vsi->num_rxq = rx_count;
1096
1097 if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
1098 dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
1099 /* since there is a chance that num_rxq could have been changed
1100 * in the above for loop, make num_txq equal to num_rxq.
1101 */
1102 vsi->num_txq = vsi->num_rxq;
1103 }
1104
1105 /* Rx queue mapping */
1106 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1107 /* q_mapping buffer holds the info for the first queue allocated for
1108 * this VSI in the PF space and also the number of queues associated
1109 * with this VSI.
1110 */
1111 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
1112 ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq);
1113
1114 return 0;
1115 }
1116
1117 /**
1118 * ice_set_fd_vsi_ctx - Set FD VSI context before adding a VSI
1119 * @ctxt: the VSI context being set
1120 * @vsi: the VSI being configured
1121 */
ice_set_fd_vsi_ctx(struct ice_vsi_ctx * ctxt,struct ice_vsi * vsi)1122 static void ice_set_fd_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1123 {
1124 u8 dflt_q_group, dflt_q_prio;
1125 u16 dflt_q, report_q, val;
1126
1127 if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_CTRL &&
1128 vsi->type != ICE_VSI_VF && vsi->type != ICE_VSI_CHNL)
1129 return;
1130
1131 val = ICE_AQ_VSI_PROP_FLOW_DIR_VALID;
1132 ctxt->info.valid_sections |= cpu_to_le16(val);
1133 dflt_q = 0;
1134 dflt_q_group = 0;
1135 report_q = 0;
1136 dflt_q_prio = 0;
1137
1138 /* enable flow director filtering/programming */
1139 val = ICE_AQ_VSI_FD_ENABLE | ICE_AQ_VSI_FD_PROG_ENABLE;
1140 ctxt->info.fd_options = cpu_to_le16(val);
1141 /* max of allocated flow director filters */
1142 ctxt->info.max_fd_fltr_dedicated =
1143 cpu_to_le16(vsi->num_gfltr);
1144 /* max of shared flow director filters any VSI may program */
1145 ctxt->info.max_fd_fltr_shared =
1146 cpu_to_le16(vsi->num_bfltr);
1147 /* default queue index within the VSI of the default FD */
1148 val = ((dflt_q << ICE_AQ_VSI_FD_DEF_Q_S) &
1149 ICE_AQ_VSI_FD_DEF_Q_M);
1150 /* target queue or queue group to the FD filter */
1151 val |= ((dflt_q_group << ICE_AQ_VSI_FD_DEF_GRP_S) &
1152 ICE_AQ_VSI_FD_DEF_GRP_M);
1153 ctxt->info.fd_def_q = cpu_to_le16(val);
1154 /* queue index on which FD filter completion is reported */
1155 val = ((report_q << ICE_AQ_VSI_FD_REPORT_Q_S) &
1156 ICE_AQ_VSI_FD_REPORT_Q_M);
1157 /* priority of the default qindex action */
1158 val |= ((dflt_q_prio << ICE_AQ_VSI_FD_DEF_PRIORITY_S) &
1159 ICE_AQ_VSI_FD_DEF_PRIORITY_M);
1160 ctxt->info.fd_report_opt = cpu_to_le16(val);
1161 }
1162
1163 /**
1164 * ice_set_rss_vsi_ctx - Set RSS VSI context before adding a VSI
1165 * @ctxt: the VSI context being set
1166 * @vsi: the VSI being configured
1167 */
ice_set_rss_vsi_ctx(struct ice_vsi_ctx * ctxt,struct ice_vsi * vsi)1168 static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
1169 {
1170 u8 lut_type, hash_type;
1171 struct device *dev;
1172 struct ice_pf *pf;
1173
1174 pf = vsi->back;
1175 dev = ice_pf_to_dev(pf);
1176
1177 switch (vsi->type) {
1178 case ICE_VSI_CHNL:
1179 case ICE_VSI_PF:
1180 /* PF VSI will inherit RSS instance of PF */
1181 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
1182 hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1183 break;
1184 case ICE_VSI_VF:
1185 /* VF VSI will gets a small RSS table which is a VSI LUT type */
1186 lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_VSI;
1187 hash_type = ICE_AQ_VSI_Q_OPT_RSS_HASH_TPLZ;
1188 break;
1189 default:
1190 dev_dbg(dev, "Unsupported VSI type %s\n",
1191 ice_vsi_type_str(vsi->type));
1192 return;
1193 }
1194
1195 ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
1196 ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
1197 (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
1198 }
1199
1200 static void
ice_chnl_vsi_setup_q_map(struct ice_vsi * vsi,struct ice_vsi_ctx * ctxt)1201 ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
1202 {
1203 struct ice_pf *pf = vsi->back;
1204 u16 qcount, qmap;
1205 u8 offset = 0;
1206 int pow;
1207
1208 qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
1209
1210 pow = order_base_2(qcount);
1211 qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
1212 ICE_AQ_VSI_TC_Q_OFFSET_M) |
1213 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
1214 ICE_AQ_VSI_TC_Q_NUM_M);
1215
1216 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
1217 ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
1218 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
1219 ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
1220 }
1221
1222 /**
1223 * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
1224 * @vsi: VSI to check whether or not VLAN pruning is enabled.
1225 *
1226 * returns true if Rx VLAN pruning is enabled and false otherwise.
1227 */
ice_vsi_is_vlan_pruning_ena(struct ice_vsi * vsi)1228 static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
1229 {
1230 return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1231 }
1232
1233 /**
1234 * ice_vsi_init - Create and initialize a VSI
1235 * @vsi: the VSI being configured
1236 * @vsi_flags: VSI configuration flags
1237 *
1238 * Set ICE_FLAG_VSI_INIT to initialize a new VSI context, clear it to
1239 * reconfigure an existing context.
1240 *
1241 * This initializes a VSI context depending on the VSI type to be added and
1242 * passes it down to the add_vsi aq command to create a new VSI.
1243 */
ice_vsi_init(struct ice_vsi * vsi,u32 vsi_flags)1244 static int ice_vsi_init(struct ice_vsi *vsi, u32 vsi_flags)
1245 {
1246 struct ice_pf *pf = vsi->back;
1247 struct ice_hw *hw = &pf->hw;
1248 struct ice_vsi_ctx *ctxt;
1249 struct device *dev;
1250 int ret = 0;
1251
1252 dev = ice_pf_to_dev(pf);
1253 ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
1254 if (!ctxt)
1255 return -ENOMEM;
1256
1257 switch (vsi->type) {
1258 case ICE_VSI_CTRL:
1259 case ICE_VSI_LB:
1260 case ICE_VSI_PF:
1261 ctxt->flags = ICE_AQ_VSI_TYPE_PF;
1262 break;
1263 case ICE_VSI_SWITCHDEV_CTRL:
1264 case ICE_VSI_CHNL:
1265 ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
1266 break;
1267 case ICE_VSI_VF:
1268 ctxt->flags = ICE_AQ_VSI_TYPE_VF;
1269 /* VF number here is the absolute VF number (0-255) */
1270 ctxt->vf_num = vsi->vf->vf_id + hw->func_caps.vf_base_id;
1271 break;
1272 default:
1273 ret = -ENODEV;
1274 goto out;
1275 }
1276
1277 /* Handle VLAN pruning for channel VSI if main VSI has VLAN
1278 * prune enabled
1279 */
1280 if (vsi->type == ICE_VSI_CHNL) {
1281 struct ice_vsi *main_vsi;
1282
1283 main_vsi = ice_get_main_vsi(pf);
1284 if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
1285 ctxt->info.sw_flags2 |=
1286 ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1287 else
1288 ctxt->info.sw_flags2 &=
1289 ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
1290 }
1291
1292 ice_set_dflt_vsi_ctx(hw, ctxt);
1293 if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
1294 ice_set_fd_vsi_ctx(ctxt, vsi);
1295 /* if the switch is in VEB mode, allow VSI loopback */
1296 if (vsi->vsw->bridge_mode == BRIDGE_MODE_VEB)
1297 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB;
1298
1299 /* Set LUT type and HASH type if RSS is enabled */
1300 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags) &&
1301 vsi->type != ICE_VSI_CTRL) {
1302 ice_set_rss_vsi_ctx(ctxt, vsi);
1303 /* if updating VSI context, make sure to set valid_section:
1304 * to indicate which section of VSI context being updated
1305 */
1306 if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1307 ctxt->info.valid_sections |=
1308 cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID);
1309 }
1310
1311 ctxt->info.sw_id = vsi->port_info->sw_id;
1312 if (vsi->type == ICE_VSI_CHNL) {
1313 ice_chnl_vsi_setup_q_map(vsi, ctxt);
1314 } else {
1315 ret = ice_vsi_setup_q_map(vsi, ctxt);
1316 if (ret)
1317 goto out;
1318
1319 if (!(vsi_flags & ICE_VSI_FLAG_INIT))
1320 /* means VSI being updated */
1321 /* must to indicate which section of VSI context are
1322 * being modified
1323 */
1324 ctxt->info.valid_sections |=
1325 cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
1326 }
1327
1328 /* Allow control frames out of main VSI */
1329 if (vsi->type == ICE_VSI_PF) {
1330 ctxt->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
1331 ctxt->info.valid_sections |=
1332 cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
1333 }
1334
1335 if (vsi_flags & ICE_VSI_FLAG_INIT) {
1336 ret = ice_add_vsi(hw, vsi->idx, ctxt, NULL);
1337 if (ret) {
1338 dev_err(dev, "Add VSI failed, err %d\n", ret);
1339 ret = -EIO;
1340 goto out;
1341 }
1342 } else {
1343 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
1344 if (ret) {
1345 dev_err(dev, "Update VSI failed, err %d\n", ret);
1346 ret = -EIO;
1347 goto out;
1348 }
1349 }
1350
1351 /* keep context for update VSI operations */
1352 vsi->info = ctxt->info;
1353
1354 /* record VSI number returned */
1355 vsi->vsi_num = ctxt->vsi_num;
1356
1357 out:
1358 kfree(ctxt);
1359 return ret;
1360 }
1361
1362 /**
1363 * ice_vsi_clear_rings - Deallocates the Tx and Rx rings for VSI
1364 * @vsi: the VSI having rings deallocated
1365 */
ice_vsi_clear_rings(struct ice_vsi * vsi)1366 static void ice_vsi_clear_rings(struct ice_vsi *vsi)
1367 {
1368 int i;
1369
1370 /* Avoid stale references by clearing map from vector to ring */
1371 if (vsi->q_vectors) {
1372 ice_for_each_q_vector(vsi, i) {
1373 struct ice_q_vector *q_vector = vsi->q_vectors[i];
1374
1375 if (q_vector) {
1376 q_vector->tx.tx_ring = NULL;
1377 q_vector->rx.rx_ring = NULL;
1378 }
1379 }
1380 }
1381
1382 if (vsi->tx_rings) {
1383 ice_for_each_alloc_txq(vsi, i) {
1384 if (vsi->tx_rings[i]) {
1385 kfree_rcu(vsi->tx_rings[i], rcu);
1386 WRITE_ONCE(vsi->tx_rings[i], NULL);
1387 }
1388 }
1389 }
1390 if (vsi->rx_rings) {
1391 ice_for_each_alloc_rxq(vsi, i) {
1392 if (vsi->rx_rings[i]) {
1393 kfree_rcu(vsi->rx_rings[i], rcu);
1394 WRITE_ONCE(vsi->rx_rings[i], NULL);
1395 }
1396 }
1397 }
1398 }
1399
1400 /**
1401 * ice_vsi_alloc_rings - Allocates Tx and Rx rings for the VSI
1402 * @vsi: VSI which is having rings allocated
1403 */
ice_vsi_alloc_rings(struct ice_vsi * vsi)1404 static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
1405 {
1406 bool dvm_ena = ice_is_dvm_ena(&vsi->back->hw);
1407 struct ice_pf *pf = vsi->back;
1408 struct device *dev;
1409 u16 i;
1410
1411 dev = ice_pf_to_dev(pf);
1412 /* Allocate Tx rings */
1413 ice_for_each_alloc_txq(vsi, i) {
1414 struct ice_tx_ring *ring;
1415
1416 /* allocate with kzalloc(), free with kfree_rcu() */
1417 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1418
1419 if (!ring)
1420 goto err_out;
1421
1422 ring->q_index = i;
1423 ring->reg_idx = vsi->txq_map[i];
1424 ring->vsi = vsi;
1425 ring->tx_tstamps = &pf->ptp.port.tx;
1426 ring->dev = dev;
1427 ring->count = vsi->num_tx_desc;
1428 ring->txq_teid = ICE_INVAL_TEID;
1429 if (dvm_ena)
1430 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG2;
1431 else
1432 ring->flags |= ICE_TX_FLAGS_RING_VLAN_L2TAG1;
1433 WRITE_ONCE(vsi->tx_rings[i], ring);
1434 }
1435
1436 /* Allocate Rx rings */
1437 ice_for_each_alloc_rxq(vsi, i) {
1438 struct ice_rx_ring *ring;
1439
1440 /* allocate with kzalloc(), free with kfree_rcu() */
1441 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1442 if (!ring)
1443 goto err_out;
1444
1445 ring->q_index = i;
1446 ring->reg_idx = vsi->rxq_map[i];
1447 ring->vsi = vsi;
1448 ring->netdev = vsi->netdev;
1449 ring->dev = dev;
1450 ring->count = vsi->num_rx_desc;
1451 ring->cached_phctime = pf->ptp.cached_phc_time;
1452 WRITE_ONCE(vsi->rx_rings[i], ring);
1453 }
1454
1455 return 0;
1456
1457 err_out:
1458 ice_vsi_clear_rings(vsi);
1459 return -ENOMEM;
1460 }
1461
1462 /**
1463 * ice_vsi_manage_rss_lut - disable/enable RSS
1464 * @vsi: the VSI being changed
1465 * @ena: boolean value indicating if this is an enable or disable request
1466 *
1467 * In the event of disable request for RSS, this function will zero out RSS
1468 * LUT, while in the event of enable request for RSS, it will reconfigure RSS
1469 * LUT.
1470 */
ice_vsi_manage_rss_lut(struct ice_vsi * vsi,bool ena)1471 void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
1472 {
1473 u8 *lut;
1474
1475 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1476 if (!lut)
1477 return;
1478
1479 if (ena) {
1480 if (vsi->rss_lut_user)
1481 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1482 else
1483 ice_fill_rss_lut(lut, vsi->rss_table_size,
1484 vsi->rss_size);
1485 }
1486
1487 ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1488 kfree(lut);
1489 }
1490
1491 /**
1492 * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
1493 * @vsi: VSI to be configured
1494 * @disable: set to true to have FCS / CRC in the frame data
1495 */
ice_vsi_cfg_crc_strip(struct ice_vsi * vsi,bool disable)1496 void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
1497 {
1498 int i;
1499
1500 ice_for_each_rxq(vsi, i)
1501 if (disable)
1502 vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
1503 else
1504 vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
1505 }
1506
1507 /**
1508 * ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
1509 * @vsi: VSI to be configured
1510 */
ice_vsi_cfg_rss_lut_key(struct ice_vsi * vsi)1511 int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
1512 {
1513 struct ice_pf *pf = vsi->back;
1514 struct device *dev;
1515 u8 *lut, *key;
1516 int err;
1517
1518 dev = ice_pf_to_dev(pf);
1519 if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
1520 (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
1521 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
1522 } else {
1523 vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
1524
1525 /* If orig_rss_size is valid and it is less than determined
1526 * main VSI's rss_size, update main VSI's rss_size to be
1527 * orig_rss_size so that when tc-qdisc is deleted, main VSI
1528 * RSS table gets programmed to be correct (whatever it was
1529 * to begin with (prior to setup-tc for ADQ config)
1530 */
1531 if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
1532 vsi->orig_rss_size <= vsi->num_rxq) {
1533 vsi->rss_size = vsi->orig_rss_size;
1534 /* now orig_rss_size is used, reset it to zero */
1535 vsi->orig_rss_size = 0;
1536 }
1537 }
1538
1539 lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
1540 if (!lut)
1541 return -ENOMEM;
1542
1543 if (vsi->rss_lut_user)
1544 memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
1545 else
1546 ice_fill_rss_lut(lut, vsi->rss_table_size, vsi->rss_size);
1547
1548 err = ice_set_rss_lut(vsi, lut, vsi->rss_table_size);
1549 if (err) {
1550 dev_err(dev, "set_rss_lut failed, error %d\n", err);
1551 goto ice_vsi_cfg_rss_exit;
1552 }
1553
1554 key = kzalloc(ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE, GFP_KERNEL);
1555 if (!key) {
1556 err = -ENOMEM;
1557 goto ice_vsi_cfg_rss_exit;
1558 }
1559
1560 if (vsi->rss_hkey_user)
1561 memcpy(key, vsi->rss_hkey_user, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1562 else
1563 netdev_rss_key_fill((void *)key, ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE);
1564
1565 err = ice_set_rss_key(vsi, key);
1566 if (err)
1567 dev_err(dev, "set_rss_key failed, error %d\n", err);
1568
1569 kfree(key);
1570 ice_vsi_cfg_rss_exit:
1571 kfree(lut);
1572 return err;
1573 }
1574
1575 /**
1576 * ice_vsi_set_vf_rss_flow_fld - Sets VF VSI RSS input set for different flows
1577 * @vsi: VSI to be configured
1578 *
1579 * This function will only be called during the VF VSI setup. Upon successful
1580 * completion of package download, this function will configure default RSS
1581 * input sets for VF VSI.
1582 */
ice_vsi_set_vf_rss_flow_fld(struct ice_vsi * vsi)1583 static void ice_vsi_set_vf_rss_flow_fld(struct ice_vsi *vsi)
1584 {
1585 struct ice_pf *pf = vsi->back;
1586 struct device *dev;
1587 int status;
1588
1589 dev = ice_pf_to_dev(pf);
1590 if (ice_is_safe_mode(pf)) {
1591 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1592 vsi->vsi_num);
1593 return;
1594 }
1595
1596 status = ice_add_avf_rss_cfg(&pf->hw, vsi->idx, ICE_DEFAULT_RSS_HENA);
1597 if (status)
1598 dev_dbg(dev, "ice_add_avf_rss_cfg failed for vsi = %d, error = %d\n",
1599 vsi->vsi_num, status);
1600 }
1601
1602 /**
1603 * ice_vsi_set_rss_flow_fld - Sets RSS input set for different flows
1604 * @vsi: VSI to be configured
1605 *
1606 * This function will only be called after successful download package call
1607 * during initialization of PF. Since the downloaded package will erase the
1608 * RSS section, this function will configure RSS input sets for different
1609 * flow types. The last profile added has the highest priority, therefore 2
1610 * tuple profiles (i.e. IPv4 src/dst) are added before 4 tuple profiles
1611 * (i.e. IPv4 src/dst TCP src/dst port).
1612 */
ice_vsi_set_rss_flow_fld(struct ice_vsi * vsi)1613 static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
1614 {
1615 u16 vsi_handle = vsi->idx, vsi_num = vsi->vsi_num;
1616 struct ice_pf *pf = vsi->back;
1617 struct ice_hw *hw = &pf->hw;
1618 struct device *dev;
1619 int status;
1620
1621 dev = ice_pf_to_dev(pf);
1622 if (ice_is_safe_mode(pf)) {
1623 dev_dbg(dev, "Advanced RSS disabled. Package download failed, vsi num = %d\n",
1624 vsi_num);
1625 return;
1626 }
1627 /* configure RSS for IPv4 with input set IP src/dst */
1628 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1629 ICE_FLOW_SEG_HDR_IPV4);
1630 if (status)
1631 dev_dbg(dev, "ice_add_rss_cfg failed for ipv4 flow, vsi = %d, error = %d\n",
1632 vsi_num, status);
1633
1634 /* configure RSS for IPv6 with input set IPv6 src/dst */
1635 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1636 ICE_FLOW_SEG_HDR_IPV6);
1637 if (status)
1638 dev_dbg(dev, "ice_add_rss_cfg failed for ipv6 flow, vsi = %d, error = %d\n",
1639 vsi_num, status);
1640
1641 /* configure RSS for tcp4 with input set IP src/dst, TCP src/dst */
1642 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV4,
1643 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV4);
1644 if (status)
1645 dev_dbg(dev, "ice_add_rss_cfg failed for tcp4 flow, vsi = %d, error = %d\n",
1646 vsi_num, status);
1647
1648 /* configure RSS for udp4 with input set IP src/dst, UDP src/dst */
1649 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV4,
1650 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV4);
1651 if (status)
1652 dev_dbg(dev, "ice_add_rss_cfg failed for udp4 flow, vsi = %d, error = %d\n",
1653 vsi_num, status);
1654
1655 /* configure RSS for sctp4 with input set IP src/dst */
1656 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV4,
1657 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV4);
1658 if (status)
1659 dev_dbg(dev, "ice_add_rss_cfg failed for sctp4 flow, vsi = %d, error = %d\n",
1660 vsi_num, status);
1661
1662 /* configure RSS for tcp6 with input set IPv6 src/dst, TCP src/dst */
1663 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_TCP_IPV6,
1664 ICE_FLOW_SEG_HDR_TCP | ICE_FLOW_SEG_HDR_IPV6);
1665 if (status)
1666 dev_dbg(dev, "ice_add_rss_cfg failed for tcp6 flow, vsi = %d, error = %d\n",
1667 vsi_num, status);
1668
1669 /* configure RSS for udp6 with input set IPv6 src/dst, UDP src/dst */
1670 status = ice_add_rss_cfg(hw, vsi_handle, ICE_HASH_UDP_IPV6,
1671 ICE_FLOW_SEG_HDR_UDP | ICE_FLOW_SEG_HDR_IPV6);
1672 if (status)
1673 dev_dbg(dev, "ice_add_rss_cfg failed for udp6 flow, vsi = %d, error = %d\n",
1674 vsi_num, status);
1675
1676 /* configure RSS for sctp6 with input set IPv6 src/dst */
1677 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_IPV6,
1678 ICE_FLOW_SEG_HDR_SCTP | ICE_FLOW_SEG_HDR_IPV6);
1679 if (status)
1680 dev_dbg(dev, "ice_add_rss_cfg failed for sctp6 flow, vsi = %d, error = %d\n",
1681 vsi_num, status);
1682
1683 status = ice_add_rss_cfg(hw, vsi_handle, ICE_FLOW_HASH_ESP_SPI,
1684 ICE_FLOW_SEG_HDR_ESP);
1685 if (status)
1686 dev_dbg(dev, "ice_add_rss_cfg failed for esp/spi flow, vsi = %d, error = %d\n",
1687 vsi_num, status);
1688 }
1689
1690 /**
1691 * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
1692 * @vsi: VSI
1693 */
ice_vsi_cfg_frame_size(struct ice_vsi * vsi)1694 static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
1695 {
1696 if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
1697 vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
1698 vsi->rx_buf_len = ICE_RXBUF_1664;
1699 #if (PAGE_SIZE < 8192)
1700 } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
1701 (vsi->netdev->mtu <= ETH_DATA_LEN)) {
1702 vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
1703 vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
1704 #endif
1705 } else {
1706 vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
1707 vsi->rx_buf_len = ICE_RXBUF_3072;
1708 }
1709 }
1710
1711 /**
1712 * ice_pf_state_is_nominal - checks the PF for nominal state
1713 * @pf: pointer to PF to check
1714 *
1715 * Check the PF's state for a collection of bits that would indicate
1716 * the PF is in a state that would inhibit normal operation for
1717 * driver functionality.
1718 *
1719 * Returns true if PF is in a nominal state, false otherwise
1720 */
ice_pf_state_is_nominal(struct ice_pf * pf)1721 bool ice_pf_state_is_nominal(struct ice_pf *pf)
1722 {
1723 DECLARE_BITMAP(check_bits, ICE_STATE_NBITS) = { 0 };
1724
1725 if (!pf)
1726 return false;
1727
1728 bitmap_set(check_bits, 0, ICE_STATE_NOMINAL_CHECK_BITS);
1729 if (bitmap_intersects(pf->state, check_bits, ICE_STATE_NBITS))
1730 return false;
1731
1732 return true;
1733 }
1734
1735 /**
1736 * ice_update_eth_stats - Update VSI-specific ethernet statistics counters
1737 * @vsi: the VSI to be updated
1738 */
ice_update_eth_stats(struct ice_vsi * vsi)1739 void ice_update_eth_stats(struct ice_vsi *vsi)
1740 {
1741 struct ice_eth_stats *prev_es, *cur_es;
1742 struct ice_hw *hw = &vsi->back->hw;
1743 struct ice_pf *pf = vsi->back;
1744 u16 vsi_num = vsi->vsi_num; /* HW absolute index of a VSI */
1745
1746 prev_es = &vsi->eth_stats_prev;
1747 cur_es = &vsi->eth_stats;
1748
1749 if (ice_is_reset_in_progress(pf->state))
1750 vsi->stat_offsets_loaded = false;
1751
1752 ice_stat_update40(hw, GLV_GORCL(vsi_num), vsi->stat_offsets_loaded,
1753 &prev_es->rx_bytes, &cur_es->rx_bytes);
1754
1755 ice_stat_update40(hw, GLV_UPRCL(vsi_num), vsi->stat_offsets_loaded,
1756 &prev_es->rx_unicast, &cur_es->rx_unicast);
1757
1758 ice_stat_update40(hw, GLV_MPRCL(vsi_num), vsi->stat_offsets_loaded,
1759 &prev_es->rx_multicast, &cur_es->rx_multicast);
1760
1761 ice_stat_update40(hw, GLV_BPRCL(vsi_num), vsi->stat_offsets_loaded,
1762 &prev_es->rx_broadcast, &cur_es->rx_broadcast);
1763
1764 ice_stat_update32(hw, GLV_RDPC(vsi_num), vsi->stat_offsets_loaded,
1765 &prev_es->rx_discards, &cur_es->rx_discards);
1766
1767 ice_stat_update40(hw, GLV_GOTCL(vsi_num), vsi->stat_offsets_loaded,
1768 &prev_es->tx_bytes, &cur_es->tx_bytes);
1769
1770 ice_stat_update40(hw, GLV_UPTCL(vsi_num), vsi->stat_offsets_loaded,
1771 &prev_es->tx_unicast, &cur_es->tx_unicast);
1772
1773 ice_stat_update40(hw, GLV_MPTCL(vsi_num), vsi->stat_offsets_loaded,
1774 &prev_es->tx_multicast, &cur_es->tx_multicast);
1775
1776 ice_stat_update40(hw, GLV_BPTCL(vsi_num), vsi->stat_offsets_loaded,
1777 &prev_es->tx_broadcast, &cur_es->tx_broadcast);
1778
1779 ice_stat_update32(hw, GLV_TEPC(vsi_num), vsi->stat_offsets_loaded,
1780 &prev_es->tx_errors, &cur_es->tx_errors);
1781
1782 vsi->stat_offsets_loaded = true;
1783 }
1784
1785 /**
1786 * ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
1787 * @hw: HW pointer
1788 * @pf_q: index of the Rx queue in the PF's queue space
1789 * @rxdid: flexible descriptor RXDID
1790 * @prio: priority for the RXDID for this queue
1791 * @ena_ts: true to enable timestamp and false to disable timestamp
1792 */
1793 void
ice_write_qrxflxp_cntxt(struct ice_hw * hw,u16 pf_q,u32 rxdid,u32 prio,bool ena_ts)1794 ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio,
1795 bool ena_ts)
1796 {
1797 int regval = rd32(hw, QRXFLXP_CNTXT(pf_q));
1798
1799 /* clear any previous values */
1800 regval &= ~(QRXFLXP_CNTXT_RXDID_IDX_M |
1801 QRXFLXP_CNTXT_RXDID_PRIO_M |
1802 QRXFLXP_CNTXT_TS_M);
1803
1804 regval |= (rxdid << QRXFLXP_CNTXT_RXDID_IDX_S) &
1805 QRXFLXP_CNTXT_RXDID_IDX_M;
1806
1807 regval |= (prio << QRXFLXP_CNTXT_RXDID_PRIO_S) &
1808 QRXFLXP_CNTXT_RXDID_PRIO_M;
1809
1810 if (ena_ts)
1811 /* Enable TimeSync on this queue */
1812 regval |= QRXFLXP_CNTXT_TS_M;
1813
1814 wr32(hw, QRXFLXP_CNTXT(pf_q), regval);
1815 }
1816
ice_vsi_cfg_single_rxq(struct ice_vsi * vsi,u16 q_idx)1817 int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
1818 {
1819 if (q_idx >= vsi->num_rxq)
1820 return -EINVAL;
1821
1822 return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
1823 }
1824
ice_vsi_cfg_single_txq(struct ice_vsi * vsi,struct ice_tx_ring ** tx_rings,u16 q_idx)1825 int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
1826 {
1827 struct ice_aqc_add_tx_qgrp *qg_buf;
1828 int err;
1829
1830 if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
1831 return -EINVAL;
1832
1833 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
1834 if (!qg_buf)
1835 return -ENOMEM;
1836
1837 qg_buf->num_txqs = 1;
1838
1839 err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
1840 kfree(qg_buf);
1841 return err;
1842 }
1843
1844 /**
1845 * ice_vsi_cfg_rxqs - Configure the VSI for Rx
1846 * @vsi: the VSI being configured
1847 *
1848 * Return 0 on success and a negative value on error
1849 * Configure the Rx VSI for operation.
1850 */
ice_vsi_cfg_rxqs(struct ice_vsi * vsi)1851 int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
1852 {
1853 u16 i;
1854
1855 if (vsi->type == ICE_VSI_VF)
1856 goto setup_rings;
1857
1858 ice_vsi_cfg_frame_size(vsi);
1859 setup_rings:
1860 /* set up individual rings */
1861 ice_for_each_rxq(vsi, i) {
1862 int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
1863
1864 if (err)
1865 return err;
1866 }
1867
1868 return 0;
1869 }
1870
1871 /**
1872 * ice_vsi_cfg_txqs - Configure the VSI for Tx
1873 * @vsi: the VSI being configured
1874 * @rings: Tx ring array to be configured
1875 * @count: number of Tx ring array elements
1876 *
1877 * Return 0 on success and a negative value on error
1878 * Configure the Tx VSI for operation.
1879 */
1880 static int
ice_vsi_cfg_txqs(struct ice_vsi * vsi,struct ice_tx_ring ** rings,u16 count)1881 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
1882 {
1883 struct ice_aqc_add_tx_qgrp *qg_buf;
1884 u16 q_idx = 0;
1885 int err = 0;
1886
1887 qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
1888 if (!qg_buf)
1889 return -ENOMEM;
1890
1891 qg_buf->num_txqs = 1;
1892
1893 for (q_idx = 0; q_idx < count; q_idx++) {
1894 err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
1895 if (err)
1896 goto err_cfg_txqs;
1897 }
1898
1899 err_cfg_txqs:
1900 kfree(qg_buf);
1901 return err;
1902 }
1903
1904 /**
1905 * ice_vsi_cfg_lan_txqs - Configure the VSI for Tx
1906 * @vsi: the VSI being configured
1907 *
1908 * Return 0 on success and a negative value on error
1909 * Configure the Tx VSI for operation.
1910 */
ice_vsi_cfg_lan_txqs(struct ice_vsi * vsi)1911 int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi)
1912 {
1913 return ice_vsi_cfg_txqs(vsi, vsi->tx_rings, vsi->num_txq);
1914 }
1915
1916 /**
1917 * ice_vsi_cfg_xdp_txqs - Configure Tx queues dedicated for XDP in given VSI
1918 * @vsi: the VSI being configured
1919 *
1920 * Return 0 on success and a negative value on error
1921 * Configure the Tx queues dedicated for XDP in given VSI for operation.
1922 */
ice_vsi_cfg_xdp_txqs(struct ice_vsi * vsi)1923 int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
1924 {
1925 int ret;
1926 int i;
1927
1928 ret = ice_vsi_cfg_txqs(vsi, vsi->xdp_rings, vsi->num_xdp_txq);
1929 if (ret)
1930 return ret;
1931
1932 ice_for_each_rxq(vsi, i)
1933 ice_tx_xsk_pool(vsi, i);
1934
1935 return 0;
1936 }
1937
1938 /**
1939 * ice_intrl_usec_to_reg - convert interrupt rate limit to register value
1940 * @intrl: interrupt rate limit in usecs
1941 * @gran: interrupt rate limit granularity in usecs
1942 *
1943 * This function converts a decimal interrupt rate limit in usecs to the format
1944 * expected by firmware.
1945 */
ice_intrl_usec_to_reg(u8 intrl,u8 gran)1946 static u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran)
1947 {
1948 u32 val = intrl / gran;
1949
1950 if (val)
1951 return val | GLINT_RATE_INTRL_ENA_M;
1952 return 0;
1953 }
1954
1955 /**
1956 * ice_write_intrl - write throttle rate limit to interrupt specific register
1957 * @q_vector: pointer to interrupt specific structure
1958 * @intrl: throttle rate limit in microseconds to write
1959 */
ice_write_intrl(struct ice_q_vector * q_vector,u8 intrl)1960 void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
1961 {
1962 struct ice_hw *hw = &q_vector->vsi->back->hw;
1963
1964 wr32(hw, GLINT_RATE(q_vector->reg_idx),
1965 ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
1966 }
1967
ice_pull_qvec_from_rc(struct ice_ring_container * rc)1968 static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
1969 {
1970 switch (rc->type) {
1971 case ICE_RX_CONTAINER:
1972 if (rc->rx_ring)
1973 return rc->rx_ring->q_vector;
1974 break;
1975 case ICE_TX_CONTAINER:
1976 if (rc->tx_ring)
1977 return rc->tx_ring->q_vector;
1978 break;
1979 default:
1980 break;
1981 }
1982
1983 return NULL;
1984 }
1985
1986 /**
1987 * __ice_write_itr - write throttle rate to register
1988 * @q_vector: pointer to interrupt data structure
1989 * @rc: pointer to ring container
1990 * @itr: throttle rate in microseconds to write
1991 */
__ice_write_itr(struct ice_q_vector * q_vector,struct ice_ring_container * rc,u16 itr)1992 static void __ice_write_itr(struct ice_q_vector *q_vector,
1993 struct ice_ring_container *rc, u16 itr)
1994 {
1995 struct ice_hw *hw = &q_vector->vsi->back->hw;
1996
1997 wr32(hw, GLINT_ITR(rc->itr_idx, q_vector->reg_idx),
1998 ITR_REG_ALIGN(itr) >> ICE_ITR_GRAN_S);
1999 }
2000
2001 /**
2002 * ice_write_itr - write throttle rate to queue specific register
2003 * @rc: pointer to ring container
2004 * @itr: throttle rate in microseconds to write
2005 */
ice_write_itr(struct ice_ring_container * rc,u16 itr)2006 void ice_write_itr(struct ice_ring_container *rc, u16 itr)
2007 {
2008 struct ice_q_vector *q_vector;
2009
2010 q_vector = ice_pull_qvec_from_rc(rc);
2011 if (!q_vector)
2012 return;
2013
2014 __ice_write_itr(q_vector, rc, itr);
2015 }
2016
2017 /**
2018 * ice_set_q_vector_intrl - set up interrupt rate limiting
2019 * @q_vector: the vector to be configured
2020 *
2021 * Interrupt rate limiting is local to the vector, not per-queue so we must
2022 * detect if either ring container has dynamic moderation enabled to decide
2023 * what to set the interrupt rate limit to via INTRL settings. In the case that
2024 * dynamic moderation is disabled on both, write the value with the cached
2025 * setting to make sure INTRL register matches the user visible value.
2026 */
ice_set_q_vector_intrl(struct ice_q_vector * q_vector)2027 void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
2028 {
2029 if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
2030 /* in the case of dynamic enabled, cap each vector to no more
2031 * than (4 us) 250,000 ints/sec, which allows low latency
2032 * but still less than 500,000 interrupts per second, which
2033 * reduces CPU a bit in the case of the lowest latency
2034 * setting. The 4 here is a value in microseconds.
2035 */
2036 ice_write_intrl(q_vector, 4);
2037 } else {
2038 ice_write_intrl(q_vector, q_vector->intrl);
2039 }
2040 }
2041
2042 /**
2043 * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
2044 * @vsi: the VSI being configured
2045 *
2046 * This configures MSIX mode interrupts for the PF VSI, and should not be used
2047 * for the VF VSI.
2048 */
ice_vsi_cfg_msix(struct ice_vsi * vsi)2049 void ice_vsi_cfg_msix(struct ice_vsi *vsi)
2050 {
2051 struct ice_pf *pf = vsi->back;
2052 struct ice_hw *hw = &pf->hw;
2053 u16 txq = 0, rxq = 0;
2054 int i, q;
2055
2056 ice_for_each_q_vector(vsi, i) {
2057 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2058 u16 reg_idx = q_vector->reg_idx;
2059
2060 ice_cfg_itr(hw, q_vector);
2061
2062 /* Both Transmit Queue Interrupt Cause Control register
2063 * and Receive Queue Interrupt Cause control register
2064 * expects MSIX_INDX field to be the vector index
2065 * within the function space and not the absolute
2066 * vector index across PF or across device.
2067 * For SR-IOV VF VSIs queue vector index always starts
2068 * with 1 since first vector index(0) is used for OICR
2069 * in VF space. Since VMDq and other PF VSIs are within
2070 * the PF function space, use the vector index that is
2071 * tracked for this PF.
2072 */
2073 for (q = 0; q < q_vector->num_ring_tx; q++) {
2074 ice_cfg_txq_interrupt(vsi, txq, reg_idx,
2075 q_vector->tx.itr_idx);
2076 txq++;
2077 }
2078
2079 for (q = 0; q < q_vector->num_ring_rx; q++) {
2080 ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
2081 q_vector->rx.itr_idx);
2082 rxq++;
2083 }
2084 }
2085 }
2086
2087 /**
2088 * ice_vsi_start_all_rx_rings - start/enable all of a VSI's Rx rings
2089 * @vsi: the VSI whose rings are to be enabled
2090 *
2091 * Returns 0 on success and a negative value on error
2092 */
ice_vsi_start_all_rx_rings(struct ice_vsi * vsi)2093 int ice_vsi_start_all_rx_rings(struct ice_vsi *vsi)
2094 {
2095 return ice_vsi_ctrl_all_rx_rings(vsi, true);
2096 }
2097
2098 /**
2099 * ice_vsi_stop_all_rx_rings - stop/disable all of a VSI's Rx rings
2100 * @vsi: the VSI whose rings are to be disabled
2101 *
2102 * Returns 0 on success and a negative value on error
2103 */
ice_vsi_stop_all_rx_rings(struct ice_vsi * vsi)2104 int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
2105 {
2106 return ice_vsi_ctrl_all_rx_rings(vsi, false);
2107 }
2108
2109 /**
2110 * ice_vsi_stop_tx_rings - Disable Tx rings
2111 * @vsi: the VSI being configured
2112 * @rst_src: reset source
2113 * @rel_vmvf_num: Relative ID of VF/VM
2114 * @rings: Tx ring array to be stopped
2115 * @count: number of Tx ring array elements
2116 */
2117 static int
ice_vsi_stop_tx_rings(struct ice_vsi * vsi,enum ice_disq_rst_src rst_src,u16 rel_vmvf_num,struct ice_tx_ring ** rings,u16 count)2118 ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2119 u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
2120 {
2121 u16 q_idx;
2122
2123 if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
2124 return -EINVAL;
2125
2126 for (q_idx = 0; q_idx < count; q_idx++) {
2127 struct ice_txq_meta txq_meta = { };
2128 int status;
2129
2130 if (!rings || !rings[q_idx])
2131 return -EINVAL;
2132
2133 ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
2134 status = ice_vsi_stop_tx_ring(vsi, rst_src, rel_vmvf_num,
2135 rings[q_idx], &txq_meta);
2136
2137 if (status)
2138 return status;
2139 }
2140
2141 return 0;
2142 }
2143
2144 /**
2145 * ice_vsi_stop_lan_tx_rings - Disable LAN Tx rings
2146 * @vsi: the VSI being configured
2147 * @rst_src: reset source
2148 * @rel_vmvf_num: Relative ID of VF/VM
2149 */
2150 int
ice_vsi_stop_lan_tx_rings(struct ice_vsi * vsi,enum ice_disq_rst_src rst_src,u16 rel_vmvf_num)2151 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
2152 u16 rel_vmvf_num)
2153 {
2154 return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings, vsi->num_txq);
2155 }
2156
2157 /**
2158 * ice_vsi_stop_xdp_tx_rings - Disable XDP Tx rings
2159 * @vsi: the VSI being configured
2160 */
ice_vsi_stop_xdp_tx_rings(struct ice_vsi * vsi)2161 int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi)
2162 {
2163 return ice_vsi_stop_tx_rings(vsi, ICE_NO_RESET, 0, vsi->xdp_rings, vsi->num_xdp_txq);
2164 }
2165
2166 /**
2167 * ice_vsi_is_rx_queue_active
2168 * @vsi: the VSI being configured
2169 *
2170 * Return true if at least one queue is active.
2171 */
ice_vsi_is_rx_queue_active(struct ice_vsi * vsi)2172 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
2173 {
2174 struct ice_pf *pf = vsi->back;
2175 struct ice_hw *hw = &pf->hw;
2176 int i;
2177
2178 ice_for_each_rxq(vsi, i) {
2179 u32 rx_reg;
2180 int pf_q;
2181
2182 pf_q = vsi->rxq_map[i];
2183 rx_reg = rd32(hw, QRX_CTRL(pf_q));
2184 if (rx_reg & QRX_CTRL_QENA_STAT_M)
2185 return true;
2186 }
2187
2188 return false;
2189 }
2190
ice_vsi_set_tc_cfg(struct ice_vsi * vsi)2191 static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
2192 {
2193 if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
2194 vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
2195 vsi->tc_cfg.numtc = 1;
2196 return;
2197 }
2198
2199 /* set VSI TC information based on DCB config */
2200 ice_vsi_set_dcb_tc_cfg(vsi);
2201 }
2202
2203 /**
2204 * ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
2205 * @vsi: the VSI being configured
2206 * @tx: bool to determine Tx or Rx rule
2207 * @create: bool to determine create or remove Rule
2208 */
ice_cfg_sw_lldp(struct ice_vsi * vsi,bool tx,bool create)2209 void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
2210 {
2211 int (*eth_fltr)(struct ice_vsi *v, u16 type, u16 flag,
2212 enum ice_sw_fwd_act_type act);
2213 struct ice_pf *pf = vsi->back;
2214 struct device *dev;
2215 int status;
2216
2217 dev = ice_pf_to_dev(pf);
2218 eth_fltr = create ? ice_fltr_add_eth : ice_fltr_remove_eth;
2219
2220 if (tx) {
2221 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_TX,
2222 ICE_DROP_PACKET);
2223 } else {
2224 if (ice_fw_supports_lldp_fltr_ctrl(&pf->hw)) {
2225 status = ice_lldp_fltr_add_remove(&pf->hw, vsi->vsi_num,
2226 create);
2227 } else {
2228 status = eth_fltr(vsi, ETH_P_LLDP, ICE_FLTR_RX,
2229 ICE_FWD_TO_VSI);
2230 }
2231 }
2232
2233 if (status)
2234 dev_dbg(dev, "Fail %s %s LLDP rule on VSI %i error: %d\n",
2235 create ? "adding" : "removing", tx ? "TX" : "RX",
2236 vsi->vsi_num, status);
2237 }
2238
2239 /**
2240 * ice_set_agg_vsi - sets up scheduler aggregator node and move VSI into it
2241 * @vsi: pointer to the VSI
2242 *
2243 * This function will allocate new scheduler aggregator now if needed and will
2244 * move specified VSI into it.
2245 */
ice_set_agg_vsi(struct ice_vsi * vsi)2246 static void ice_set_agg_vsi(struct ice_vsi *vsi)
2247 {
2248 struct device *dev = ice_pf_to_dev(vsi->back);
2249 struct ice_agg_node *agg_node_iter = NULL;
2250 u32 agg_id = ICE_INVALID_AGG_NODE_ID;
2251 struct ice_agg_node *agg_node = NULL;
2252 int node_offset, max_agg_nodes = 0;
2253 struct ice_port_info *port_info;
2254 struct ice_pf *pf = vsi->back;
2255 u32 agg_node_id_start = 0;
2256 int status;
2257
2258 /* create (as needed) scheduler aggregator node and move VSI into
2259 * corresponding aggregator node
2260 * - PF aggregator node to contains VSIs of type _PF and _CTRL
2261 * - VF aggregator nodes will contain VF VSI
2262 */
2263 port_info = pf->hw.port_info;
2264 if (!port_info)
2265 return;
2266
2267 switch (vsi->type) {
2268 case ICE_VSI_CTRL:
2269 case ICE_VSI_CHNL:
2270 case ICE_VSI_LB:
2271 case ICE_VSI_PF:
2272 case ICE_VSI_SWITCHDEV_CTRL:
2273 max_agg_nodes = ICE_MAX_PF_AGG_NODES;
2274 agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
2275 agg_node_iter = &pf->pf_agg_node[0];
2276 break;
2277 case ICE_VSI_VF:
2278 /* user can create 'n' VFs on a given PF, but since max children
2279 * per aggregator node can be only 64. Following code handles
2280 * aggregator(s) for VF VSIs, either selects a agg_node which
2281 * was already created provided num_vsis < 64, otherwise
2282 * select next available node, which will be created
2283 */
2284 max_agg_nodes = ICE_MAX_VF_AGG_NODES;
2285 agg_node_id_start = ICE_VF_AGG_NODE_ID_START;
2286 agg_node_iter = &pf->vf_agg_node[0];
2287 break;
2288 default:
2289 /* other VSI type, handle later if needed */
2290 dev_dbg(dev, "unexpected VSI type %s\n",
2291 ice_vsi_type_str(vsi->type));
2292 return;
2293 }
2294
2295 /* find the appropriate aggregator node */
2296 for (node_offset = 0; node_offset < max_agg_nodes; node_offset++) {
2297 /* see if we can find space in previously created
2298 * node if num_vsis < 64, otherwise skip
2299 */
2300 if (agg_node_iter->num_vsis &&
2301 agg_node_iter->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
2302 agg_node_iter++;
2303 continue;
2304 }
2305
2306 if (agg_node_iter->valid &&
2307 agg_node_iter->agg_id != ICE_INVALID_AGG_NODE_ID) {
2308 agg_id = agg_node_iter->agg_id;
2309 agg_node = agg_node_iter;
2310 break;
2311 }
2312
2313 /* find unclaimed agg_id */
2314 if (agg_node_iter->agg_id == ICE_INVALID_AGG_NODE_ID) {
2315 agg_id = node_offset + agg_node_id_start;
2316 agg_node = agg_node_iter;
2317 break;
2318 }
2319 /* move to next agg_node */
2320 agg_node_iter++;
2321 }
2322
2323 if (!agg_node)
2324 return;
2325
2326 /* if selected aggregator node was not created, create it */
2327 if (!agg_node->valid) {
2328 status = ice_cfg_agg(port_info, agg_id, ICE_AGG_TYPE_AGG,
2329 (u8)vsi->tc_cfg.ena_tc);
2330 if (status) {
2331 dev_err(dev, "unable to create aggregator node with agg_id %u\n",
2332 agg_id);
2333 return;
2334 }
2335 /* aggregator node is created, store the needed info */
2336 agg_node->valid = true;
2337 agg_node->agg_id = agg_id;
2338 }
2339
2340 /* move VSI to corresponding aggregator node */
2341 status = ice_move_vsi_to_agg(port_info, agg_id, vsi->idx,
2342 (u8)vsi->tc_cfg.ena_tc);
2343 if (status) {
2344 dev_err(dev, "unable to move VSI idx %u into aggregator %u node",
2345 vsi->idx, agg_id);
2346 return;
2347 }
2348
2349 /* keep active children count for aggregator node */
2350 agg_node->num_vsis++;
2351
2352 /* cache the 'agg_id' in VSI, so that after reset - VSI will be moved
2353 * to aggregator node
2354 */
2355 vsi->agg_node = agg_node;
2356 dev_dbg(dev, "successfully moved VSI idx %u tc_bitmap 0x%x) into aggregator node %d which has num_vsis %u\n",
2357 vsi->idx, vsi->tc_cfg.ena_tc, vsi->agg_node->agg_id,
2358 vsi->agg_node->num_vsis);
2359 }
2360
ice_vsi_cfg_tc_lan(struct ice_pf * pf,struct ice_vsi * vsi)2361 static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
2362 {
2363 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
2364 struct device *dev = ice_pf_to_dev(pf);
2365 int ret, i;
2366
2367 /* configure VSI nodes based on number of queues and TC's */
2368 ice_for_each_traffic_class(i) {
2369 if (!(vsi->tc_cfg.ena_tc & BIT(i)))
2370 continue;
2371
2372 if (vsi->type == ICE_VSI_CHNL) {
2373 if (!vsi->alloc_txq && vsi->num_txq)
2374 max_txqs[i] = vsi->num_txq;
2375 else
2376 max_txqs[i] = pf->num_lan_tx;
2377 } else {
2378 max_txqs[i] = vsi->alloc_txq;
2379 }
2380
2381 if (vsi->type == ICE_VSI_PF)
2382 max_txqs[i] += vsi->num_xdp_txq;
2383 }
2384
2385 dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
2386 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
2387 max_txqs);
2388 if (ret) {
2389 dev_err(dev, "VSI %d failed lan queue config, error %d\n",
2390 vsi->vsi_num, ret);
2391 return ret;
2392 }
2393
2394 return 0;
2395 }
2396
2397 /**
2398 * ice_vsi_cfg_def - configure default VSI based on the type
2399 * @vsi: pointer to VSI
2400 * @params: the parameters to configure this VSI with
2401 */
2402 static int
ice_vsi_cfg_def(struct ice_vsi * vsi,struct ice_vsi_cfg_params * params)2403 ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
2404 {
2405 struct device *dev = ice_pf_to_dev(vsi->back);
2406 struct ice_pf *pf = vsi->back;
2407 int ret;
2408
2409 vsi->vsw = pf->first_sw;
2410
2411 ret = ice_vsi_alloc_def(vsi, params->ch);
2412 if (ret)
2413 return ret;
2414
2415 /* allocate memory for Tx/Rx ring stat pointers */
2416 ret = ice_vsi_alloc_stat_arrays(vsi);
2417 if (ret)
2418 goto unroll_vsi_alloc;
2419
2420 ice_alloc_fd_res(vsi);
2421
2422 ret = ice_vsi_get_qs(vsi);
2423 if (ret) {
2424 dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
2425 vsi->idx);
2426 goto unroll_vsi_alloc_stat;
2427 }
2428
2429 /* set RSS capabilities */
2430 ice_vsi_set_rss_params(vsi);
2431
2432 /* set TC configuration */
2433 ice_vsi_set_tc_cfg(vsi);
2434
2435 /* create the VSI */
2436 ret = ice_vsi_init(vsi, params->flags);
2437 if (ret)
2438 goto unroll_get_qs;
2439
2440 ice_vsi_init_vlan_ops(vsi);
2441
2442 switch (vsi->type) {
2443 case ICE_VSI_CTRL:
2444 case ICE_VSI_SWITCHDEV_CTRL:
2445 case ICE_VSI_PF:
2446 ret = ice_vsi_alloc_q_vectors(vsi);
2447 if (ret)
2448 goto unroll_vsi_init;
2449
2450 ret = ice_vsi_alloc_rings(vsi);
2451 if (ret)
2452 goto unroll_vector_base;
2453
2454 ret = ice_vsi_alloc_ring_stats(vsi);
2455 if (ret)
2456 goto unroll_vector_base;
2457
2458 ice_vsi_map_rings_to_vectors(vsi);
2459 vsi->stat_offsets_loaded = false;
2460
2461 if (ice_is_xdp_ena_vsi(vsi)) {
2462 ret = ice_vsi_determine_xdp_res(vsi);
2463 if (ret)
2464 goto unroll_vector_base;
2465 ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog,
2466 ICE_XDP_CFG_PART);
2467 if (ret)
2468 goto unroll_vector_base;
2469 }
2470
2471 /* ICE_VSI_CTRL does not need RSS so skip RSS processing */
2472 if (vsi->type != ICE_VSI_CTRL)
2473 /* Do not exit if configuring RSS had an issue, at
2474 * least receive traffic on first queue. Hence no
2475 * need to capture return value
2476 */
2477 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2478 ice_vsi_cfg_rss_lut_key(vsi);
2479 ice_vsi_set_rss_flow_fld(vsi);
2480 }
2481 ice_init_arfs(vsi);
2482 break;
2483 case ICE_VSI_CHNL:
2484 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2485 ice_vsi_cfg_rss_lut_key(vsi);
2486 ice_vsi_set_rss_flow_fld(vsi);
2487 }
2488 break;
2489 case ICE_VSI_VF:
2490 /* VF driver will take care of creating netdev for this type and
2491 * map queues to vectors through Virtchnl, PF driver only
2492 * creates a VSI and corresponding structures for bookkeeping
2493 * purpose
2494 */
2495 ret = ice_vsi_alloc_q_vectors(vsi);
2496 if (ret)
2497 goto unroll_vsi_init;
2498
2499 ret = ice_vsi_alloc_rings(vsi);
2500 if (ret)
2501 goto unroll_alloc_q_vector;
2502
2503 ret = ice_vsi_alloc_ring_stats(vsi);
2504 if (ret)
2505 goto unroll_vector_base;
2506
2507 vsi->stat_offsets_loaded = false;
2508
2509 /* Do not exit if configuring RSS had an issue, at least
2510 * receive traffic on first queue. Hence no need to capture
2511 * return value
2512 */
2513 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
2514 ice_vsi_cfg_rss_lut_key(vsi);
2515 ice_vsi_set_vf_rss_flow_fld(vsi);
2516 }
2517 break;
2518 case ICE_VSI_LB:
2519 ret = ice_vsi_alloc_rings(vsi);
2520 if (ret)
2521 goto unroll_vsi_init;
2522
2523 ret = ice_vsi_alloc_ring_stats(vsi);
2524 if (ret)
2525 goto unroll_vector_base;
2526
2527 break;
2528 default:
2529 /* clean up the resources and exit */
2530 ret = -EINVAL;
2531 goto unroll_vsi_init;
2532 }
2533
2534 return 0;
2535
2536 unroll_vector_base:
2537 /* reclaim SW interrupts back to the common pool */
2538 unroll_alloc_q_vector:
2539 ice_vsi_free_q_vectors(vsi);
2540 unroll_vsi_init:
2541 ice_vsi_delete_from_hw(vsi);
2542 unroll_get_qs:
2543 ice_vsi_put_qs(vsi);
2544 unroll_vsi_alloc_stat:
2545 ice_vsi_free_stats(vsi);
2546 unroll_vsi_alloc:
2547 ice_vsi_free_arrays(vsi);
2548 return ret;
2549 }
2550
2551 /**
2552 * ice_vsi_cfg - configure a previously allocated VSI
2553 * @vsi: pointer to VSI
2554 * @params: parameters used to configure this VSI
2555 */
ice_vsi_cfg(struct ice_vsi * vsi,struct ice_vsi_cfg_params * params)2556 int ice_vsi_cfg(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
2557 {
2558 struct ice_pf *pf = vsi->back;
2559 int ret;
2560
2561 if (WARN_ON(params->type == ICE_VSI_VF && !params->vf))
2562 return -EINVAL;
2563
2564 vsi->type = params->type;
2565 vsi->port_info = params->pi;
2566
2567 /* For VSIs which don't have a connected VF, this will be NULL */
2568 vsi->vf = params->vf;
2569
2570 ret = ice_vsi_cfg_def(vsi, params);
2571 if (ret)
2572 return ret;
2573
2574 ret = ice_vsi_cfg_tc_lan(vsi->back, vsi);
2575 if (ret)
2576 ice_vsi_decfg(vsi);
2577
2578 if (vsi->type == ICE_VSI_CTRL) {
2579 if (vsi->vf) {
2580 WARN_ON(vsi->vf->ctrl_vsi_idx != ICE_NO_VSI);
2581 vsi->vf->ctrl_vsi_idx = vsi->idx;
2582 } else {
2583 WARN_ON(pf->ctrl_vsi_idx != ICE_NO_VSI);
2584 pf->ctrl_vsi_idx = vsi->idx;
2585 }
2586 }
2587
2588 return ret;
2589 }
2590
2591 /**
2592 * ice_vsi_decfg - remove all VSI configuration
2593 * @vsi: pointer to VSI
2594 */
ice_vsi_decfg(struct ice_vsi * vsi)2595 void ice_vsi_decfg(struct ice_vsi *vsi)
2596 {
2597 struct ice_pf *pf = vsi->back;
2598 int err;
2599
2600 /* The Rx rule will only exist to remove if the LLDP FW
2601 * engine is currently stopped
2602 */
2603 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF &&
2604 !test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
2605 ice_cfg_sw_lldp(vsi, false, false);
2606
2607 ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
2608 err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
2609 if (err)
2610 dev_err(ice_pf_to_dev(pf), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
2611 vsi->vsi_num, err);
2612
2613 if (ice_is_xdp_ena_vsi(vsi))
2614 /* return value check can be skipped here, it always returns
2615 * 0 if reset is in progress
2616 */
2617 ice_destroy_xdp_rings(vsi, ICE_XDP_CFG_PART);
2618
2619 ice_vsi_clear_rings(vsi);
2620 ice_vsi_free_q_vectors(vsi);
2621 ice_vsi_put_qs(vsi);
2622 ice_vsi_free_arrays(vsi);
2623
2624 /* SR-IOV determines needed MSIX resources all at once instead of per
2625 * VSI since when VFs are spawned we know how many VFs there are and how
2626 * many interrupts each VF needs. SR-IOV MSIX resources are also
2627 * cleared in the same manner.
2628 */
2629
2630 if (vsi->type == ICE_VSI_VF &&
2631 vsi->agg_node && vsi->agg_node->valid)
2632 vsi->agg_node->num_vsis--;
2633 }
2634
2635 /**
2636 * ice_vsi_setup - Set up a VSI by a given type
2637 * @pf: board private structure
2638 * @params: parameters to use when creating the VSI
2639 *
2640 * This allocates the sw VSI structure and its queue resources.
2641 *
2642 * Returns pointer to the successfully allocated and configured VSI sw struct on
2643 * success, NULL on failure.
2644 */
2645 struct ice_vsi *
ice_vsi_setup(struct ice_pf * pf,struct ice_vsi_cfg_params * params)2646 ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params)
2647 {
2648 struct device *dev = ice_pf_to_dev(pf);
2649 struct ice_vsi *vsi;
2650 int ret;
2651
2652 /* ice_vsi_setup can only initialize a new VSI, and we must have
2653 * a port_info structure for it.
2654 */
2655 if (WARN_ON(!(params->flags & ICE_VSI_FLAG_INIT)) ||
2656 WARN_ON(!params->pi))
2657 return NULL;
2658
2659 vsi = ice_vsi_alloc(pf);
2660 if (!vsi) {
2661 dev_err(dev, "could not allocate VSI\n");
2662 return NULL;
2663 }
2664
2665 ret = ice_vsi_cfg(vsi, params);
2666 if (ret)
2667 goto err_vsi_cfg;
2668
2669 /* Add switch rule to drop all Tx Flow Control Frames, of look up
2670 * type ETHERTYPE from VSIs, and restrict malicious VF from sending
2671 * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
2672 * The rule is added once for PF VSI in order to create appropriate
2673 * recipe, since VSI/VSI list is ignored with drop action...
2674 * Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
2675 * be dropped so that VFs cannot send LLDP packets to reconfig DCB
2676 * settings in the HW.
2677 */
2678 if (!ice_is_safe_mode(pf) && vsi->type == ICE_VSI_PF) {
2679 ice_fltr_add_eth(vsi, ETH_P_PAUSE, ICE_FLTR_TX,
2680 ICE_DROP_PACKET);
2681 ice_cfg_sw_lldp(vsi, true, true);
2682 }
2683
2684 if (!vsi->agg_node)
2685 ice_set_agg_vsi(vsi);
2686
2687 return vsi;
2688
2689 err_vsi_cfg:
2690 ice_vsi_free(vsi);
2691
2692 return NULL;
2693 }
2694
2695 /**
2696 * ice_vsi_release_msix - Clear the queue to Interrupt mapping in HW
2697 * @vsi: the VSI being cleaned up
2698 */
ice_vsi_release_msix(struct ice_vsi * vsi)2699 static void ice_vsi_release_msix(struct ice_vsi *vsi)
2700 {
2701 struct ice_pf *pf = vsi->back;
2702 struct ice_hw *hw = &pf->hw;
2703 u32 txq = 0;
2704 u32 rxq = 0;
2705 int i, q;
2706
2707 ice_for_each_q_vector(vsi, i) {
2708 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2709
2710 ice_write_intrl(q_vector, 0);
2711 for (q = 0; q < q_vector->num_ring_tx; q++) {
2712 ice_write_itr(&q_vector->tx, 0);
2713 wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), 0);
2714 if (ice_is_xdp_ena_vsi(vsi)) {
2715 u32 xdp_txq = txq + vsi->num_xdp_txq;
2716
2717 wr32(hw, QINT_TQCTL(vsi->txq_map[xdp_txq]), 0);
2718 }
2719 txq++;
2720 }
2721
2722 for (q = 0; q < q_vector->num_ring_rx; q++) {
2723 ice_write_itr(&q_vector->rx, 0);
2724 wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), 0);
2725 rxq++;
2726 }
2727 }
2728
2729 ice_flush(hw);
2730 }
2731
2732 /**
2733 * ice_vsi_free_irq - Free the IRQ association with the OS
2734 * @vsi: the VSI being configured
2735 */
ice_vsi_free_irq(struct ice_vsi * vsi)2736 void ice_vsi_free_irq(struct ice_vsi *vsi)
2737 {
2738 struct ice_pf *pf = vsi->back;
2739 int i;
2740
2741 if (!vsi->q_vectors || !vsi->irqs_ready)
2742 return;
2743
2744 ice_vsi_release_msix(vsi);
2745 if (vsi->type == ICE_VSI_VF)
2746 return;
2747
2748 vsi->irqs_ready = false;
2749 ice_free_cpu_rx_rmap(vsi);
2750
2751 ice_for_each_q_vector(vsi, i) {
2752 int irq_num;
2753
2754 irq_num = vsi->q_vectors[i]->irq.virq;
2755
2756 /* free only the irqs that were actually requested */
2757 if (!vsi->q_vectors[i] ||
2758 !(vsi->q_vectors[i]->num_ring_tx ||
2759 vsi->q_vectors[i]->num_ring_rx))
2760 continue;
2761
2762 /* clear the affinity notifier in the IRQ descriptor */
2763 if (!IS_ENABLED(CONFIG_RFS_ACCEL))
2764 irq_set_affinity_notifier(irq_num, NULL);
2765
2766 /* clear the affinity_mask in the IRQ descriptor */
2767 irq_set_affinity_hint(irq_num, NULL);
2768 synchronize_irq(irq_num);
2769 devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]);
2770 }
2771 }
2772
2773 /**
2774 * ice_vsi_free_tx_rings - Free Tx resources for VSI queues
2775 * @vsi: the VSI having resources freed
2776 */
ice_vsi_free_tx_rings(struct ice_vsi * vsi)2777 void ice_vsi_free_tx_rings(struct ice_vsi *vsi)
2778 {
2779 int i;
2780
2781 if (!vsi->tx_rings)
2782 return;
2783
2784 ice_for_each_txq(vsi, i)
2785 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2786 ice_free_tx_ring(vsi->tx_rings[i]);
2787 }
2788
2789 /**
2790 * ice_vsi_free_rx_rings - Free Rx resources for VSI queues
2791 * @vsi: the VSI having resources freed
2792 */
ice_vsi_free_rx_rings(struct ice_vsi * vsi)2793 void ice_vsi_free_rx_rings(struct ice_vsi *vsi)
2794 {
2795 int i;
2796
2797 if (!vsi->rx_rings)
2798 return;
2799
2800 ice_for_each_rxq(vsi, i)
2801 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2802 ice_free_rx_ring(vsi->rx_rings[i]);
2803 }
2804
2805 /**
2806 * ice_vsi_close - Shut down a VSI
2807 * @vsi: the VSI being shut down
2808 */
ice_vsi_close(struct ice_vsi * vsi)2809 void ice_vsi_close(struct ice_vsi *vsi)
2810 {
2811 if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state))
2812 ice_down(vsi);
2813
2814 ice_vsi_free_irq(vsi);
2815 ice_vsi_free_tx_rings(vsi);
2816 ice_vsi_free_rx_rings(vsi);
2817 }
2818
2819 /**
2820 * ice_ena_vsi - resume a VSI
2821 * @vsi: the VSI being resume
2822 * @locked: is the rtnl_lock already held
2823 */
ice_ena_vsi(struct ice_vsi * vsi,bool locked)2824 int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
2825 {
2826 int err = 0;
2827
2828 if (!test_bit(ICE_VSI_NEEDS_RESTART, vsi->state))
2829 return 0;
2830
2831 clear_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2832
2833 if (vsi->netdev && vsi->type == ICE_VSI_PF) {
2834 if (netif_running(vsi->netdev)) {
2835 if (!locked)
2836 rtnl_lock();
2837
2838 err = ice_open_internal(vsi->netdev);
2839
2840 if (!locked)
2841 rtnl_unlock();
2842 }
2843 } else if (vsi->type == ICE_VSI_CTRL) {
2844 err = ice_vsi_open_ctrl(vsi);
2845 }
2846
2847 return err;
2848 }
2849
2850 /**
2851 * ice_dis_vsi - pause a VSI
2852 * @vsi: the VSI being paused
2853 * @locked: is the rtnl_lock already held
2854 */
ice_dis_vsi(struct ice_vsi * vsi,bool locked)2855 void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
2856 {
2857 if (test_bit(ICE_VSI_DOWN, vsi->state))
2858 return;
2859
2860 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state);
2861
2862 if (vsi->type == ICE_VSI_PF && vsi->netdev) {
2863 if (netif_running(vsi->netdev)) {
2864 if (!locked)
2865 rtnl_lock();
2866
2867 ice_vsi_close(vsi);
2868
2869 if (!locked)
2870 rtnl_unlock();
2871 } else {
2872 ice_vsi_close(vsi);
2873 }
2874 } else if (vsi->type == ICE_VSI_CTRL ||
2875 vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
2876 ice_vsi_close(vsi);
2877 }
2878 }
2879
2880 /**
2881 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
2882 * @vsi: the VSI being un-configured
2883 */
ice_vsi_dis_irq(struct ice_vsi * vsi)2884 void ice_vsi_dis_irq(struct ice_vsi *vsi)
2885 {
2886 struct ice_pf *pf = vsi->back;
2887 struct ice_hw *hw = &pf->hw;
2888 u32 val;
2889 int i;
2890
2891 /* disable interrupt causation from each queue */
2892 if (vsi->tx_rings) {
2893 ice_for_each_txq(vsi, i) {
2894 if (vsi->tx_rings[i]) {
2895 u16 reg;
2896
2897 reg = vsi->tx_rings[i]->reg_idx;
2898 val = rd32(hw, QINT_TQCTL(reg));
2899 val &= ~QINT_TQCTL_CAUSE_ENA_M;
2900 wr32(hw, QINT_TQCTL(reg), val);
2901 }
2902 }
2903 }
2904
2905 if (vsi->rx_rings) {
2906 ice_for_each_rxq(vsi, i) {
2907 if (vsi->rx_rings[i]) {
2908 u16 reg;
2909
2910 reg = vsi->rx_rings[i]->reg_idx;
2911 val = rd32(hw, QINT_RQCTL(reg));
2912 val &= ~QINT_RQCTL_CAUSE_ENA_M;
2913 wr32(hw, QINT_RQCTL(reg), val);
2914 }
2915 }
2916 }
2917
2918 /* disable each interrupt */
2919 ice_for_each_q_vector(vsi, i) {
2920 if (!vsi->q_vectors[i])
2921 continue;
2922 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
2923 }
2924
2925 ice_flush(hw);
2926
2927 /* don't call synchronize_irq() for VF's from the host */
2928 if (vsi->type == ICE_VSI_VF)
2929 return;
2930
2931 ice_for_each_q_vector(vsi, i)
2932 synchronize_irq(vsi->q_vectors[i]->irq.virq);
2933 }
2934
2935 /**
2936 * ice_vsi_release - Delete a VSI and free its resources
2937 * @vsi: the VSI being removed
2938 *
2939 * Returns 0 on success or < 0 on error
2940 */
ice_vsi_release(struct ice_vsi * vsi)2941 int ice_vsi_release(struct ice_vsi *vsi)
2942 {
2943 struct ice_pf *pf;
2944
2945 if (!vsi->back)
2946 return -ENODEV;
2947 pf = vsi->back;
2948
2949 if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
2950 ice_rss_clean(vsi);
2951
2952 ice_vsi_close(vsi);
2953 ice_vsi_decfg(vsi);
2954
2955 /* retain SW VSI data structure since it is needed to unregister and
2956 * free VSI netdev when PF is not in reset recovery pending state,\
2957 * for ex: during rmmod.
2958 */
2959 if (!ice_is_reset_in_progress(pf->state))
2960 ice_vsi_delete(vsi);
2961
2962 return 0;
2963 }
2964
2965 /**
2966 * ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
2967 * @vsi: VSI connected with q_vectors
2968 * @coalesce: array of struct with stored coalesce
2969 *
2970 * Returns array size.
2971 */
2972 static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi * vsi,struct ice_coalesce_stored * coalesce)2973 ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
2974 struct ice_coalesce_stored *coalesce)
2975 {
2976 int i;
2977
2978 ice_for_each_q_vector(vsi, i) {
2979 struct ice_q_vector *q_vector = vsi->q_vectors[i];
2980
2981 coalesce[i].itr_tx = q_vector->tx.itr_settings;
2982 coalesce[i].itr_rx = q_vector->rx.itr_settings;
2983 coalesce[i].intrl = q_vector->intrl;
2984
2985 if (i < vsi->num_txq)
2986 coalesce[i].tx_valid = true;
2987 if (i < vsi->num_rxq)
2988 coalesce[i].rx_valid = true;
2989 }
2990
2991 return vsi->num_q_vectors;
2992 }
2993
2994 /**
2995 * ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
2996 * @vsi: VSI connected with q_vectors
2997 * @coalesce: pointer to array of struct with stored coalesce
2998 * @size: size of coalesce array
2999 *
3000 * Before this function, ice_vsi_rebuild_get_coalesce should be called to save
3001 * ITR params in arrays. If size is 0 or coalesce wasn't stored set coalesce
3002 * to default value.
3003 */
3004 static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi * vsi,struct ice_coalesce_stored * coalesce,int size)3005 ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
3006 struct ice_coalesce_stored *coalesce, int size)
3007 {
3008 struct ice_ring_container *rc;
3009 int i;
3010
3011 if ((size && !coalesce) || !vsi)
3012 return;
3013
3014 /* There are a couple of cases that have to be handled here:
3015 * 1. The case where the number of queue vectors stays the same, but
3016 * the number of Tx or Rx rings changes (the first for loop)
3017 * 2. The case where the number of queue vectors increased (the
3018 * second for loop)
3019 */
3020 for (i = 0; i < size && i < vsi->num_q_vectors; i++) {
3021 /* There are 2 cases to handle here and they are the same for
3022 * both Tx and Rx:
3023 * if the entry was valid previously (coalesce[i].[tr]x_valid
3024 * and the loop variable is less than the number of rings
3025 * allocated, then write the previous values
3026 *
3027 * if the entry was not valid previously, but the number of
3028 * rings is less than are allocated (this means the number of
3029 * rings increased from previously), then write out the
3030 * values in the first element
3031 *
3032 * Also, always write the ITR, even if in ITR_IS_DYNAMIC
3033 * as there is no harm because the dynamic algorithm
3034 * will just overwrite.
3035 */
3036 if (i < vsi->alloc_rxq && coalesce[i].rx_valid) {
3037 rc = &vsi->q_vectors[i]->rx;
3038 rc->itr_settings = coalesce[i].itr_rx;
3039 ice_write_itr(rc, rc->itr_setting);
3040 } else if (i < vsi->alloc_rxq) {
3041 rc = &vsi->q_vectors[i]->rx;
3042 rc->itr_settings = coalesce[0].itr_rx;
3043 ice_write_itr(rc, rc->itr_setting);
3044 }
3045
3046 if (i < vsi->alloc_txq && coalesce[i].tx_valid) {
3047 rc = &vsi->q_vectors[i]->tx;
3048 rc->itr_settings = coalesce[i].itr_tx;
3049 ice_write_itr(rc, rc->itr_setting);
3050 } else if (i < vsi->alloc_txq) {
3051 rc = &vsi->q_vectors[i]->tx;
3052 rc->itr_settings = coalesce[0].itr_tx;
3053 ice_write_itr(rc, rc->itr_setting);
3054 }
3055
3056 vsi->q_vectors[i]->intrl = coalesce[i].intrl;
3057 ice_set_q_vector_intrl(vsi->q_vectors[i]);
3058 }
3059
3060 /* the number of queue vectors increased so write whatever is in
3061 * the first element
3062 */
3063 for (; i < vsi->num_q_vectors; i++) {
3064 /* transmit */
3065 rc = &vsi->q_vectors[i]->tx;
3066 rc->itr_settings = coalesce[0].itr_tx;
3067 ice_write_itr(rc, rc->itr_setting);
3068
3069 /* receive */
3070 rc = &vsi->q_vectors[i]->rx;
3071 rc->itr_settings = coalesce[0].itr_rx;
3072 ice_write_itr(rc, rc->itr_setting);
3073
3074 vsi->q_vectors[i]->intrl = coalesce[0].intrl;
3075 ice_set_q_vector_intrl(vsi->q_vectors[i]);
3076 }
3077 }
3078
3079 /**
3080 * ice_vsi_realloc_stat_arrays - Frees unused stat structures or alloc new ones
3081 * @vsi: VSI pointer
3082 */
3083 static int
ice_vsi_realloc_stat_arrays(struct ice_vsi * vsi)3084 ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi)
3085 {
3086 u16 req_txq = vsi->req_txq ? vsi->req_txq : vsi->alloc_txq;
3087 u16 req_rxq = vsi->req_rxq ? vsi->req_rxq : vsi->alloc_rxq;
3088 struct ice_ring_stats **tx_ring_stats;
3089 struct ice_ring_stats **rx_ring_stats;
3090 struct ice_vsi_stats *vsi_stat;
3091 struct ice_pf *pf = vsi->back;
3092 u16 prev_txq = vsi->alloc_txq;
3093 u16 prev_rxq = vsi->alloc_rxq;
3094 int i;
3095
3096 vsi_stat = pf->vsi_stats[vsi->idx];
3097
3098 if (req_txq < prev_txq) {
3099 for (i = req_txq; i < prev_txq; i++) {
3100 if (vsi_stat->tx_ring_stats[i]) {
3101 kfree_rcu(vsi_stat->tx_ring_stats[i], rcu);
3102 WRITE_ONCE(vsi_stat->tx_ring_stats[i], NULL);
3103 }
3104 }
3105 }
3106
3107 tx_ring_stats = vsi_stat->tx_ring_stats;
3108 vsi_stat->tx_ring_stats =
3109 krealloc_array(vsi_stat->tx_ring_stats, req_txq,
3110 sizeof(*vsi_stat->tx_ring_stats),
3111 GFP_KERNEL | __GFP_ZERO);
3112 if (!vsi_stat->tx_ring_stats) {
3113 vsi_stat->tx_ring_stats = tx_ring_stats;
3114 return -ENOMEM;
3115 }
3116
3117 if (req_rxq < prev_rxq) {
3118 for (i = req_rxq; i < prev_rxq; i++) {
3119 if (vsi_stat->rx_ring_stats[i]) {
3120 kfree_rcu(vsi_stat->rx_ring_stats[i], rcu);
3121 WRITE_ONCE(vsi_stat->rx_ring_stats[i], NULL);
3122 }
3123 }
3124 }
3125
3126 rx_ring_stats = vsi_stat->rx_ring_stats;
3127 vsi_stat->rx_ring_stats =
3128 krealloc_array(vsi_stat->rx_ring_stats, req_rxq,
3129 sizeof(*vsi_stat->rx_ring_stats),
3130 GFP_KERNEL | __GFP_ZERO);
3131 if (!vsi_stat->rx_ring_stats) {
3132 vsi_stat->rx_ring_stats = rx_ring_stats;
3133 return -ENOMEM;
3134 }
3135
3136 return 0;
3137 }
3138
3139 /**
3140 * ice_vsi_rebuild - Rebuild VSI after reset
3141 * @vsi: VSI to be rebuild
3142 * @vsi_flags: flags used for VSI rebuild flow
3143 *
3144 * Set vsi_flags to ICE_VSI_FLAG_INIT to initialize a new VSI, or
3145 * ICE_VSI_FLAG_NO_INIT to rebuild an existing VSI in hardware.
3146 *
3147 * Returns 0 on success and negative value on failure
3148 */
ice_vsi_rebuild(struct ice_vsi * vsi,u32 vsi_flags)3149 int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
3150 {
3151 struct ice_vsi_cfg_params params = {};
3152 struct ice_coalesce_stored *coalesce;
3153 int prev_num_q_vectors;
3154 struct ice_pf *pf;
3155 int ret;
3156
3157 if (!vsi)
3158 return -EINVAL;
3159
3160 params = ice_vsi_to_params(vsi);
3161 params.flags = vsi_flags;
3162
3163 pf = vsi->back;
3164 if (WARN_ON(vsi->type == ICE_VSI_VF && !vsi->vf))
3165 return -EINVAL;
3166
3167 ret = ice_vsi_realloc_stat_arrays(vsi);
3168 if (ret)
3169 goto err_vsi_cfg;
3170
3171 ice_vsi_decfg(vsi);
3172 ret = ice_vsi_cfg_def(vsi, ¶ms);
3173 if (ret)
3174 goto err_vsi_cfg;
3175
3176 coalesce = kcalloc(vsi->num_q_vectors,
3177 sizeof(struct ice_coalesce_stored), GFP_KERNEL);
3178 if (!coalesce)
3179 return -ENOMEM;
3180
3181 prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
3182
3183 ret = ice_vsi_cfg_tc_lan(pf, vsi);
3184 if (ret) {
3185 if (vsi_flags & ICE_VSI_FLAG_INIT) {
3186 ret = -EIO;
3187 goto err_vsi_cfg_tc_lan;
3188 }
3189
3190 kfree(coalesce);
3191 return ice_schedule_reset(pf, ICE_RESET_PFR);
3192 }
3193
3194 ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
3195 kfree(coalesce);
3196
3197 return 0;
3198
3199 err_vsi_cfg_tc_lan:
3200 ice_vsi_decfg(vsi);
3201 kfree(coalesce);
3202 err_vsi_cfg:
3203 return ret;
3204 }
3205
3206 /**
3207 * ice_is_reset_in_progress - check for a reset in progress
3208 * @state: PF state field
3209 */
ice_is_reset_in_progress(unsigned long * state)3210 bool ice_is_reset_in_progress(unsigned long *state)
3211 {
3212 return test_bit(ICE_RESET_OICR_RECV, state) ||
3213 test_bit(ICE_PFR_REQ, state) ||
3214 test_bit(ICE_CORER_REQ, state) ||
3215 test_bit(ICE_GLOBR_REQ, state);
3216 }
3217
3218 /**
3219 * ice_wait_for_reset - Wait for driver to finish reset and rebuild
3220 * @pf: pointer to the PF structure
3221 * @timeout: length of time to wait, in jiffies
3222 *
3223 * Wait (sleep) for a short time until the driver finishes cleaning up from
3224 * a device reset. The caller must be able to sleep. Use this to delay
3225 * operations that could fail while the driver is cleaning up after a device
3226 * reset.
3227 *
3228 * Returns 0 on success, -EBUSY if the reset is not finished within the
3229 * timeout, and -ERESTARTSYS if the thread was interrupted.
3230 */
ice_wait_for_reset(struct ice_pf * pf,unsigned long timeout)3231 int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
3232 {
3233 long ret;
3234
3235 ret = wait_event_interruptible_timeout(pf->reset_wait_queue,
3236 !ice_is_reset_in_progress(pf->state),
3237 timeout);
3238 if (ret < 0)
3239 return ret;
3240 else if (!ret)
3241 return -EBUSY;
3242 else
3243 return 0;
3244 }
3245
3246 /**
3247 * ice_vsi_update_q_map - update our copy of the VSI info with new queue map
3248 * @vsi: VSI being configured
3249 * @ctx: the context buffer returned from AQ VSI update command
3250 */
ice_vsi_update_q_map(struct ice_vsi * vsi,struct ice_vsi_ctx * ctx)3251 static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
3252 {
3253 vsi->info.mapping_flags = ctx->info.mapping_flags;
3254 memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
3255 sizeof(vsi->info.q_mapping));
3256 memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
3257 sizeof(vsi->info.tc_mapping));
3258 }
3259
3260 /**
3261 * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
3262 * @vsi: the VSI being configured
3263 * @ena_tc: TC map to be enabled
3264 */
ice_vsi_cfg_netdev_tc(struct ice_vsi * vsi,u8 ena_tc)3265 void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
3266 {
3267 struct net_device *netdev = vsi->netdev;
3268 struct ice_pf *pf = vsi->back;
3269 int numtc = vsi->tc_cfg.numtc;
3270 struct ice_dcbx_cfg *dcbcfg;
3271 u8 netdev_tc;
3272 int i;
3273
3274 if (!netdev)
3275 return;
3276
3277 /* CHNL VSI doesn't have it's own netdev, hence, no netdev_tc */
3278 if (vsi->type == ICE_VSI_CHNL)
3279 return;
3280
3281 if (!ena_tc) {
3282 netdev_reset_tc(netdev);
3283 return;
3284 }
3285
3286 if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
3287 numtc = vsi->all_numtc;
3288
3289 if (netdev_set_num_tc(netdev, numtc))
3290 return;
3291
3292 dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
3293
3294 ice_for_each_traffic_class(i)
3295 if (vsi->tc_cfg.ena_tc & BIT(i))
3296 netdev_set_tc_queue(netdev,
3297 vsi->tc_cfg.tc_info[i].netdev_tc,
3298 vsi->tc_cfg.tc_info[i].qcount_tx,
3299 vsi->tc_cfg.tc_info[i].qoffset);
3300 /* setup TC queue map for CHNL TCs */
3301 ice_for_each_chnl_tc(i) {
3302 if (!(vsi->all_enatc & BIT(i)))
3303 break;
3304 if (!vsi->mqprio_qopt.qopt.count[i])
3305 break;
3306 netdev_set_tc_queue(netdev, i,
3307 vsi->mqprio_qopt.qopt.count[i],
3308 vsi->mqprio_qopt.qopt.offset[i]);
3309 }
3310
3311 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3312 return;
3313
3314 for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
3315 u8 ets_tc = dcbcfg->etscfg.prio_table[i];
3316
3317 /* Get the mapped netdev TC# for the UP */
3318 netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
3319 netdev_set_prio_tc_map(netdev, i, netdev_tc);
3320 }
3321 }
3322
3323 /**
3324 * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
3325 * @vsi: the VSI being configured,
3326 * @ctxt: VSI context structure
3327 * @ena_tc: number of traffic classes to enable
3328 *
3329 * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
3330 */
3331 static int
ice_vsi_setup_q_map_mqprio(struct ice_vsi * vsi,struct ice_vsi_ctx * ctxt,u8 ena_tc)3332 ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
3333 u8 ena_tc)
3334 {
3335 u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
3336 u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
3337 int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
3338 u16 new_txq, new_rxq;
3339 u8 netdev_tc = 0;
3340 int i;
3341
3342 vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
3343
3344 pow = order_base_2(tc0_qcount);
3345 qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
3346 ICE_AQ_VSI_TC_Q_OFFSET_M) |
3347 ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
3348
3349 ice_for_each_traffic_class(i) {
3350 if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
3351 /* TC is not enabled */
3352 vsi->tc_cfg.tc_info[i].qoffset = 0;
3353 vsi->tc_cfg.tc_info[i].qcount_rx = 1;
3354 vsi->tc_cfg.tc_info[i].qcount_tx = 1;
3355 vsi->tc_cfg.tc_info[i].netdev_tc = 0;
3356 ctxt->info.tc_mapping[i] = 0;
3357 continue;
3358 }
3359
3360 offset = vsi->mqprio_qopt.qopt.offset[i];
3361 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3362 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3363 vsi->tc_cfg.tc_info[i].qoffset = offset;
3364 vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
3365 vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
3366 vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
3367 }
3368
3369 if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
3370 ice_for_each_chnl_tc(i) {
3371 if (!(vsi->all_enatc & BIT(i)))
3372 continue;
3373 offset = vsi->mqprio_qopt.qopt.offset[i];
3374 qcount_rx = vsi->mqprio_qopt.qopt.count[i];
3375 qcount_tx = vsi->mqprio_qopt.qopt.count[i];
3376 }
3377 }
3378
3379 new_txq = offset + qcount_tx;
3380 if (new_txq > vsi->alloc_txq) {
3381 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n",
3382 new_txq, vsi->alloc_txq);
3383 return -EINVAL;
3384 }
3385
3386 new_rxq = offset + qcount_rx;
3387 if (new_rxq > vsi->alloc_rxq) {
3388 dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n",
3389 new_rxq, vsi->alloc_rxq);
3390 return -EINVAL;
3391 }
3392
3393 /* Set actual Tx/Rx queue pairs */
3394 vsi->num_txq = new_txq;
3395 vsi->num_rxq = new_rxq;
3396
3397 /* Setup queue TC[0].qmap for given VSI context */
3398 ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
3399 ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
3400 ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
3401
3402 /* Find queue count available for channel VSIs and starting offset
3403 * for channel VSIs
3404 */
3405 if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
3406 vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
3407 vsi->next_base_q = tc0_qcount;
3408 }
3409 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
3410 dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
3411 dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
3412 vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
3413
3414 return 0;
3415 }
3416
3417 /**
3418 * ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
3419 * @vsi: VSI to be configured
3420 * @ena_tc: TC bitmap
3421 *
3422 * VSI queues expected to be quiesced before calling this function
3423 */
ice_vsi_cfg_tc(struct ice_vsi * vsi,u8 ena_tc)3424 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
3425 {
3426 u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
3427 struct ice_pf *pf = vsi->back;
3428 struct ice_tc_cfg old_tc_cfg;
3429 struct ice_vsi_ctx *ctx;
3430 struct device *dev;
3431 int i, ret = 0;
3432 u8 num_tc = 0;
3433
3434 dev = ice_pf_to_dev(pf);
3435 if (vsi->tc_cfg.ena_tc == ena_tc &&
3436 vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
3437 return 0;
3438
3439 ice_for_each_traffic_class(i) {
3440 /* build bitmap of enabled TCs */
3441 if (ena_tc & BIT(i))
3442 num_tc++;
3443 /* populate max_txqs per TC */
3444 max_txqs[i] = vsi->alloc_txq;
3445 /* Update max_txqs if it is CHNL VSI, because alloc_t[r]xq are
3446 * zero for CHNL VSI, hence use num_txq instead as max_txqs
3447 */
3448 if (vsi->type == ICE_VSI_CHNL &&
3449 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3450 max_txqs[i] = vsi->num_txq;
3451 }
3452
3453 memcpy(&old_tc_cfg, &vsi->tc_cfg, sizeof(old_tc_cfg));
3454 vsi->tc_cfg.ena_tc = ena_tc;
3455 vsi->tc_cfg.numtc = num_tc;
3456
3457 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
3458 if (!ctx)
3459 return -ENOMEM;
3460
3461 ctx->vf_num = 0;
3462 ctx->info = vsi->info;
3463
3464 if (vsi->type == ICE_VSI_PF &&
3465 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3466 ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
3467 else
3468 ret = ice_vsi_setup_q_map(vsi, ctx);
3469
3470 if (ret) {
3471 memcpy(&vsi->tc_cfg, &old_tc_cfg, sizeof(vsi->tc_cfg));
3472 goto out;
3473 }
3474
3475 /* must to indicate which section of VSI context are being modified */
3476 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
3477 ret = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
3478 if (ret) {
3479 dev_info(dev, "Failed VSI Update\n");
3480 goto out;
3481 }
3482
3483 if (vsi->type == ICE_VSI_PF &&
3484 test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
3485 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
3486 else
3487 ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
3488 vsi->tc_cfg.ena_tc, max_txqs);
3489
3490 if (ret) {
3491 dev_err(dev, "VSI %d failed TC config, error %d\n",
3492 vsi->vsi_num, ret);
3493 goto out;
3494 }
3495 ice_vsi_update_q_map(vsi, ctx);
3496 vsi->info.valid_sections = 0;
3497
3498 ice_vsi_cfg_netdev_tc(vsi, ena_tc);
3499 out:
3500 kfree(ctx);
3501 return ret;
3502 }
3503
3504 /**
3505 * ice_update_ring_stats - Update ring statistics
3506 * @stats: stats to be updated
3507 * @pkts: number of processed packets
3508 * @bytes: number of processed bytes
3509 *
3510 * This function assumes that caller has acquired a u64_stats_sync lock.
3511 */
ice_update_ring_stats(struct ice_q_stats * stats,u64 pkts,u64 bytes)3512 static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
3513 {
3514 stats->bytes += bytes;
3515 stats->pkts += pkts;
3516 }
3517
3518 /**
3519 * ice_update_tx_ring_stats - Update Tx ring specific counters
3520 * @tx_ring: ring to update
3521 * @pkts: number of processed packets
3522 * @bytes: number of processed bytes
3523 */
ice_update_tx_ring_stats(struct ice_tx_ring * tx_ring,u64 pkts,u64 bytes)3524 void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
3525 {
3526 u64_stats_update_begin(&tx_ring->ring_stats->syncp);
3527 ice_update_ring_stats(&tx_ring->ring_stats->stats, pkts, bytes);
3528 u64_stats_update_end(&tx_ring->ring_stats->syncp);
3529 }
3530
3531 /**
3532 * ice_update_rx_ring_stats - Update Rx ring specific counters
3533 * @rx_ring: ring to update
3534 * @pkts: number of processed packets
3535 * @bytes: number of processed bytes
3536 */
ice_update_rx_ring_stats(struct ice_rx_ring * rx_ring,u64 pkts,u64 bytes)3537 void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
3538 {
3539 u64_stats_update_begin(&rx_ring->ring_stats->syncp);
3540 ice_update_ring_stats(&rx_ring->ring_stats->stats, pkts, bytes);
3541 u64_stats_update_end(&rx_ring->ring_stats->syncp);
3542 }
3543
3544 /**
3545 * ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
3546 * @pi: port info of the switch with default VSI
3547 *
3548 * Return true if the there is a single VSI in default forwarding VSI list
3549 */
ice_is_dflt_vsi_in_use(struct ice_port_info * pi)3550 bool ice_is_dflt_vsi_in_use(struct ice_port_info *pi)
3551 {
3552 bool exists = false;
3553
3554 ice_check_if_dflt_vsi(pi, 0, &exists);
3555 return exists;
3556 }
3557
3558 /**
3559 * ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
3560 * @vsi: VSI to compare against default forwarding VSI
3561 *
3562 * If this VSI passed in is the default forwarding VSI then return true, else
3563 * return false
3564 */
ice_is_vsi_dflt_vsi(struct ice_vsi * vsi)3565 bool ice_is_vsi_dflt_vsi(struct ice_vsi *vsi)
3566 {
3567 return ice_check_if_dflt_vsi(vsi->port_info, vsi->idx, NULL);
3568 }
3569
3570 /**
3571 * ice_set_dflt_vsi - set the default forwarding VSI
3572 * @vsi: VSI getting set as the default forwarding VSI on the switch
3573 *
3574 * If the VSI passed in is already the default VSI and it's enabled just return
3575 * success.
3576 *
3577 * Otherwise try to set the VSI passed in as the switch's default VSI and
3578 * return the result.
3579 */
ice_set_dflt_vsi(struct ice_vsi * vsi)3580 int ice_set_dflt_vsi(struct ice_vsi *vsi)
3581 {
3582 struct device *dev;
3583 int status;
3584
3585 if (!vsi)
3586 return -EINVAL;
3587
3588 dev = ice_pf_to_dev(vsi->back);
3589
3590 if (ice_lag_is_switchdev_running(vsi->back)) {
3591 dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
3592 vsi->vsi_num);
3593 return 0;
3594 }
3595
3596 /* the VSI passed in is already the default VSI */
3597 if (ice_is_vsi_dflt_vsi(vsi)) {
3598 dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
3599 vsi->vsi_num);
3600 return 0;
3601 }
3602
3603 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, true, ICE_FLTR_RX);
3604 if (status) {
3605 dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
3606 vsi->vsi_num, status);
3607 return status;
3608 }
3609
3610 return 0;
3611 }
3612
3613 /**
3614 * ice_clear_dflt_vsi - clear the default forwarding VSI
3615 * @vsi: VSI to remove from filter list
3616 *
3617 * If the switch has no default VSI or it's not enabled then return error.
3618 *
3619 * Otherwise try to clear the default VSI and return the result.
3620 */
ice_clear_dflt_vsi(struct ice_vsi * vsi)3621 int ice_clear_dflt_vsi(struct ice_vsi *vsi)
3622 {
3623 struct device *dev;
3624 int status;
3625
3626 if (!vsi)
3627 return -EINVAL;
3628
3629 dev = ice_pf_to_dev(vsi->back);
3630
3631 /* there is no default VSI configured */
3632 if (!ice_is_dflt_vsi_in_use(vsi->port_info))
3633 return -ENODEV;
3634
3635 status = ice_cfg_dflt_vsi(vsi->port_info, vsi->idx, false,
3636 ICE_FLTR_RX);
3637 if (status) {
3638 dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
3639 vsi->vsi_num, status);
3640 return -EIO;
3641 }
3642
3643 return 0;
3644 }
3645
3646 /**
3647 * ice_get_link_speed_mbps - get link speed in Mbps
3648 * @vsi: the VSI whose link speed is being queried
3649 *
3650 * Return current VSI link speed and 0 if the speed is unknown.
3651 */
ice_get_link_speed_mbps(struct ice_vsi * vsi)3652 int ice_get_link_speed_mbps(struct ice_vsi *vsi)
3653 {
3654 unsigned int link_speed;
3655
3656 link_speed = vsi->port_info->phy.link_info.link_speed;
3657
3658 return (int)ice_get_link_speed(fls(link_speed) - 1);
3659 }
3660
3661 /**
3662 * ice_get_link_speed_kbps - get link speed in Kbps
3663 * @vsi: the VSI whose link speed is being queried
3664 *
3665 * Return current VSI link speed and 0 if the speed is unknown.
3666 */
ice_get_link_speed_kbps(struct ice_vsi * vsi)3667 int ice_get_link_speed_kbps(struct ice_vsi *vsi)
3668 {
3669 int speed_mbps;
3670
3671 speed_mbps = ice_get_link_speed_mbps(vsi);
3672
3673 return speed_mbps * 1000;
3674 }
3675
3676 /**
3677 * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
3678 * @vsi: VSI to be configured
3679 * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
3680 *
3681 * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit
3682 * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
3683 * on TC 0.
3684 */
ice_set_min_bw_limit(struct ice_vsi * vsi,u64 min_tx_rate)3685 int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
3686 {
3687 struct ice_pf *pf = vsi->back;
3688 struct device *dev;
3689 int status;
3690 int speed;
3691
3692 dev = ice_pf_to_dev(pf);
3693 if (!vsi->port_info) {
3694 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3695 vsi->idx, vsi->type);
3696 return -EINVAL;
3697 }
3698
3699 speed = ice_get_link_speed_kbps(vsi);
3700 if (min_tx_rate > (u64)speed) {
3701 dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3702 min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3703 speed);
3704 return -EINVAL;
3705 }
3706
3707 /* Configure min BW for VSI limit */
3708 if (min_tx_rate) {
3709 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3710 ICE_MIN_BW, min_tx_rate);
3711 if (status) {
3712 dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
3713 min_tx_rate, ice_vsi_type_str(vsi->type),
3714 vsi->idx);
3715 return status;
3716 }
3717
3718 dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
3719 min_tx_rate, ice_vsi_type_str(vsi->type));
3720 } else {
3721 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3722 vsi->idx, 0,
3723 ICE_MIN_BW);
3724 if (status) {
3725 dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
3726 ice_vsi_type_str(vsi->type), vsi->idx);
3727 return status;
3728 }
3729
3730 dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
3731 ice_vsi_type_str(vsi->type), vsi->idx);
3732 }
3733
3734 return 0;
3735 }
3736
3737 /**
3738 * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
3739 * @vsi: VSI to be configured
3740 * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
3741 *
3742 * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit
3743 * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
3744 * on TC 0.
3745 */
ice_set_max_bw_limit(struct ice_vsi * vsi,u64 max_tx_rate)3746 int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
3747 {
3748 struct ice_pf *pf = vsi->back;
3749 struct device *dev;
3750 int status;
3751 int speed;
3752
3753 dev = ice_pf_to_dev(pf);
3754 if (!vsi->port_info) {
3755 dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
3756 vsi->idx, vsi->type);
3757 return -EINVAL;
3758 }
3759
3760 speed = ice_get_link_speed_kbps(vsi);
3761 if (max_tx_rate > (u64)speed) {
3762 dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
3763 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
3764 speed);
3765 return -EINVAL;
3766 }
3767
3768 /* Configure max BW for VSI limit */
3769 if (max_tx_rate) {
3770 status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
3771 ICE_MAX_BW, max_tx_rate);
3772 if (status) {
3773 dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
3774 max_tx_rate, ice_vsi_type_str(vsi->type),
3775 vsi->idx);
3776 return status;
3777 }
3778
3779 dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
3780 max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
3781 } else {
3782 status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
3783 vsi->idx, 0,
3784 ICE_MAX_BW);
3785 if (status) {
3786 dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
3787 ice_vsi_type_str(vsi->type), vsi->idx);
3788 return status;
3789 }
3790
3791 dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
3792 ice_vsi_type_str(vsi->type), vsi->idx);
3793 }
3794
3795 return 0;
3796 }
3797
3798 /**
3799 * ice_set_link - turn on/off physical link
3800 * @vsi: VSI to modify physical link on
3801 * @ena: turn on/off physical link
3802 */
ice_set_link(struct ice_vsi * vsi,bool ena)3803 int ice_set_link(struct ice_vsi *vsi, bool ena)
3804 {
3805 struct device *dev = ice_pf_to_dev(vsi->back);
3806 struct ice_port_info *pi = vsi->port_info;
3807 struct ice_hw *hw = pi->hw;
3808 int status;
3809
3810 if (vsi->type != ICE_VSI_PF)
3811 return -EINVAL;
3812
3813 status = ice_aq_set_link_restart_an(pi, ena, NULL);
3814
3815 /* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
3816 * this is not a fatal error, so print a warning message and return
3817 * a success code. Return an error if FW returns an error code other
3818 * than ICE_AQ_RC_EMODE
3819 */
3820 if (status == -EIO) {
3821 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
3822 dev_dbg(dev, "can't set link to %s, err %d aq_err %s. not fatal, continuing\n",
3823 (ena ? "ON" : "OFF"), status,
3824 ice_aq_str(hw->adminq.sq_last_status));
3825 } else if (status) {
3826 dev_err(dev, "can't set link to %s, err %d aq_err %s\n",
3827 (ena ? "ON" : "OFF"), status,
3828 ice_aq_str(hw->adminq.sq_last_status));
3829 return status;
3830 }
3831
3832 return 0;
3833 }
3834
3835 /**
3836 * ice_vsi_add_vlan_zero - add VLAN 0 filter(s) for this VSI
3837 * @vsi: VSI used to add VLAN filters
3838 *
3839 * In Single VLAN Mode (SVM), single VLAN filters via ICE_SW_LKUP_VLAN are based
3840 * on the inner VLAN ID, so the VLAN TPID (i.e. 0x8100 or 0x888a8) doesn't
3841 * matter. In Double VLAN Mode (DVM), outer/single VLAN filters via
3842 * ICE_SW_LKUP_VLAN are based on the outer/single VLAN ID + VLAN TPID.
3843 *
3844 * For both modes add a VLAN 0 + no VLAN TPID filter to handle untagged traffic
3845 * when VLAN pruning is enabled. Also, this handles VLAN 0 priority tagged
3846 * traffic in SVM, since the VLAN TPID isn't part of filtering.
3847 *
3848 * If DVM is enabled then an explicit VLAN 0 + VLAN TPID filter needs to be
3849 * added to allow VLAN 0 priority tagged traffic in DVM, since the VLAN TPID is
3850 * part of filtering.
3851 */
ice_vsi_add_vlan_zero(struct ice_vsi * vsi)3852 int ice_vsi_add_vlan_zero(struct ice_vsi *vsi)
3853 {
3854 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3855 struct ice_vlan vlan;
3856 int err;
3857
3858 vlan = ICE_VLAN(0, 0, 0);
3859 err = vlan_ops->add_vlan(vsi, &vlan);
3860 if (err && err != -EEXIST)
3861 return err;
3862
3863 /* in SVM both VLAN 0 filters are identical */
3864 if (!ice_is_dvm_ena(&vsi->back->hw))
3865 return 0;
3866
3867 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3868 err = vlan_ops->add_vlan(vsi, &vlan);
3869 if (err && err != -EEXIST)
3870 return err;
3871
3872 return 0;
3873 }
3874
3875 /**
3876 * ice_vsi_del_vlan_zero - delete VLAN 0 filter(s) for this VSI
3877 * @vsi: VSI used to add VLAN filters
3878 *
3879 * Delete the VLAN 0 filters in the same manner that they were added in
3880 * ice_vsi_add_vlan_zero.
3881 */
ice_vsi_del_vlan_zero(struct ice_vsi * vsi)3882 int ice_vsi_del_vlan_zero(struct ice_vsi *vsi)
3883 {
3884 struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
3885 struct ice_vlan vlan;
3886 int err;
3887
3888 vlan = ICE_VLAN(0, 0, 0);
3889 err = vlan_ops->del_vlan(vsi, &vlan);
3890 if (err && err != -EEXIST)
3891 return err;
3892
3893 /* in SVM both VLAN 0 filters are identical */
3894 if (!ice_is_dvm_ena(&vsi->back->hw))
3895 return 0;
3896
3897 vlan = ICE_VLAN(ETH_P_8021Q, 0, 0);
3898 err = vlan_ops->del_vlan(vsi, &vlan);
3899 if (err && err != -EEXIST)
3900 return err;
3901
3902 /* when deleting the last VLAN filter, make sure to disable the VLAN
3903 * promisc mode so the filter isn't left by accident
3904 */
3905 return ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx,
3906 ICE_MCAST_VLAN_PROMISC_BITS, 0);
3907 }
3908
3909 /**
3910 * ice_vsi_num_zero_vlans - get number of VLAN 0 filters based on VLAN mode
3911 * @vsi: VSI used to get the VLAN mode
3912 *
3913 * If DVM is enabled then 2 VLAN 0 filters are added, else if SVM is enabled
3914 * then 1 VLAN 0 filter is added. See ice_vsi_add_vlan_zero for more details.
3915 */
ice_vsi_num_zero_vlans(struct ice_vsi * vsi)3916 static u16 ice_vsi_num_zero_vlans(struct ice_vsi *vsi)
3917 {
3918 #define ICE_DVM_NUM_ZERO_VLAN_FLTRS 2
3919 #define ICE_SVM_NUM_ZERO_VLAN_FLTRS 1
3920 /* no VLAN 0 filter is created when a port VLAN is active */
3921 if (vsi->type == ICE_VSI_VF) {
3922 if (WARN_ON(!vsi->vf))
3923 return 0;
3924
3925 if (ice_vf_is_port_vlan_ena(vsi->vf))
3926 return 0;
3927 }
3928
3929 if (ice_is_dvm_ena(&vsi->back->hw))
3930 return ICE_DVM_NUM_ZERO_VLAN_FLTRS;
3931 else
3932 return ICE_SVM_NUM_ZERO_VLAN_FLTRS;
3933 }
3934
3935 /**
3936 * ice_vsi_has_non_zero_vlans - check if VSI has any non-zero VLANs
3937 * @vsi: VSI used to determine if any non-zero VLANs have been added
3938 */
ice_vsi_has_non_zero_vlans(struct ice_vsi * vsi)3939 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi)
3940 {
3941 return (vsi->num_vlan > ice_vsi_num_zero_vlans(vsi));
3942 }
3943
3944 /**
3945 * ice_vsi_num_non_zero_vlans - get the number of non-zero VLANs for this VSI
3946 * @vsi: VSI used to get the number of non-zero VLANs added
3947 */
ice_vsi_num_non_zero_vlans(struct ice_vsi * vsi)3948 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi)
3949 {
3950 return (vsi->num_vlan - ice_vsi_num_zero_vlans(vsi));
3951 }
3952
3953 /**
3954 * ice_is_feature_supported
3955 * @pf: pointer to the struct ice_pf instance
3956 * @f: feature enum to be checked
3957 *
3958 * returns true if feature is supported, false otherwise
3959 */
ice_is_feature_supported(struct ice_pf * pf,enum ice_feature f)3960 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
3961 {
3962 if (f < 0 || f >= ICE_F_MAX)
3963 return false;
3964
3965 return test_bit(f, pf->features);
3966 }
3967
3968 /**
3969 * ice_set_feature_support
3970 * @pf: pointer to the struct ice_pf instance
3971 * @f: feature enum to set
3972 */
ice_set_feature_support(struct ice_pf * pf,enum ice_feature f)3973 void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
3974 {
3975 if (f < 0 || f >= ICE_F_MAX)
3976 return;
3977
3978 set_bit(f, pf->features);
3979 }
3980
3981 /**
3982 * ice_clear_feature_support
3983 * @pf: pointer to the struct ice_pf instance
3984 * @f: feature enum to clear
3985 */
ice_clear_feature_support(struct ice_pf * pf,enum ice_feature f)3986 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
3987 {
3988 if (f < 0 || f >= ICE_F_MAX)
3989 return;
3990
3991 clear_bit(f, pf->features);
3992 }
3993
3994 /**
3995 * ice_init_feature_support
3996 * @pf: pointer to the struct ice_pf instance
3997 *
3998 * called during init to setup supported feature
3999 */
ice_init_feature_support(struct ice_pf * pf)4000 void ice_init_feature_support(struct ice_pf *pf)
4001 {
4002 switch (pf->hw.device_id) {
4003 case ICE_DEV_ID_E810C_BACKPLANE:
4004 case ICE_DEV_ID_E810C_QSFP:
4005 case ICE_DEV_ID_E810C_SFP:
4006 ice_set_feature_support(pf, ICE_F_DSCP);
4007 ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
4008 if (ice_is_e810t(&pf->hw)) {
4009 ice_set_feature_support(pf, ICE_F_SMA_CTRL);
4010 if (ice_gnss_is_gps_present(&pf->hw))
4011 ice_set_feature_support(pf, ICE_F_GNSS);
4012 }
4013 break;
4014 default:
4015 break;
4016 }
4017 }
4018
4019 /**
4020 * ice_vsi_update_security - update security block in VSI
4021 * @vsi: pointer to VSI structure
4022 * @fill: function pointer to fill ctx
4023 */
4024 int
ice_vsi_update_security(struct ice_vsi * vsi,void (* fill)(struct ice_vsi_ctx *))4025 ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
4026 {
4027 struct ice_vsi_ctx ctx = { 0 };
4028
4029 ctx.info = vsi->info;
4030 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
4031 fill(&ctx);
4032
4033 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4034 return -ENODEV;
4035
4036 vsi->info = ctx.info;
4037 return 0;
4038 }
4039
4040 /**
4041 * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
4042 * @ctx: pointer to VSI ctx structure
4043 */
ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx * ctx)4044 void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
4045 {
4046 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
4047 (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4048 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4049 }
4050
4051 /**
4052 * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
4053 * @ctx: pointer to VSI ctx structure
4054 */
ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx * ctx)4055 void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
4056 {
4057 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
4058 ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
4059 ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
4060 }
4061
4062 /**
4063 * ice_vsi_ctx_set_allow_override - allow destination override on VSI
4064 * @ctx: pointer to VSI ctx structure
4065 */
ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx * ctx)4066 void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
4067 {
4068 ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4069 }
4070
4071 /**
4072 * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
4073 * @ctx: pointer to VSI ctx structure
4074 */
ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx * ctx)4075 void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
4076 {
4077 ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
4078 }
4079
4080 /**
4081 * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
4082 * @vsi: pointer to VSI structure
4083 * @set: set or unset the bit
4084 */
4085 int
ice_vsi_update_local_lb(struct ice_vsi * vsi,bool set)4086 ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
4087 {
4088 struct ice_vsi_ctx ctx = {
4089 .info = vsi->info,
4090 };
4091
4092 ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
4093 if (set)
4094 ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
4095 else
4096 ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
4097
4098 if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
4099 return -ENODEV;
4100
4101 vsi->info = ctx.info;
4102 return 0;
4103 }
4104