/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#include "ixgbe_sriov.h"

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for SR-IOV to the assigned rings.  It
 * will also try to cache the proper offsets if RSS/FCoE are enabled along
 * with VMDq.
 *
 **/
static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	int i;
	u16 reg_idx;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

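	/* A note on the pool arithmetic used below (illustrative; based on
	 * the generic __ALIGN_MASK(x, mask) = ((x) + (mask)) & ~(mask)
	 * helper): __ALIGN_MASK(1, ~vmdq->mask) evaluates to the number of
	 * queues per VMDq pool, (reg_idx & ~vmdq->mask) extracts the queue
	 * index within the current pool, and
	 * __ALIGN_MASK(reg_idx, ~vmdq->mask) rounds reg_idx up to the first
	 * queue of the next pool.  For example, assuming the 4-queue pool
	 * mask and vmdq->offset = 2, the first ring below would land on
	 * register index 8.
	 */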
	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= tcs)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* nothing to do if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return true;

	/* The work is already done if the FCoE ring is shared */
	if (fcoe->offset < tcs)
		return true;

	/* The FCoE rings exist separately, so we need to move their reg_idx */
	if (fcoe->indices) {
		u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask);
		u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter);

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_rx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->rx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}

		reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool;
		for (i = fcoe->offset; i < adapter->num_tx_queues; i++) {
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc;
			adapter->tx_ring[i]->reg_idx = reg_idx;
			reg_idx++;
		}
	}

#endif /* IXGBE_FCOE */
	return true;
}

/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
				    unsigned int *tx, unsigned int *rx)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_hw *hw = &adapter->hw;
	u8 num_tcs = netdev_get_num_tc(dev);

	*tx = 0;
	*rx = 0;

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		/* TxQs/TC: 4	RxQs/TC: 8 */
		*tx = tc << 2; /* 0, 4,  8, 12, 16, 20, 24, 28 */
		*rx = tc << 3; /* 0, 8, 16, 24, 32, 40, 48, 56 */
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		if (num_tcs > 4) {
			/*
			 * TCs    : TC0/1 TC2/3 TC4-7
			 * TxQs/TC:    32    16     8
			 * RxQs/TC:    16    16    16
			 */
			*rx = tc << 4;
			if (tc < 3)
				*tx = tc << 5;		/*   0,  32,  64 */
			else if (tc < 5)
				*tx = (tc + 2) << 4;	/*  80,  96 */
			else
				*tx = (tc + 8) << 3;	/* 104, 112, 120 */
		} else {
			/*
			 * TCs    : TC0 TC1 TC2/3
			 * TxQs/TC:  64  32    16
			 * RxQs/TC:  32  32    32
			 */
			*rx = tc << 5;
			if (tc < 2)
				*tx = tc << 6;		/*  0,  64 */
			else
				*tx = (tc + 4) << 4;	/* 96, 112 */
		}
		break;
	default:
		break;
	}
}

/**
 * ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for DCB to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	unsigned int tx_idx, rx_idx;
	int tc, offset, rss_i, i;
	u8 num_tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (num_tcs <= 1)
		return false;

	rss_i = adapter->ring_feature[RING_F_RSS].indices;

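	/* Illustrative example: on an 82599 with num_tcs = 4 and rss_i = 16,
	 * ixgbe_get_first_reg_idx() returns Tx/Rx bases of 64/32 for TC1, so
	 * rings 16-31 (offset = 16) would map to Tx registers 64-79 and Rx
	 * registers 32-47.
	 */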
	for (tc = 0, offset = 0; tc < num_tcs; tc++, offset += rss_i) {
		ixgbe_get_first_reg_idx(adapter, tc, &tx_idx, &rx_idx);
		for (i = 0; i < rss_i; i++, tx_idx++, rx_idx++) {
			adapter->tx_ring[offset + i]->reg_idx = tx_idx;
			adapter->rx_ring[offset + i]->reg_idx = rx_idx;
			adapter->tx_ring[offset + i]->dcb_tc = tc;
			adapter->rx_ring[offset + i]->dcb_tc = tc;
		}
	}

	return true;
}

#endif
/**
 * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for SR-IOV
 * @adapter: board private structure to initialize
 *
 * SR-IOV doesn't use any descriptor rings but changes the default if
 * no other mapping is used.
 *
 **/
static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
#ifdef IXGBE_FCOE
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
#endif /* IXGBE_FCOE */
	struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ];
	struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS];
	int i;
	u16 reg_idx;

	/* only proceed if VMDq is enabled */
	if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED))
		return false;

	/* start at VMDq register offset for SR-IOV enabled setups */
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & ~vmdq->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->rx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_rx_queues; i++, reg_idx++)
		adapter->rx_ring[i]->reg_idx = reg_idx;

#endif
	reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
	for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) {
#ifdef IXGBE_FCOE
		/* Allow first FCoE queue to be mapped as RSS */
		if (fcoe->offset && (i > fcoe->offset))
			break;
#endif
		/* If we are greater than indices, move to next pool */
		if ((reg_idx & rss->mask) >= rss->indices)
			reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask);
		adapter->tx_ring[i]->reg_idx = reg_idx;
	}

#ifdef IXGBE_FCOE
	/* FCoE uses a linear block of queues, so just assign them 1:1 */
	for (; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;

#endif

	return true;
}

/**
 * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @adapter: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS to the assigned rings.
 *
 **/
static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
	int i, reg_idx;

	for (i = 0; i < adapter->num_rx_queues; i++)
		adapter->rx_ring[i]->reg_idx = i;
	for (i = 0, reg_idx = 0; i < adapter->num_tx_queues; i++, reg_idx++)
		adapter->tx_ring[i]->reg_idx = reg_idx;
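	/* XDP rings are Tx rings from the hardware's perspective, so they
	 * simply keep consuming Tx register indices after the regular Tx
	 * rings.
	 */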
	for (i = 0; i < adapter->num_xdp_queues; i++, reg_idx++)
		adapter->xdp_ring[i]->reg_idx = reg_idx;

	return true;
}

/**
 * ixgbe_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 *
 * Note, the order of the various feature calls is important.  It must start
 * with the "most" features enabled at the same time, then trickle down to
 * the fewest features turned on at once.
 **/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
	/* start with default case */
	adapter->rx_ring[0]->reg_idx = 0;
	adapter->tx_ring[0]->reg_idx = 0;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_cache_ring_dcb_sriov(adapter))
		return;

	if (ixgbe_cache_ring_dcb(adapter))
		return;

#endif
	if (ixgbe_cache_ring_sriov(adapter))
		return;

	ixgbe_cache_ring_rss(adapter);
}

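/* One XDP Tx ring is sized per possible CPU (nr_cpu_ids) so that, roughly
 * speaking, each CPU can transmit XDP frames on its own ring without
 * locking.
 */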
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
	return adapter->xdp_prog ? nr_cpu_ids : 0;
}

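/* RSS queue masks: each mask covers the queue-index bits RSS may use, e.g.
 * IXGBE_RSS_16Q_MASK (0xF) permits up to 16 RSS queues.
 */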
#define IXGBE_RSS_64Q_MASK	0x3F
#define IXGBE_RSS_16Q_MASK	0xF
#define IXGBE_RSS_8Q_MASK	0x7
#define IXGBE_RSS_4Q_MASK	0x3
#define IXGBE_RSS_2Q_MASK	0x1
#define IXGBE_RSS_DISABLED_MASK	0x0

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  Also assign queues based on DCB
 * priorities and map accordingly.
 *
 **/
static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter)
{
	int i;
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* verify we have VMDq enabled before proceeding */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* 16 pools w/ 8 TC per pool */
	if (tcs > 4) {
		vmdq_i = min_t(u16, vmdq_i, 16);
		vmdq_m = IXGBE_82599_VMDQ_8Q_MASK;
	/* 32 pools w/ 4 TC per pool */
	} else {
		vmdq_i = min_t(u16, vmdq_i, 32);
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
	}

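	/* Worked example (illustrative): with tcs = 8, a request for 40
	 * pools is capped at 16 pools of 8 queues; with tcs <= 4 the same
	 * request is capped at 32 pools of 4 queues, which would leave
	 * 128/4 - 32 = 0 queues for FCoE in the computation below.
	 */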
#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i;

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/*
	 * We do not support DCB, VMDq, and RSS all simultaneously,
	 * so we will disable RSS since it is the lowest priority
	 */
	adapter->ring_feature[RING_F_RSS].indices = 1;
	adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = tcs;

	adapter->num_tx_queues = vmdq_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = vmdq_i * tcs;

#ifdef IXGBE_FCOE
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * tcs;

			/* add queues to adapter */
			adapter->num_tx_queues += fcoe_i;
			adapter->num_rx_queues += fcoe_i;
		} else if (tcs > 1) {
			/* use queue belonging to FCoE TC */
			fcoe->indices = 1;
			fcoe->offset = ixgbe_fcoe_get_tc(adapter);
		} else {
			adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;

			fcoe->indices = 0;
			fcoe->offset = 0;
		}
	}

#endif /* IXGBE_FCOE */
	/* configure TC to queue mapping */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(adapter->netdev, i, 1, i);

	return true;
}

static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
	struct net_device *dev = adapter->netdev;
	struct ixgbe_ring_feature *f;
	int rss_i, rss_m, i;
	int tcs;

	/* Map queue offset and counts onto allocated tx queues */
	tcs = netdev_get_num_tc(dev);

	/* verify we have DCB queueing enabled before proceeding */
	if (tcs <= 1)
		return false;

	/* determine the upper limit for our current DCB mode */
	rss_i = dev->num_tx_queues / tcs;
	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
		/* 8 TC w/ 4 queues per TC */
		rss_i = min_t(u16, rss_i, 4);
		rss_m = IXGBE_RSS_4Q_MASK;
	} else if (tcs > 4) {
		/* 8 TC w/ 8 queues per TC */
		rss_i = min_t(u16, rss_i, 8);
		rss_m = IXGBE_RSS_8Q_MASK;
	} else {
		/* 4 TC w/ 16 queues per TC */
		rss_i = min_t(u16, rss_i, 16);
		rss_m = IXGBE_RSS_16Q_MASK;
	}

	/* set RSS mask and indices */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = min_t(int, rss_i, f->limit);
	f->indices = rss_i;
	f->mask = rss_m;

	/* disable ATR as it is not supported when multiple TCs are enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/* FCoE enabled queues require special configuration indexed
	 * by feature specific indices and offset. Here we map FCoE
	 * indices onto the DCB queue pairs allowing FCoE to own
	 * configuration later.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		u8 tc = ixgbe_fcoe_get_tc(adapter);

		f = &adapter->ring_feature[RING_F_FCOE];
		f->indices = min_t(u16, rss_i, f->limit);
		f->offset = rss_i * tc;
	}

#endif /* IXGBE_FCOE */
	for (i = 0; i < tcs; i++)
		netdev_set_tc_queue(dev, i, rss_i, rss_i * i);

	adapter->num_tx_queues = rss_i * tcs;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = rss_i * tcs;

	return true;
}

#endif
/**
 * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices
 * @adapter: board private structure to initialize
 *
 * When SR-IOV (Single Root IO Virtualization) is enabled, allocate queues
 * and VM pools where appropriate.  If RSS is available, then also try and
 * enable RSS and map accordingly.
 *
 **/
static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
	u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit;
	u16 vmdq_m = 0;
	u16 rss_i = adapter->ring_feature[RING_F_RSS].limit;
	u16 rss_m = IXGBE_RSS_DISABLED_MASK;
#ifdef IXGBE_FCOE
	u16 fcoe_i = 0;
#endif
	bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1);

	/* only proceed if SR-IOV is enabled */
	if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
		return false;

	/* Add starting offset to total pool count */
	vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset;

	/* double check we are limited to maximum pools */
	vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i);

	/* 64 pool mode with 2 queues per pool */
	if ((vmdq_i > 32) || (vmdq_i > 16 && pools)) {
		vmdq_m = IXGBE_82599_VMDQ_2Q_MASK;
		rss_m = IXGBE_RSS_2Q_MASK;
		rss_i = min_t(u16, rss_i, 2);
	/* 32 pool mode with up to 4 queues per pool */
	} else {
		vmdq_m = IXGBE_82599_VMDQ_4Q_MASK;
		rss_m = IXGBE_RSS_4Q_MASK;
		/* We can support 4, 2, or 1 queues */
		rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1;
	}

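	/* Worked example (illustrative): a request for 40 pools selects the
	 * 64-pool mode above (2 queues per pool, rss_i capped at 2), which
	 * would leave 128 - 40 * 2 = 48 queues for FCoE below; a request for
	 * 16 pools stays in 32-pool mode with up to 4 RSS queues per pool.
	 */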
#ifdef IXGBE_FCOE
	/* queues in the remaining pools are available for FCoE */
	fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m));

#endif
	/* remove the starting offset from the pool count */
	vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset;

	/* save features for later use */
	adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i;
	adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m;

	/* limit RSS based on user input and save for later use */
	adapter->ring_feature[RING_F_RSS].indices = rss_i;
	adapter->ring_feature[RING_F_RSS].mask = rss_m;

	adapter->num_rx_pools = vmdq_i;
	adapter->num_rx_queues_per_pool = rss_i;

	adapter->num_rx_queues = vmdq_i * rss_i;
	adapter->num_tx_queues = vmdq_i * rss_i;
	adapter->num_xdp_queues = 0;

	/* disable ATR as it is not supported when VMDq is enabled */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

#ifdef IXGBE_FCOE
	/*
	 * FCoE can use rings from adjacent buffers to allow RSS
	 * like behavior.  To account for this we need to add the
	 * FCoE indices to the total ring count.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct ixgbe_ring_feature *fcoe;

		fcoe = &adapter->ring_feature[RING_F_FCOE];

		/* limit ourselves based on feature limits */
		fcoe_i = min_t(u16, fcoe_i, fcoe->limit);

		if (vmdq_i > 1 && fcoe_i) {
			/* alloc queues for FCoE separately */
			fcoe->indices = fcoe_i;
			fcoe->offset = vmdq_i * rss_i;
		} else {
			/* merge FCoE queues with RSS queues */
			fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus());

			/* limit indices to rss_i if MSI-X is disabled */
			if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
				fcoe_i = rss_i;

			/* attempt to reserve some queues for just FCoE */
			fcoe->indices = min_t(u16, fcoe_i, fcoe->limit);
			fcoe->offset = fcoe_i - fcoe->indices;

			fcoe_i -= rss_i;
		}

		/* add queues to adapter */
		adapter->num_tx_queues += fcoe_i;
		adapter->num_rx_queues += fcoe_i;
	}

#endif
	return true;
}

/**
 * ixgbe_set_rss_queues - Allocate queues for RSS
 * @adapter: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_ring_feature *f;
	u16 rss_i;

	/* set mask for the RSS queue limit: 16 queues, or 64 on X550+ */
	f = &adapter->ring_feature[RING_F_RSS];
	rss_i = f->limit;

	f->indices = rss_i;

	if (hw->mac.type < ixgbe_mac_X550)
		f->mask = IXGBE_RSS_16Q_MASK;
	else
		f->mask = IXGBE_RSS_64Q_MASK;

	/* disable ATR by default, it will be configured below */
	adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;

	/*
	 * Use Flow Director in addition to RSS to ensure the best
	 * distribution of flows across cores, even when an FDIR flow
	 * isn't matched.
	 */
	if (rss_i > 1 && adapter->atr_sample_rate) {
		f = &adapter->ring_feature[RING_F_FDIR];

		rss_i = f->indices = f->limit;

		if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
			adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
	}

#ifdef IXGBE_FCOE
	/*
	 * FCoE can exist on the same rings as standard network traffic;
	 * however, it is preferred to avoid that if possible.  In order
	 * to get the best performance we allocate as many FCoE queues
	 * as we can and we place them at the end of the ring array to
	 * avoid sharing queues with standard RSS on systems with 24 or
	 * more CPUs.
	 */
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
		struct net_device *dev = adapter->netdev;
		u16 fcoe_i;

		f = &adapter->ring_feature[RING_F_FCOE];

		/* merge FCoE queues with RSS queues */
		fcoe_i = min_t(u16, f->limit + rss_i, num_online_cpus());
		fcoe_i = min_t(u16, fcoe_i, dev->num_tx_queues);

		/* limit indices to rss_i if MSI-X is disabled */
		if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
			fcoe_i = rss_i;

		/* attempt to reserve some queues for just FCoE */
		f->indices = min_t(u16, fcoe_i, f->limit);
		f->offset = fcoe_i - f->indices;
		rss_i = max_t(u16, fcoe_i, rss_i);
	}

#endif /* IXGBE_FCOE */
	adapter->num_rx_queues = rss_i;
	adapter->num_tx_queues = rss_i;
	adapter->num_xdp_queues = ixgbe_xdp_queues(adapter);

	return true;
}

/**
 * ixgbe_set_num_queues - Allocate queues for device, feature dependent
 * @adapter: board private structure to initialize
 *
 * This is the top level queue allocation routine.  The order here is very
 * important, starting with the most features turned on at once, and ending
 * with the smallest set of features.  This way large combinations can be
 * allocated if they're turned on, and smaller combinations are the
 * fallthrough conditions.
 *
 **/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
	/* Start with base case */
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_pools = adapter->num_rx_queues;
	adapter->num_rx_queues_per_pool = 1;

#ifdef CONFIG_IXGBE_DCB
	if (ixgbe_set_dcb_sriov_queues(adapter))
		return;

	if (ixgbe_set_dcb_queues(adapter))
		return;

#endif
	if (ixgbe_set_sriov_queues(adapter))
		return;

	ixgbe_set_rss_queues(adapter);
}

/**
 * ixgbe_acquire_msix_vectors - acquire MSI-X vectors
 * @adapter: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int i, vectors, vector_threshold;

	/* We start by asking for one vector per queue pair with XDP queues
	 * being stacked with TX queues.
	 */
	vectors = max(adapter->num_rx_queues, adapter->num_tx_queues);
	vectors = max(vectors, adapter->num_xdp_queues);

	/* It is easy to be greedy for MSI-X vectors. However, it really
	 * doesn't do much good if we have a lot more vectors than CPUs. We'll
	 * be somewhat conservative and only ask for (roughly) the same number
	 * of vectors as there are CPUs.
	 */
	vectors = min_t(int, vectors, num_online_cpus());

	/* Some vectors are necessary for non-queue interrupts */
	vectors += NON_Q_VECTORS;

	/* Hardware can only support a maximum of hw.mac->max_msix_vectors.
	 * With features such as RSS and VMDq, we can easily surpass the
	 * number of Rx and Tx descriptor queues supported by our device.
	 * Thus, we cap the maximum in the rare cases where the CPU count also
	 * exceeds our vector limit.
	 */
	vectors = min_t(int, vectors, hw->mac.max_msix_vectors);

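	/* Example (illustrative): with 24 queue pairs on an 8-CPU system the
	 * code above requests min(24, 8) + NON_Q_VECTORS vectors, i.e. 9
	 * assuming NON_Q_VECTORS is 1 (the single "other"/link interrupt).
	 */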
	/* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0]
	 * handler, and (2) an Other (Link Status Change, etc.) handler.
	 */
	vector_threshold = MIN_MSIX_COUNT;

	adapter->msix_entries = kcalloc(vectors,
					sizeof(struct msix_entry),
					GFP_KERNEL);
	if (!adapter->msix_entries)
		return -ENOMEM;

	for (i = 0; i < vectors; i++)
		adapter->msix_entries[i].entry = i;

	vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
					vector_threshold, vectors);

	if (vectors < 0) {
		/* A negative count of allocated vectors indicates an error in
		 * acquiring within the specified range of MSI-X vectors.
		 */
		e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n",
			   vectors);

		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;

		return vectors;
	}

	/* we successfully allocated some number of vectors within our
	 * requested range.
	 */
	adapter->flags |= IXGBE_FLAG_MSIX_ENABLED;

	/* Adjust for only the vectors we'll use, which is the minimum of
	 * max_q_vectors and the number of vectors we were allocated.
	 */
	vectors -= NON_Q_VECTORS;
	adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors);

	return 0;
}

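/* Push a ring onto the head of a q_vector's ring container list; note that
 * iteration with ixgbe_for_each_ring() therefore visits rings in
 * most-recently-added-first order.
 */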
static void ixgbe_add_ring(struct ixgbe_ring *ring,
			   struct ixgbe_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @xdp_count: total number of XDP rings to allocate
 * @xdp_idx: index of first XDP ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
				int v_count, int v_idx,
				int txr_count, int txr_idx,
				int xdp_count, int xdp_idx,
				int rxr_count, int rxr_idx)
{
	struct ixgbe_q_vector *q_vector;
	struct ixgbe_ring *ring;
	int node = NUMA_NO_NODE;
	int cpu = -1;
	int ring_count, size;
	u8 tcs = netdev_get_num_tc(adapter->netdev);

	ring_count = txr_count + rxr_count + xdp_count;
	size = sizeof(struct ixgbe_q_vector) +
	       (sizeof(struct ixgbe_ring) * ring_count);

	/* customize cpu for Flow Director mapping */
	if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) {
		u16 rss_i = adapter->ring_feature[RING_F_RSS].indices;

		if (rss_i > 1 && adapter->atr_sample_rate) {
			if (cpu_online(v_idx)) {
				cpu = v_idx;
				node = cpu_to_node(cpu);
			}
		}
	}

	/* allocate q_vector and rings */
	q_vector = kzalloc_node(size, GFP_KERNEL, node);
	if (!q_vector)
		q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* setup affinity mask and node */
	if (cpu != -1)
		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
	q_vector->numa_node = node;

#ifdef CONFIG_IXGBE_DCA
	/* initialize CPU for DCA */
	q_vector->cpu = -1;

#endif
	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       ixgbe_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;
	q_vector->v_idx = v_idx;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (txr_count && !rxr_count) {
		/* tx only vector */
		if (adapter->tx_itr_setting == 1)
			q_vector->itr = IXGBE_12K_ITR;
		else
			q_vector->itr = adapter->tx_itr_setting;
	} else {
		/* rx or rx/tx vector */
		if (adapter->rx_itr_setting == 1)
			q_vector->itr = IXGBE_20K_ITR;
		else
			q_vector->itr = adapter->rx_itr_setting;
	}

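	/* Rings are interleaved across q_vectors: the Tx and Rx loops below
	 * advance their ring index by v_count per iteration (XDP rings are
	 * assigned contiguously instead).  Illustratively, with v_count = 4
	 * and 8 Tx rings, vector 0 would pick up Tx rings 0 and 4.
	 */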
	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				txr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = txr_idx;

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (xdp_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		ixgbe_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = xdp_idx;
		set_ring_xdp(ring);

		/* assign ring to adapter */
		adapter->xdp_ring[xdp_idx] = ring;

		/* update count and index */
		xdp_count--;
		xdp_idx++;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		ixgbe_add_ring(ring, &q_vector->rx);

		/*
		 * 82599 errata, UDP frames with a 0 checksum
		 * can be marked as checksum errors.
		 */
		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
			set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);

#ifdef IXGBE_FCOE
		if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
			struct ixgbe_ring_feature *f;

			f = &adapter->ring_feature[RING_F_FCOE];
			if ((rxr_idx >= f->offset) &&
			    (rxr_idx < f->offset + f->indices))
				set_bit(__IXGBE_RX_FCOE, &ring->state);
		}

#endif /* IXGBE_FCOE */
		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		if (adapter->num_rx_pools > 1)
			ring->queue_index =
				rxr_idx % adapter->num_rx_queues_per_pool;
		else
			ring->queue_index = rxr_idx;

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
	struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
	struct ixgbe_ring *ring;

	/* XDP rings live in the Tx container, so distinguish them here;
	 * clearing tx_ring[] for an XDP ring would clear the wrong slot.
	 */
	ixgbe_for_each_ring(ring, q_vector->tx) {
		if (ring_is_xdp(ring))
			adapter->xdp_ring[ring->queue_index] = NULL;
		else
			adapter->tx_ring[ring->queue_index] = NULL;
	}

	ixgbe_for_each_ring(ring, q_vector->rx)
		adapter->rx_ring[ring->queue_index] = NULL;

	adapter->q_vector[v_idx] = NULL;
	napi_hash_del(&q_vector->napi);
	netif_napi_del(&q_vector->napi);

	/*
	 * ixgbe_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	kfree_rcu(q_vector, rcu);
}

/**
 * ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int xdp_remaining = adapter->num_xdp_queues;
	int rxr_idx = 0, txr_idx = 0, xdp_idx = 0, v_idx = 0;
	int err;

	/* only one q_vector if MSI-X is disabled. */
	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
		q_vectors = 1;

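	/* Distribution sketch (illustrative): the first loop below gives Rx
	 * rings dedicated vectors when there are at least as many vectors as
	 * total rings; otherwise the DIV_ROUND_UP() split applies, e.g. 16 Rx
	 * and 16 Tx queues over 8 q_vectors yields 2 Rx and 2 Tx rings per
	 * vector.
	 */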
	if (q_vectors >= (rxr_remaining + txr_remaining + xdp_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
						   0, 0, 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		int xqpv = DIV_ROUND_UP(xdp_remaining, q_vectors - v_idx);

		err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx,
					   tqpv, txr_idx,
					   xqpv, xdp_idx,
					   rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		xdp_remaining -= xqpv;
		rxr_idx++;
		txr_idx++;
		xdp_idx += xqpv;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		ixgbe_free_q_vector(adapter, v_idx);
}

static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
		pci_disable_msi(adapter->pdev);
	}
}

/**
 * ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
	int err;

	/* We will try to get MSI-X interrupts first */
	if (!ixgbe_acquire_msix_vectors(adapter))
		return;

	/* At this point, we do not have MSI-X capabilities. We need to
	 * reconfigure or disable various features which require MSI-X
	 * capability.
	 */

	/* Disable DCB unless we only have a single traffic class */
	if (netdev_get_num_tc(adapter->netdev) > 1) {
		e_dev_warn("Number of DCB TCs exceeds number of available queues. Disabling DCB support.\n");
		netdev_reset_tc(adapter->netdev);

		if (adapter->hw.mac.type == ixgbe_mac_82598EB)
			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;

		adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
		adapter->temp_dcb_cfg.pfc_mode_enable = false;
		adapter->dcb_cfg.pfc_mode_enable = false;
	}

	adapter->dcb_cfg.num_tcs.pg_tcs = 1;
	adapter->dcb_cfg.num_tcs.pfc_tcs = 1;

	/* Disable SR-IOV support */
	e_dev_warn("Disabling SR-IOV support\n");
	ixgbe_disable_sriov(adapter);

	/* Disable RSS */
	e_dev_warn("Disabling RSS support\n");
	adapter->ring_feature[RING_F_RSS].limit = 1;

	/* recalculate number of queues now that many features have been
	 * changed or disabled.
	 */
	ixgbe_set_num_queues(adapter);
	adapter->num_q_vectors = 1;

	err = pci_enable_msi(adapter->pdev);
	if (err)
		e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. Error: %d\n",
			   err);
	else
		adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
}

/**
 * ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
 * @adapter: board private structure to initialize
 *
 * We determine which interrupt scheme to use based on...
 * - Kernel support (MSI, MSI-X)
 *   - which can be user-defined (via MODULE_PARAM)
 * - Hardware queue count (num_*_queues)
 *   - defined by miscellaneous hardware support/features (RSS, etc.)
 **/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	int err;

	/* Number of supported queues */
	ixgbe_set_num_queues(adapter);

	/* Set interrupt mode */
	ixgbe_set_interrupt_capability(adapter);

	err = ixgbe_alloc_q_vectors(adapter);
	if (err) {
		e_dev_err("Unable to allocate memory for queue vectors\n");
		goto err_alloc_q_vectors;
	}

	ixgbe_cache_ring_register(adapter);

	e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u, XDP Queue count = %u\n",
		   (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
		   adapter->num_rx_queues, adapter->num_tx_queues,
		   adapter->num_xdp_queues);

	set_bit(__IXGBE_DOWN, &adapter->state);

	return 0;

err_alloc_q_vectors:
	ixgbe_reset_interrupt_capability(adapter);
	return err;
}

/**
 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @adapter: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
	adapter->num_tx_queues = 0;
	adapter->num_xdp_queues = 0;
	adapter->num_rx_queues = 0;

	ixgbe_free_q_vectors(adapter);
	ixgbe_reset_interrupt_capability(adapter);
}

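/* Stage an advanced (DEXT) Tx context descriptor at next_to_use.  Note that
 * this only writes the descriptor into the ring; the caller is expected to
 * follow up with the data descriptor(s) and the eventual tail bump.
 */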
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
		       u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
	struct ixgbe_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}