// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <net/page_pool.h>
#include <linux/iopoll.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"

/**
 * wx_poll - NAPI polling RX/TX cleanup routine
 * @napi: napi struct with our device's info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 **/
static int wx_poll(struct napi_struct *napi, int budget)
{
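	/* Placeholder for now: Rx/Tx descriptor cleanup is not implemented
	 * yet, so report that no work was done.
	 */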
	return 0;
}

/**
 * wx_set_rss_queues - Allocate queues for RSS
 * @wx: board private structure to initialize
 *
 * This is our "base" multiqueue mode.  RSS (Receive Side Scaling) will try
 * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
 *
 **/
static void wx_set_rss_queues(struct wx *wx)
{
	wx->num_rx_queues = wx->mac.max_rx_queues;
	wx->num_tx_queues = wx->mac.max_tx_queues;
}

static void wx_set_num_queues(struct wx *wx)
{
	/* Start with base case */
	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;
	wx->queues_per_pool = 1;

	wx_set_rss_queues(wx);
}

/**
 * wx_acquire_msix_vectors - acquire MSI-X vectors
 * @wx: board private structure
 *
 * Attempts to acquire a suitable range of MSI-X vector interrupts. Will
 * return a negative error code if unable to acquire MSI-X vectors for any
 * reason.
 */
static int wx_acquire_msix_vectors(struct wx *wx)
{
	struct irq_affinity affd = {0, };
	int nvecs, i;

	nvecs = min_t(int, num_online_cpus(), wx->mac.max_msix_vectors);

	wx->msix_entries = kcalloc(nvecs,
				   sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!wx->msix_entries)
		return -ENOMEM;

	nvecs = pci_alloc_irq_vectors_affinity(wx->pdev, nvecs,
					       nvecs,
					       PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					       &affd);
	if (nvecs < 0) {
		wx_err(wx, "Failed to allocate MSI-X interrupts. Err: %d\n", nvecs);
		kfree(wx->msix_entries);
		wx->msix_entries = NULL;
		return nvecs;
	}

	for (i = 0; i < nvecs; i++) {
		wx->msix_entries[i].entry = i;
		wx->msix_entries[i].vector = pci_irq_vector(wx->pdev, i);
	}

	/* one for msix_other */
	nvecs -= 1;
	wx->num_q_vectors = nvecs;
	wx->num_rx_queues = nvecs;
	wx->num_tx_queues = nvecs;

	return 0;
}

/**
 * wx_set_interrupt_capability - set MSI-X or MSI if supported
 * @wx: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int wx_set_interrupt_capability(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	int nvecs, ret;

	/* We will try to get MSI-X interrupts first */
	ret = wx_acquire_msix_vectors(wx);
	if (ret == 0 || (ret == -ENOMEM))
		return ret;

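	/* MSI-X is not available (or failed for a reason other than -ENOMEM),
	 * so fall back to a single queue pair driven by MSI or legacy IRQ.
	 */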
	wx->num_rx_queues = 1;
	wx->num_tx_queues = 1;
	wx->num_q_vectors = 1;

	/* minimum one for queue, one for misc */
	nvecs = 1;
	nvecs = pci_alloc_irq_vectors(pdev, nvecs,
				      nvecs, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvecs == 1) {
		if (pdev->msi_enabled)
			wx_err(wx, "Fallback to MSI.\n");
		else
			wx_err(wx, "Fallback to LEGACY.\n");
	} else {
		wx_err(wx, "Failed to allocate MSI/LEGACY interrupts. Error: %d\n", nvecs);
		return nvecs;
	}

	pdev->irq = pci_irq_vector(pdev, 0);

	return 0;
}

/**
 * wx_cache_ring_rss - Descriptor ring to register mapping for RSS
 * @wx: board private structure to initialize
 *
 * Cache the descriptor ring offsets for RSS.
 *
 **/
static void wx_cache_ring_rss(struct wx *wx)
{
	u16 i;

	for (i = 0; i < wx->num_rx_queues; i++)
		wx->rx_ring[i]->reg_idx = i;

	for (i = 0; i < wx->num_tx_queues; i++)
		wx->tx_ring[i]->reg_idx = i;
}

static void wx_add_ring(struct wx_ring *ring, struct wx_ring_container *head)
{
	ring->next = head->ring;
	head->ring = ring;
	head->count++;
}

/**
 * wx_alloc_q_vector - Allocate memory for a single interrupt vector
 * @wx: board private structure to initialize
 * @v_count: q_vectors allocated on wx, used for ring interleaving
 * @v_idx: index of vector in wx struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector.  If allocation fails we return -ENOMEM.
 **/
static int wx_alloc_q_vector(struct wx *wx,
			     unsigned int v_count, unsigned int v_idx,
			     unsigned int txr_count, unsigned int txr_idx,
			     unsigned int rxr_count, unsigned int rxr_idx)
{
	struct wx_q_vector *q_vector;
	int ring_count, default_itr;
	struct wx_ring *ring;

	/* note this will allocate space for the ring structure as well! */
	ring_count = txr_count + rxr_count;

	q_vector = kzalloc(struct_size(q_vector, ring, ring_count),
			   GFP_KERNEL);
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(wx->netdev, &q_vector->napi,
		       wx_poll);

	/* tie q_vector and wx together */
	wx->q_vector[v_idx] = q_vector;
	q_vector->wx = wx;
	q_vector->v_idx = v_idx;
	if (cpu_online(v_idx))
		q_vector->numa_node = cpu_to_node(v_idx);

	/* initialize pointer to rings */
	ring = q_vector->ring;

	if (wx->mac.type == wx_mac_sp)
		default_itr = WX_12K_ITR;
	else
		default_itr = WX_7K_ITR;
	/* initialize ITR */
	if (txr_count && !rxr_count)
		/* tx only vector */
		q_vector->itr = wx->tx_itr_setting ?
				default_itr : wx->tx_itr_setting;
	else
		/* rx or rx/tx vector */
		q_vector->itr = wx->rx_itr_setting ?
				default_itr : wx->rx_itr_setting;

	while (txr_count) {
		/* assign generic ring traits */
		ring->dev = &wx->pdev->dev;
		ring->netdev = wx->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		wx_add_ring(ring, &q_vector->tx);

		/* apply Tx specific ring traits */
		ring->count = wx->tx_ring_count;

		ring->queue_index = txr_idx;

		/* assign ring to wx */
		wx->tx_ring[txr_idx] = ring;

		/* update count and index */
		txr_count--;
		txr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	while (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &wx->pdev->dev;
		ring->netdev = wx->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		wx_add_ring(ring, &q_vector->rx);

		/* apply Rx specific ring traits */
		ring->count = wx->rx_ring_count;
		ring->queue_index = rxr_idx;

		/* assign ring to wx */
		wx->rx_ring[rxr_idx] = ring;

		/* update count and index */
		rxr_count--;
		rxr_idx += v_count;

		/* push pointer to next ring */
		ring++;
	}

	return 0;
}

/**
 * wx_free_q_vector - Free memory allocated for specific interrupt vector
 * @wx: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void wx_free_q_vector(struct wx *wx, int v_idx)
{
	struct wx_q_vector *q_vector = wx->q_vector[v_idx];
	struct wx_ring *ring;

	wx_for_each_ring(ring, q_vector->tx)
		wx->tx_ring[ring->queue_index] = NULL;

	wx_for_each_ring(ring, q_vector->rx)
		wx->rx_ring[ring->queue_index] = NULL;

	wx->q_vector[v_idx] = NULL;
	netif_napi_del(&q_vector->napi);
	kfree_rcu(q_vector, rcu);
}

/**
 * wx_alloc_q_vectors - Allocate memory for interrupt vectors
 * @wx: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int wx_alloc_q_vectors(struct wx *wx)
{
	unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	unsigned int rxr_remaining = wx->num_rx_queues;
	unsigned int txr_remaining = wx->num_tx_queues;
	unsigned int q_vectors = wx->num_q_vectors;
	int rqpv, tqpv;
	int err;

	for (; v_idx < q_vectors; v_idx++) {
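		/* spread the remaining Rx/Tx rings as evenly as possible
		 * across the remaining vectors
		 */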
		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
		err = wx_alloc_q_vector(wx, q_vectors, v_idx,
					tqpv, txr_idx,
					rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	wx->num_tx_queues = 0;
	wx->num_rx_queues = 0;
	wx->num_q_vectors = 0;

	while (v_idx--)
		wx_free_q_vector(wx, v_idx);

	return -ENOMEM;
}

/**
 * wx_free_q_vectors - Free memory allocated for interrupt vectors
 * @wx: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void wx_free_q_vectors(struct wx *wx)
{
	int v_idx = wx->num_q_vectors;

	wx->num_tx_queues = 0;
	wx->num_rx_queues = 0;
	wx->num_q_vectors = 0;

	while (v_idx--)
		wx_free_q_vector(wx, v_idx);
}

void wx_reset_interrupt_capability(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	if (!pdev->msi_enabled && !pdev->msix_enabled)
		return;

	/* free the entry table before pci_free_irq_vectors() clears
	 * pdev->msix_enabled
	 */
	if (pdev->msix_enabled) {
		kfree(wx->msix_entries);
		wx->msix_entries = NULL;
	}
	pci_free_irq_vectors(wx->pdev);
}
EXPORT_SYMBOL(wx_reset_interrupt_capability);

/**
 * wx_clear_interrupt_scheme - Clear the current interrupt scheme settings
 * @wx: board private structure to clear interrupt scheme on
 *
 * We go through and clear interrupt specific resources and reset the structure
 * to pre-load conditions
 **/
void wx_clear_interrupt_scheme(struct wx *wx)
{
	wx_free_q_vectors(wx);
	wx_reset_interrupt_capability(wx);
}
EXPORT_SYMBOL(wx_clear_interrupt_scheme);

int wx_init_interrupt_scheme(struct wx *wx)
{
	int ret;

	/* Number of supported queues */
	wx_set_num_queues(wx);

	/* Set interrupt mode */
	ret = wx_set_interrupt_capability(wx);
	if (ret) {
		wx_err(wx, "Failed to allocate irq vectors.\n");
		return ret;
	}

	/* Allocate memory for queues */
	ret = wx_alloc_q_vectors(wx);
	if (ret) {
		wx_err(wx, "Unable to allocate memory for queue vectors.\n");
		wx_reset_interrupt_capability(wx);
		return ret;
	}

	wx_cache_ring_rss(wx);

	return 0;
}
EXPORT_SYMBOL(wx_init_interrupt_scheme);

irqreturn_t wx_msix_clean_rings(int __always_unused irq, void *data)
{
	struct wx_q_vector *q_vector = data;

	/* EIAM disabled interrupts (on this vector) for us */
	if (q_vector->rx.ring || q_vector->tx.ring)
		napi_schedule_irqoff(&q_vector->napi);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL(wx_msix_clean_rings);

void wx_free_irq(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	int vector;

	if (!(pdev->msix_enabled)) {
		free_irq(pdev->irq, wx);
		return;
	}

	for (vector = 0; vector < wx->num_q_vectors; vector++) {
		struct wx_q_vector *q_vector = wx->q_vector[vector];
		struct msix_entry *entry = &wx->msix_entries[vector];

		/* free only the irqs that were actually requested */
		if (!q_vector->rx.ring && !q_vector->tx.ring)
			continue;

		free_irq(entry->vector, q_vector);
	}

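	/* the last MSI-X entry is the misc/other-causes vector, which was
	 * requested with the wx structure itself as its cookie
	 */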
	free_irq(wx->msix_entries[vector].vector, wx);
}
EXPORT_SYMBOL(wx_free_irq);

/**
 * wx_setup_isb_resources - allocate interrupt status resources
 * @wx: board private structure
 *
 * Return 0 on success, negative on failure
 **/
int wx_setup_isb_resources(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

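	/* the interrupt status block is four 32-bit words of DMA-coherent
	 * memory that the device uses to report pending interrupt causes
	 */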
	wx->isb_mem = dma_alloc_coherent(&pdev->dev,
					 sizeof(u32) * 4,
					 &wx->isb_dma,
					 GFP_KERNEL);
	if (!wx->isb_mem) {
		wx_err(wx, "Alloc isb_mem failed\n");
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(wx_setup_isb_resources);

/**
 * wx_free_isb_resources - free interrupt status resources
 * @wx: board private structure
 **/
void wx_free_isb_resources(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	dma_free_coherent(&pdev->dev, sizeof(u32) * 4,
			  wx->isb_mem, wx->isb_dma);
	wx->isb_mem = NULL;
}
EXPORT_SYMBOL(wx_free_isb_resources);

u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx)
{
	u32 cur_tag = 0;

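	/* latch the header tag for this cause, then hand back the
	 * corresponding status word from the ISB
	 */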
	cur_tag = wx->isb_mem[WX_ISB_HEADER];
	wx->isb_tag[idx] = cur_tag;

	return (__force u32)cpu_to_le32(wx->isb_mem[idx]);
}
EXPORT_SYMBOL(wx_misc_isb);

/**
 * wx_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
 * @wx: pointer to wx struct
 * @direction: 0 for Rx, 1 for Tx, -1 for other causes
 * @queue: queue to map the corresponding interrupt to
 * @msix_vector: the vector to map to the corresponding queue
 *
 **/
static void wx_set_ivar(struct wx *wx, s8 direction,
			u16 queue, u16 msix_vector)
{
	u32 ivar, index;

	if (direction == -1) {
		/* other causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
		index = 0;
		ivar = rd32(wx, WX_PX_MISC_IVAR);
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		wr32(wx, WX_PX_MISC_IVAR, ivar);
	} else {
		/* tx or rx causes */
		msix_vector |= WX_PX_IVAR_ALLOC_VAL;
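		/* each WX_PX_IVAR register holds four 8-bit entries: the Rx
		 * and Tx causes of an even/odd queue pair
		 */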
		index = ((16 * (queue & 1)) + (8 * direction));
		ivar = rd32(wx, WX_PX_IVAR(queue >> 1));
		ivar &= ~(0xFF << index);
		ivar |= (msix_vector << index);
		wr32(wx, WX_PX_IVAR(queue >> 1), ivar);
	}
}

/**
 * wx_write_eitr - write EITR register in hardware specific way
 * @q_vector: structure containing interrupt and ring information
 *
 * This function is made to be called by ethtool and by the driver
 * when it needs to update EITR registers at runtime.  Hardware
 * specific quirks/differences are taken care of here.
 */
static void wx_write_eitr(struct wx_q_vector *q_vector)
{
	struct wx *wx = q_vector->wx;
	int v_idx = q_vector->v_idx;
	u32 itr_reg;

	if (wx->mac.type == wx_mac_sp)
		itr_reg = q_vector->itr & WX_SP_MAX_EITR;
	else
		itr_reg = q_vector->itr & WX_EM_MAX_EITR;

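	/* set the counter write-disable bit so this update does not reset
	 * the running interval counter
	 */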
	itr_reg |= WX_PX_ITR_CNT_WDIS;

	wr32(wx, WX_PX_ITR(v_idx), itr_reg);
}

/**
 * wx_configure_vectors - Configure vectors for hardware
 * @wx: board private structure
 *
 * wx_configure_vectors sets up the hardware to properly generate MSI-X/MSI/LEGACY
 * interrupts.
 **/
void wx_configure_vectors(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	u32 eitrsel = 0;
	u16 v_idx;

	if (pdev->msix_enabled) {
		/* Populate MSIX to EITR Select */
		wr32(wx, WX_PX_ITRSEL, eitrsel);
		/* use EIAM to auto-mask when MSI-X interrupt is asserted
		 * this saves a register write for every interrupt
		 */
		wr32(wx, WX_PX_GPIE, WX_PX_GPIE_MODEL);
	} else {
		/* legacy interrupts, use EIAM to auto-mask when reading EICR,
		 * specifically only auto mask tx and rx interrupts.
		 */
		wr32(wx, WX_PX_GPIE, 0);
	}

	/* Populate the IVAR table and set the ITR values to the
	 * corresponding register.
	 */
	for (v_idx = 0; v_idx < wx->num_q_vectors; v_idx++) {
		struct wx_q_vector *q_vector = wx->q_vector[v_idx];
		struct wx_ring *ring;

		wx_for_each_ring(ring, q_vector->rx)
			wx_set_ivar(wx, 0, ring->reg_idx, v_idx);

		wx_for_each_ring(ring, q_vector->tx)
			wx_set_ivar(wx, 1, ring->reg_idx, v_idx);

		wx_write_eitr(q_vector);
	}

	wx_set_ivar(wx, -1, 0, v_idx);
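	/* in MSI-X mode, program a fixed throttle value for the misc vector */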
	if (pdev->msix_enabled)
		wr32(wx, WX_PX_ITR(v_idx), 1950);
}
EXPORT_SYMBOL(wx_configure_vectors);

/**
 * wx_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
static void wx_free_rx_resources(struct wx_ring *rx_ring)
{
	kvfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;

	if (rx_ring->page_pool) {
		page_pool_destroy(rx_ring->page_pool);
		rx_ring->page_pool = NULL;
	}
}

/**
 * wx_free_all_rx_resources - Free Rx Resources for All Queues
 * @wx: pointer to hardware structure
 *
 * Free all receive software resources
 **/
static void wx_free_all_rx_resources(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_rx_queues; i++)
		wx_free_rx_resources(wx->rx_ring[i]);
}

/**
 * wx_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
static void wx_free_tx_resources(struct wx_ring *tx_ring)
{
	kvfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * wx_free_all_tx_resources - Free Tx Resources for All Queues
 * @wx: pointer to hardware structure
 *
 * Free all transmit software resources
 **/
static void wx_free_all_tx_resources(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->num_tx_queues; i++)
		wx_free_tx_resources(wx->tx_ring[i]);
}

void wx_free_resources(struct wx *wx)
{
	wx_free_isb_resources(wx);
	wx_free_all_rx_resources(wx);
	wx_free_all_tx_resources(wx);
}
EXPORT_SYMBOL(wx_free_resources);

static int wx_alloc_page_pool(struct wx_ring *rx_ring)
{
	int ret = 0;

	struct page_pool_params pp_params = {
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.order = 0,
		.pool_size = rx_ring->size,
		.nid = dev_to_node(rx_ring->dev),
		.dev = rx_ring->dev,
		.dma_dir = DMA_FROM_DEVICE,
		.offset = 0,
		.max_len = PAGE_SIZE,
	};

	rx_ring->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_ring->page_pool)) {
		/* grab the error code before clearing the pointer */
		ret = PTR_ERR(rx_ring->page_pool);
		rx_ring->page_pool = NULL;
	}

	return ret;
}

/**
 * wx_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
static int wx_setup_rx_resources(struct wx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = NUMA_NO_NODE;
	int size, ret;

	size = sizeof(struct wx_rx_buffer) * rx_ring->count;

	if (rx_ring->q_vector)
		numa_node = rx_ring->q_vector->numa_node;

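	/* prefer the q_vector's NUMA node for the buffer info array and the
	 * descriptor ring, falling back to any node if that fails
	 */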
	rx_ring->rx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
	if (!rx_ring->rx_buffer_info)
		rx_ring->rx_buffer_info = kvmalloc(size, GFP_KERNEL);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union wx_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	set_dev_node(dev, numa_node);
	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc) {
		set_dev_node(dev, orig_node);
		rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
						   &rx_ring->dma, GFP_KERNEL);
	}

	if (!rx_ring->desc)
		goto err;

	ret = wx_alloc_page_pool(rx_ring);
	if (ret < 0) {
		dev_err(rx_ring->dev, "Page pool creation failed: %d\n", ret);
		goto err;
	}

	return 0;
err:
	kvfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * wx_setup_all_rx_resources - allocate all queues Rx resources
 * @wx: pointer to hardware structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_all_rx_resources(struct wx *wx)
{
	int i, err = 0;

	for (i = 0; i < wx->num_rx_queues; i++) {
		err = wx_setup_rx_resources(wx->rx_ring[i]);
		if (!err)
			continue;

		wx_err(wx, "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		wx_free_rx_resources(wx->rx_ring[i]);
	return err;
}

/**
 * wx_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_tx_resources(struct wx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int orig_node = dev_to_node(dev);
	int numa_node = NUMA_NO_NODE;
	int size;

	size = sizeof(struct wx_tx_buffer) * tx_ring->count;

	if (tx_ring->q_vector)
		numa_node = tx_ring->q_vector->numa_node;

	tx_ring->tx_buffer_info = kvmalloc_node(size, GFP_KERNEL, numa_node);
	if (!tx_ring->tx_buffer_info)
		tx_ring->tx_buffer_info = kvmalloc(size, GFP_KERNEL);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union wx_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	set_dev_node(dev, numa_node);
	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc) {
		set_dev_node(dev, orig_node);
		tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
						   &tx_ring->dma, GFP_KERNEL);
	}

	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	kvfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}

/**
 * wx_setup_all_tx_resources - allocate all queues Tx resources
 * @wx: pointer to private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int wx_setup_all_tx_resources(struct wx *wx)
{
	int i, err = 0;

	for (i = 0; i < wx->num_tx_queues; i++) {
		err = wx_setup_tx_resources(wx->tx_ring[i]);
		if (!err)
			continue;

		wx_err(wx, "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		wx_free_tx_resources(wx->tx_ring[i]);
	return err;
}

int wx_setup_resources(struct wx *wx)
{
	int err;

	/* allocate transmit descriptors */
	err = wx_setup_all_tx_resources(wx);
	if (err)
		return err;

	/* allocate receive descriptors */
	err = wx_setup_all_rx_resources(wx);
	if (err)
		goto err_free_tx;

	err = wx_setup_isb_resources(wx);
	if (err)
		goto err_free_rx;

	return 0;

err_free_rx:
	wx_free_all_rx_resources(wx);
err_free_tx:
	wx_free_all_tx_resources(wx);

	return err;
}
EXPORT_SYMBOL(wx_setup_resources);

MODULE_LICENSE("GPL");