xref: /openbmc/linux/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c (revision 04295878beac396dae47ba93141cae0d9386e7ef)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for Marvell PPv2 network controller for Armada 375 SoC.
4  *
5  * Copyright (C) 2014 Marvell
6  *
7  * Marcin Wojtas <mw@semihalf.com>
8  */
9 
10 #include <linux/acpi.h>
11 #include <linux/kernel.h>
12 #include <linux/netdevice.h>
13 #include <linux/etherdevice.h>
14 #include <linux/platform_device.h>
15 #include <linux/skbuff.h>
16 #include <linux/inetdevice.h>
17 #include <linux/mbus.h>
18 #include <linux/module.h>
19 #include <linux/mfd/syscon.h>
20 #include <linux/interrupt.h>
21 #include <linux/cpumask.h>
22 #include <linux/of.h>
23 #include <linux/of_irq.h>
24 #include <linux/of_mdio.h>
25 #include <linux/of_net.h>
26 #include <linux/of_address.h>
27 #include <linux/of_device.h>
28 #include <linux/phy.h>
29 #include <linux/phylink.h>
30 #include <linux/phy/phy.h>
31 #include <linux/ptp_classify.h>
32 #include <linux/clk.h>
33 #include <linux/hrtimer.h>
34 #include <linux/ktime.h>
35 #include <linux/regmap.h>
36 #include <uapi/linux/ppp_defs.h>
37 #include <net/ip.h>
38 #include <net/ipv6.h>
39 #include <net/tso.h>
40 #include <linux/bpf_trace.h>
41 
42 #include "mvpp2.h"
43 #include "mvpp2_prs.h"
44 #include "mvpp2_cls.h"
45 
46 enum mvpp2_bm_pool_log_num {
47 	MVPP2_BM_SHORT,
48 	MVPP2_BM_LONG,
49 	MVPP2_BM_JUMBO,
50 	MVPP2_BM_POOLS_NUM
51 };
52 
53 static struct {
54 	int pkt_size;
55 	int buf_num;
56 } mvpp2_pools[MVPP2_BM_POOLS_NUM];
57 
58 /* The prototype is added here to be used in start_dev when using ACPI. This
59  * will be removed once phylink is used for all modes (dt+ACPI).
60  */
61 static void mvpp2_acpi_start(struct mvpp2_port *port);
62 
63 /* Queue modes */
64 #define MVPP2_QDIST_SINGLE_MODE	0
65 #define MVPP2_QDIST_MULTI_MODE	1
66 
67 static int queue_mode = MVPP2_QDIST_MULTI_MODE;
68 
69 module_param(queue_mode, int, 0444);
70 MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");
71 
72 /* Utility/helper methods */
73 
74 void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
75 {
76 	writel(data, priv->swth_base[0] + offset);
77 }
78 
79 u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
80 {
81 	return readl(priv->swth_base[0] + offset);
82 }
83 
84 static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
85 {
86 	return readl_relaxed(priv->swth_base[0] + offset);
87 }
88 
89 static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
90 {
91 	return cpu % priv->nthreads;
92 }
93 
94 static struct page_pool *
95 mvpp2_create_page_pool(struct device *dev, int num, int len,
96 		       enum dma_data_direction dma_dir)
97 {
98 	struct page_pool_params pp_params = {
99 		/* internal DMA mapping in page_pool */
100 		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
101 		.pool_size = num,
102 		.nid = NUMA_NO_NODE,
103 		.dev = dev,
104 		.dma_dir = dma_dir,
105 		.offset = MVPP2_SKB_HEADROOM,
106 		.max_len = len,
107 	};
108 
109 	return page_pool_create(&pp_params);
110 }
111 
112 /* These accessors should be used to access:
113  *
114  * - per-thread registers, where each thread has its own copy of the
115  *   register.
116  *
117  *   MVPP2_BM_VIRT_ALLOC_REG
118  *   MVPP2_BM_ADDR_HIGH_ALLOC
119  *   MVPP22_BM_ADDR_HIGH_RLS_REG
120  *   MVPP2_BM_VIRT_RLS_REG
121  *   MVPP2_ISR_RX_TX_CAUSE_REG
122  *   MVPP2_ISR_RX_TX_MASK_REG
123  *   MVPP2_TXQ_NUM_REG
124  *   MVPP2_AGGR_TXQ_UPDATE_REG
125  *   MVPP2_TXQ_RSVD_REQ_REG
126  *   MVPP2_TXQ_RSVD_RSLT_REG
127  *   MVPP2_TXQ_SENT_REG
128  *   MVPP2_RXQ_NUM_REG
129  *
130  * - global registers that must be accessed through a specific thread
131  *   window, because they are related to an access to a per-thread
132  *   register
133  *
134  *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
135  *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
136  *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
137  *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
138  *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
139  *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
140  *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
141  *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
142  *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
143  *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
144  *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
145  *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
146  *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
147  */
148 static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
149 			       u32 offset, u32 data)
150 {
151 	writel(data, priv->swth_base[thread] + offset);
152 }
153 
154 static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
155 			     u32 offset)
156 {
157 	return readl(priv->swth_base[thread] + offset);
158 }
159 
160 static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
161 				       u32 offset, u32 data)
162 {
163 	writel_relaxed(data, priv->swth_base[thread] + offset);
164 }
165 
166 static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
167 				     u32 offset)
168 {
169 	return readl_relaxed(priv->swth_base[thread] + offset);
170 }
171 
172 static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
173 					    struct mvpp2_tx_desc *tx_desc)
174 {
175 	if (port->priv->hw_version == MVPP21)
176 		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
177 	else
178 		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
179 		       MVPP2_DESC_DMA_MASK;
180 }
181 
182 static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
183 				      struct mvpp2_tx_desc *tx_desc,
184 				      dma_addr_t dma_addr)
185 {
186 	dma_addr_t addr, offset;
187 
188 	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
189 	offset = dma_addr & MVPP2_TX_DESC_ALIGN;
190 
191 	if (port->priv->hw_version == MVPP21) {
192 		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
193 		tx_desc->pp21.packet_offset = offset;
194 	} else {
195 		__le64 val = cpu_to_le64(addr);
196 
197 		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
198 		tx_desc->pp22.buf_dma_addr_ptp |= val;
199 		tx_desc->pp22.packet_offset = offset;
200 	}
201 }
202 
203 static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
204 				    struct mvpp2_tx_desc *tx_desc)
205 {
206 	if (port->priv->hw_version == MVPP21)
207 		return le16_to_cpu(tx_desc->pp21.data_size);
208 	else
209 		return le16_to_cpu(tx_desc->pp22.data_size);
210 }
211 
212 static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
213 				  struct mvpp2_tx_desc *tx_desc,
214 				  size_t size)
215 {
216 	if (port->priv->hw_version == MVPP21)
217 		tx_desc->pp21.data_size = cpu_to_le16(size);
218 	else
219 		tx_desc->pp22.data_size = cpu_to_le16(size);
220 }
221 
222 static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
223 				 struct mvpp2_tx_desc *tx_desc,
224 				 unsigned int txq)
225 {
226 	if (port->priv->hw_version == MVPP21)
227 		tx_desc->pp21.phys_txq = txq;
228 	else
229 		tx_desc->pp22.phys_txq = txq;
230 }
231 
232 static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
233 				 struct mvpp2_tx_desc *tx_desc,
234 				 unsigned int command)
235 {
236 	if (port->priv->hw_version == MVPP21)
237 		tx_desc->pp21.command = cpu_to_le32(command);
238 	else
239 		tx_desc->pp22.command = cpu_to_le32(command);
240 }
241 
242 static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
243 					    struct mvpp2_tx_desc *tx_desc)
244 {
245 	if (port->priv->hw_version == MVPP21)
246 		return tx_desc->pp21.packet_offset;
247 	else
248 		return tx_desc->pp22.packet_offset;
249 }
250 
251 static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
252 					    struct mvpp2_rx_desc *rx_desc)
253 {
254 	if (port->priv->hw_version == MVPP21)
255 		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
256 	else
257 		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
258 		       MVPP2_DESC_DMA_MASK;
259 }
260 
261 static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
262 					     struct mvpp2_rx_desc *rx_desc)
263 {
264 	if (port->priv->hw_version == MVPP21)
265 		return le32_to_cpu(rx_desc->pp21.buf_cookie);
266 	else
267 		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
268 		       MVPP2_DESC_DMA_MASK;
269 }
270 
271 static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
272 				    struct mvpp2_rx_desc *rx_desc)
273 {
274 	if (port->priv->hw_version == MVPP21)
275 		return le16_to_cpu(rx_desc->pp21.data_size);
276 	else
277 		return le16_to_cpu(rx_desc->pp22.data_size);
278 }
279 
280 static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
281 				   struct mvpp2_rx_desc *rx_desc)
282 {
283 	if (port->priv->hw_version == MVPP21)
284 		return le32_to_cpu(rx_desc->pp21.status);
285 	else
286 		return le32_to_cpu(rx_desc->pp22.status);
287 }
288 
289 static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
290 {
291 	txq_pcpu->txq_get_index++;
292 	if (txq_pcpu->txq_get_index == txq_pcpu->size)
293 		txq_pcpu->txq_get_index = 0;
294 }
295 
296 static void mvpp2_txq_inc_put(struct mvpp2_port *port,
297 			      struct mvpp2_txq_pcpu *txq_pcpu,
298 			      void *data,
299 			      struct mvpp2_tx_desc *tx_desc,
300 			      enum mvpp2_tx_buf_type buf_type)
301 {
302 	struct mvpp2_txq_pcpu_buf *tx_buf =
303 		txq_pcpu->buffs + txq_pcpu->txq_put_index;
304 	tx_buf->type = buf_type;
305 	if (buf_type == MVPP2_TYPE_SKB)
306 		tx_buf->skb = data;
307 	else
308 		tx_buf->xdpf = data;
309 	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
310 	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
311 		mvpp2_txdesc_offset_get(port, tx_desc);
312 	txq_pcpu->txq_put_index++;
313 	if (txq_pcpu->txq_put_index == txq_pcpu->size)
314 		txq_pcpu->txq_put_index = 0;
315 }
316 
317 /* Get number of maximum RXQ */
318 static int mvpp2_get_nrxqs(struct mvpp2 *priv)
319 {
320 	unsigned int nrxqs;
321 
322 	if (priv->hw_version == MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
323 		return 1;
324 
325 	/* According to the PPv2.2 datasheet and our experiments on
326 	 * PPv2.1, RX queues have an allocation granularity of 4 (when
327 	 * more than a single one on PPv2.2).
328 	 * Round up to nearest multiple of 4.
329 	 */
330 	nrxqs = (num_possible_cpus() + 3) & ~0x3;
331 	if (nrxqs > MVPP2_PORT_MAX_RXQ)
332 		nrxqs = MVPP2_PORT_MAX_RXQ;
333 
334 	return nrxqs;
335 }
336 
337 /* Get number of physical egress port */
338 static inline int mvpp2_egress_port(struct mvpp2_port *port)
339 {
340 	return MVPP2_MAX_TCONT + port->id;
341 }
342 
343 /* Get number of physical TXQ */
344 static inline int mvpp2_txq_phys(int port, int txq)
345 {
346 	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
347 }
348 
349 /* Returns a struct page if page_pool is set, otherwise a buffer */
350 static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
351 			      struct page_pool *page_pool)
352 {
353 	if (page_pool)
354 		return page_pool_dev_alloc_pages(page_pool);
355 
356 	if (likely(pool->frag_size <= PAGE_SIZE))
357 		return netdev_alloc_frag(pool->frag_size);
358 
359 	return kmalloc(pool->frag_size, GFP_ATOMIC);
360 }
361 
362 static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
363 			    struct page_pool *page_pool, void *data)
364 {
365 	if (page_pool)
366 		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
367 	else if (likely(pool->frag_size <= PAGE_SIZE))
368 		skb_free_frag(data);
369 	else
370 		kfree(data);
371 }
372 
373 /* Buffer Manager configuration routines */
374 
375 /* Create pool */
376 static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
377 				struct mvpp2_bm_pool *bm_pool, int size)
378 {
379 	u32 val;
380 
381 	/* Number of buffer pointers must be a multiple of 16, as per
382 	 * hardware constraints
383 	 */
384 	if (!IS_ALIGNED(size, 16))
385 		return -EINVAL;
386 
387 	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 needs 16
388 	 * bytes per buffer pointer
389 	 */
390 	if (priv->hw_version == MVPP21)
391 		bm_pool->size_bytes = 2 * sizeof(u32) * size;
392 	else
393 		bm_pool->size_bytes = 2 * sizeof(u64) * size;
394 
395 	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
396 						&bm_pool->dma_addr,
397 						GFP_KERNEL);
398 	if (!bm_pool->virt_addr)
399 		return -ENOMEM;
400 
401 	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
402 			MVPP2_BM_POOL_PTR_ALIGN)) {
403 		dma_free_coherent(dev, bm_pool->size_bytes,
404 				  bm_pool->virt_addr, bm_pool->dma_addr);
405 		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
406 			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
407 		return -ENOMEM;
408 	}
409 
410 	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
411 		    lower_32_bits(bm_pool->dma_addr));
412 	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
413 
414 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
415 	val |= MVPP2_BM_START_MASK;
416 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
417 
418 	bm_pool->size = size;
419 	bm_pool->pkt_size = 0;
420 	bm_pool->buf_num = 0;
421 
422 	return 0;
423 }
424 
425 /* Set pool buffer size */
426 static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
427 				      struct mvpp2_bm_pool *bm_pool,
428 				      int buf_size)
429 {
430 	u32 val;
431 
432 	bm_pool->buf_size = buf_size;
433 
434 	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
435 	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
436 }
437 
438 static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
439 				    struct mvpp2_bm_pool *bm_pool,
440 				    dma_addr_t *dma_addr,
441 				    phys_addr_t *phys_addr)
442 {
443 	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
444 
445 	*dma_addr = mvpp2_thread_read(priv, thread,
446 				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
447 	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);
448 
449 	if (priv->hw_version == MVPP22) {
450 		u32 val;
451 		u32 dma_addr_highbits, phys_addr_highbits;
452 
453 		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
454 		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
455 		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
456 			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;
457 
458 		if (sizeof(dma_addr_t) == 8)
459 			*dma_addr |= (u64)dma_addr_highbits << 32;
460 
461 		if (sizeof(phys_addr_t) == 8)
462 			*phys_addr |= (u64)phys_addr_highbits << 32;
463 	}
464 
465 	put_cpu();
466 }
467 
468 /* Free all buffers from the pool */
469 static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
470 			       struct mvpp2_bm_pool *bm_pool, int buf_num)
471 {
472 	struct page_pool *pp = NULL;
473 	int i;
474 
475 	if (buf_num > bm_pool->buf_num) {
476 		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
477 		     bm_pool->id, buf_num);
478 		buf_num = bm_pool->buf_num;
479 	}
480 
481 	if (priv->percpu_pools)
482 		pp = priv->page_pool[bm_pool->id];
483 
484 	for (i = 0; i < buf_num; i++) {
485 		dma_addr_t buf_dma_addr;
486 		phys_addr_t buf_phys_addr;
487 		void *data;
488 
489 		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
490 					&buf_dma_addr, &buf_phys_addr);
491 
492 		if (!pp)
493 			dma_unmap_single(dev, buf_dma_addr,
494 					 bm_pool->buf_size, DMA_FROM_DEVICE);
495 
496 		data = (void *)phys_to_virt(buf_phys_addr);
497 		if (!data)
498 			break;
499 
500 		mvpp2_frag_free(bm_pool, pp, data);
501 	}
502 
503 	/* Update BM driver with number of buffers removed from pool */
504 	bm_pool->buf_num -= i;
505 }
506 
507 /* Check number of buffers in BM pool */
508 static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
509 {
510 	int buf_num = 0;
511 
512 	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
513 				    MVPP22_BM_POOL_PTRS_NUM_MASK;
514 	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
515 				    MVPP2_BM_BPPI_PTR_NUM_MASK;
516 
517 	/* HW has one buffer ready which is not reflected in the counters */
518 	if (buf_num)
519 		buf_num += 1;
520 
521 	return buf_num;
522 }
523 
524 /* Cleanup pool */
525 static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
526 				 struct mvpp2_bm_pool *bm_pool)
527 {
528 	int buf_num;
529 	u32 val;
530 
531 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
532 	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);
533 
534 	/* Check buffer counters after free */
535 	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
536 	if (buf_num) {
537 		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
538 		     bm_pool->id, bm_pool->buf_num);
539 		return 0;
540 	}
541 
542 	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
543 	val |= MVPP2_BM_STOP_MASK;
544 	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
545 
546 	if (priv->percpu_pools) {
547 		page_pool_destroy(priv->page_pool[bm_pool->id]);
548 		priv->page_pool[bm_pool->id] = NULL;
549 	}
550 
551 	dma_free_coherent(dev, bm_pool->size_bytes,
552 			  bm_pool->virt_addr,
553 			  bm_pool->dma_addr);
554 	return 0;
555 }
556 
557 static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
558 {
559 	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
560 	struct mvpp2_bm_pool *bm_pool;
561 
562 	if (priv->percpu_pools)
563 		poolnum = mvpp2_get_nrxqs(priv) * 2;
564 
565 	/* Create all pools with maximum size */
566 	size = MVPP2_BM_POOL_SIZE_MAX;
567 	for (i = 0; i < poolnum; i++) {
568 		bm_pool = &priv->bm_pools[i];
569 		bm_pool->id = i;
570 		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
571 		if (err)
572 			goto err_unroll_pools;
573 		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
574 	}
575 	return 0;
576 
577 err_unroll_pools:
578 	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
579 	for (i = i - 1; i >= 0; i--)
580 		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
581 	return err;
582 }
583 
584 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
585 {
586 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
587 	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
588 	struct mvpp2_port *port;
589 
590 	if (priv->percpu_pools) {
591 		for (i = 0; i < priv->port_count; i++) {
592 			port = priv->port_list[i];
593 			if (port->xdp_prog) {
594 				dma_dir = DMA_BIDIRECTIONAL;
595 				break;
596 			}
597 		}
598 
599 		poolnum = mvpp2_get_nrxqs(priv) * 2;
600 		for (i = 0; i < poolnum; i++) {
601 			/* the pool in use */
602 			int pn = i / (poolnum / 2);
603 
604 			priv->page_pool[i] =
605 				mvpp2_create_page_pool(dev,
606 						       mvpp2_pools[pn].buf_num,
607 						       mvpp2_pools[pn].pkt_size,
608 						       dma_dir);
609 			if (IS_ERR(priv->page_pool[i])) {
610 				int j;
611 
612 				for (j = 0; j < i; j++) {
613 					page_pool_destroy(priv->page_pool[j]);
614 					priv->page_pool[j] = NULL;
615 				}
616 				return PTR_ERR(priv->page_pool[i]);
617 			}
618 		}
619 	}
620 
621 	dev_info(dev, "using %d %s buffers\n", poolnum,
622 		 priv->percpu_pools ? "per-cpu" : "shared");
623 
624 	for (i = 0; i < poolnum; i++) {
625 		/* Mask BM all interrupts */
626 		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
627 		/* Clear BM cause register */
628 		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
629 	}
630 
631 	/* Allocate and initialize BM pools */
632 	priv->bm_pools = devm_kcalloc(dev, poolnum,
633 				      sizeof(*priv->bm_pools), GFP_KERNEL);
634 	if (!priv->bm_pools)
635 		return -ENOMEM;
636 
637 	err = mvpp2_bm_pools_init(dev, priv);
638 	if (err < 0)
639 		return err;
640 	return 0;
641 }
642 
643 static void mvpp2_setup_bm_pool(void)
644 {
645 	/* Short pool */
646 	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
647 	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
648 
649 	/* Long pool */
650 	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
651 	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
652 
653 	/* Jumbo pool */
654 	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
655 	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
656 }
657 
658 /* Attach long pool to rxq */
659 static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
660 				    int lrxq, int long_pool)
661 {
662 	u32 val, mask;
663 	int prxq;
664 
665 	/* Get queue physical ID */
666 	prxq = port->rxqs[lrxq]->id;
667 
668 	if (port->priv->hw_version == MVPP21)
669 		mask = MVPP21_RXQ_POOL_LONG_MASK;
670 	else
671 		mask = MVPP22_RXQ_POOL_LONG_MASK;
672 
673 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
674 	val &= ~mask;
675 	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
676 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
677 }
678 
679 /* Attach short pool to rxq */
680 static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
681 				     int lrxq, int short_pool)
682 {
683 	u32 val, mask;
684 	int prxq;
685 
686 	/* Get queue physical ID */
687 	prxq = port->rxqs[lrxq]->id;
688 
689 	if (port->priv->hw_version == MVPP21)
690 		mask = MVPP21_RXQ_POOL_SHORT_MASK;
691 	else
692 		mask = MVPP22_RXQ_POOL_SHORT_MASK;
693 
694 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
695 	val &= ~mask;
696 	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
697 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
698 }
699 
700 static void *mvpp2_buf_alloc(struct mvpp2_port *port,
701 			     struct mvpp2_bm_pool *bm_pool,
702 			     struct page_pool *page_pool,
703 			     dma_addr_t *buf_dma_addr,
704 			     phys_addr_t *buf_phys_addr,
705 			     gfp_t gfp_mask)
706 {
707 	dma_addr_t dma_addr;
708 	struct page *page;
709 	void *data;
710 
711 	data = mvpp2_frag_alloc(bm_pool, page_pool);
712 	if (!data)
713 		return NULL;
714 
715 	if (page_pool) {
716 		page = (struct page *)data;
717 		dma_addr = page_pool_get_dma_addr(page);
718 		data = page_to_virt(page);
719 	} else {
720 		dma_addr = dma_map_single(port->dev->dev.parent, data,
721 					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
722 					  DMA_FROM_DEVICE);
723 		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
724 			mvpp2_frag_free(bm_pool, NULL, data);
725 			return NULL;
726 		}
727 	}
728 	*buf_dma_addr = dma_addr;
729 	*buf_phys_addr = virt_to_phys(data);
730 
731 	return data;
732 }
733 
734 /* Release buffer to BM */
735 static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
736 				     dma_addr_t buf_dma_addr,
737 				     phys_addr_t buf_phys_addr)
738 {
739 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
740 	unsigned long flags = 0;
741 
742 	if (test_bit(thread, &port->priv->lock_map))
743 		spin_lock_irqsave(&port->bm_lock[thread], flags);
744 
745 	if (port->priv->hw_version == MVPP22) {
746 		u32 val = 0;
747 
748 		if (sizeof(dma_addr_t) == 8)
749 			val |= upper_32_bits(buf_dma_addr) &
750 				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;
751 
752 		if (sizeof(phys_addr_t) == 8)
753 			val |= (upper_32_bits(buf_phys_addr)
754 				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
755 				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;
756 
757 		mvpp2_thread_write_relaxed(port->priv, thread,
758 					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
759 	}
760 
761 	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
762 	 * returned in the "cookie" field of the RX
763 	 * descriptor. Instead of storing the virtual address, we
764 	 * store the physical address
765 	 */
766 	mvpp2_thread_write_relaxed(port->priv, thread,
767 				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
768 	mvpp2_thread_write_relaxed(port->priv, thread,
769 				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
770 
771 	if (test_bit(thread, &port->priv->lock_map))
772 		spin_unlock_irqrestore(&port->bm_lock[thread], flags);
773 
774 	put_cpu();
775 }
776 
777 /* Allocate buffers for the pool */
778 static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
779 			     struct mvpp2_bm_pool *bm_pool, int buf_num)
780 {
781 	int i, buf_size, total_size;
782 	dma_addr_t dma_addr;
783 	phys_addr_t phys_addr;
784 	struct page_pool *pp = NULL;
785 	void *buf;
786 
787 	if (port->priv->percpu_pools &&
788 	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
789 		netdev_err(port->dev,
790 			   "attempted to use jumbo frames with per-cpu pools");
791 		return 0;
792 	}
793 
794 	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
795 	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
796 
797 	if (buf_num < 0 ||
798 	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
799 		netdev_err(port->dev,
800 			   "cannot allocate %d buffers for pool %d\n",
801 			   buf_num, bm_pool->id);
802 		return 0;
803 	}
804 
805 	if (port->priv->percpu_pools)
806 		pp = port->priv->page_pool[bm_pool->id];
807 	for (i = 0; i < buf_num; i++) {
808 		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
809 				      &phys_addr, GFP_KERNEL);
810 		if (!buf)
811 			break;
812 
813 		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
814 				  phys_addr);
815 	}
816 
817 	/* Update BM driver with number of buffers added to pool */
818 	bm_pool->buf_num += i;
819 
820 	netdev_dbg(port->dev,
821 		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
822 		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
823 
824 	netdev_dbg(port->dev,
825 		   "pool %d: %d of %d buffers added\n",
826 		   bm_pool->id, i, buf_num);
827 	return i;
828 }
829 
830 /* Notify the driver that BM pool is being used as specific type and return the
831  * pool pointer on success
832  */
833 static struct mvpp2_bm_pool *
834 mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned pool, int pkt_size)
835 {
836 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
837 	int num;
838 
839 	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
840 	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
841 		netdev_err(port->dev, "Invalid pool %d\n", pool);
842 		return NULL;
843 	}
844 
845 	/* Allocate buffers in case BM pool is used as long pool, but packet
846 	 * size doesn't match MTU or BM pool hasn't being used yet
847 	 */
848 	if (new_pool->pkt_size == 0) {
849 		int pkts_num;
850 
851 		/* Set default buffer number or free all the buffers in case
852 		 * the pool is not empty
853 		 */
854 		pkts_num = new_pool->buf_num;
855 		if (pkts_num == 0) {
856 			if (port->priv->percpu_pools) {
857 				if (pool < port->nrxqs)
858 					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
859 				else
860 					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
861 			} else {
862 				pkts_num = mvpp2_pools[pool].buf_num;
863 			}
864 		} else {
865 			mvpp2_bm_bufs_free(port->dev->dev.parent,
866 					   port->priv, new_pool, pkts_num);
867 		}
868 
869 		new_pool->pkt_size = pkt_size;
870 		new_pool->frag_size =
871 			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
872 			MVPP2_SKB_SHINFO_SIZE;
873 
874 		/* Allocate buffers for this pool */
875 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
876 		if (num != pkts_num) {
877 			WARN(1, "pool %d: %d of %d allocated\n",
878 			     new_pool->id, num, pkts_num);
879 			return NULL;
880 		}
881 	}
882 
883 	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
884 				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
885 
886 	return new_pool;
887 }
888 
889 static struct mvpp2_bm_pool *
890 mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
891 			 unsigned int pool, int pkt_size)
892 {
893 	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
894 	int num;
895 
896 	if (pool > port->nrxqs * 2) {
897 		netdev_err(port->dev, "Invalid pool %d\n", pool);
898 		return NULL;
899 	}
900 
901 	/* Allocate buffers in case BM pool is used as long pool, but packet
902 	 * size doesn't match MTU or BM pool hasn't being used yet
903 	 */
904 	if (new_pool->pkt_size == 0) {
905 		int pkts_num;
906 
907 		/* Set default buffer number or free all the buffers in case
908 		 * the pool is not empty
909 		 */
910 		pkts_num = new_pool->buf_num;
911 		if (pkts_num == 0)
912 			pkts_num = mvpp2_pools[type].buf_num;
913 		else
914 			mvpp2_bm_bufs_free(port->dev->dev.parent,
915 					   port->priv, new_pool, pkts_num);
916 
917 		new_pool->pkt_size = pkt_size;
918 		new_pool->frag_size =
919 			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
920 			MVPP2_SKB_SHINFO_SIZE;
921 
922 		/* Allocate buffers for this pool */
923 		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
924 		if (num != pkts_num) {
925 			WARN(1, "pool %d: %d of %d allocated\n",
926 			     new_pool->id, num, pkts_num);
927 			return NULL;
928 		}
929 	}
930 
931 	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
932 				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
933 
934 	return new_pool;
935 }
936 
937 /* Initialize pools for swf, shared buffers variant */
938 static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
939 {
940 	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
941 	int rxq;
942 
943 	/* If port pkt_size is higher than 1518B:
944 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
945 	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
946 	 */
947 	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
948 		long_log_pool = MVPP2_BM_JUMBO;
949 		short_log_pool = MVPP2_BM_LONG;
950 	} else {
951 		long_log_pool = MVPP2_BM_LONG;
952 		short_log_pool = MVPP2_BM_SHORT;
953 	}
954 
955 	if (!port->pool_long) {
956 		port->pool_long =
957 			mvpp2_bm_pool_use(port, long_log_pool,
958 					  mvpp2_pools[long_log_pool].pkt_size);
959 		if (!port->pool_long)
960 			return -ENOMEM;
961 
962 		port->pool_long->port_map |= BIT(port->id);
963 
964 		for (rxq = 0; rxq < port->nrxqs; rxq++)
965 			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
966 	}
967 
968 	if (!port->pool_short) {
969 		port->pool_short =
970 			mvpp2_bm_pool_use(port, short_log_pool,
971 					  mvpp2_pools[short_log_pool].pkt_size);
972 		if (!port->pool_short)
973 			return -ENOMEM;
974 
975 		port->pool_short->port_map |= BIT(port->id);
976 
977 		for (rxq = 0; rxq < port->nrxqs; rxq++)
978 			mvpp2_rxq_short_pool_set(port, rxq,
979 						 port->pool_short->id);
980 	}
981 
982 	return 0;
983 }
984 
985 /* Initialize pools for swf, percpu buffers variant */
986 static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
987 {
988 	struct mvpp2_bm_pool *bm_pool;
989 	int i;
990 
991 	for (i = 0; i < port->nrxqs; i++) {
992 		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
993 						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
994 		if (!bm_pool)
995 			return -ENOMEM;
996 
997 		bm_pool->port_map |= BIT(port->id);
998 		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
999 	}
1000 
1001 	for (i = 0; i < port->nrxqs; i++) {
1002 		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
1003 						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
1004 		if (!bm_pool)
1005 			return -ENOMEM;
1006 
1007 		bm_pool->port_map |= BIT(port->id);
1008 		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
1009 	}
1010 
1011 	port->pool_long = NULL;
1012 	port->pool_short = NULL;
1013 
1014 	return 0;
1015 }
1016 
1017 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
1018 {
1019 	if (port->priv->percpu_pools)
1020 		return mvpp2_swf_bm_pool_init_percpu(port);
1021 	else
1022 		return mvpp2_swf_bm_pool_init_shared(port);
1023 }
1024 
1025 static void mvpp2_set_hw_csum(struct mvpp2_port *port,
1026 			      enum mvpp2_bm_pool_log_num new_long_pool)
1027 {
1028 	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1029 
1030 	/* Update L4 checksum when jumbo enable/disable on port.
1031 	 * Only port 0 supports hardware checksum offload due to
1032 	 * the Tx FIFO size limitation.
1033 	 * Also, don't set NETIF_F_HW_CSUM because L3_offset in TX descriptor
1034 	 * has 7 bits, so the maximum L3 offset is 128.
1035 	 */
1036 	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
1037 		port->dev->features &= ~csums;
1038 		port->dev->hw_features &= ~csums;
1039 	} else {
1040 		port->dev->features |= csums;
1041 		port->dev->hw_features |= csums;
1042 	}
1043 }
1044 
1045 static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
1046 {
1047 	struct mvpp2_port *port = netdev_priv(dev);
1048 	enum mvpp2_bm_pool_log_num new_long_pool;
1049 	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
1050 
1051 	if (port->priv->percpu_pools)
1052 		goto out_set;
1053 
1054 	/* If port MTU is higher than 1518B:
1055 	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
1056 	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
1057 	 */
1058 	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
1059 		new_long_pool = MVPP2_BM_JUMBO;
1060 	else
1061 		new_long_pool = MVPP2_BM_LONG;
1062 
1063 	if (new_long_pool != port->pool_long->id) {
1064 		/* Remove port from old short & long pool */
1065 		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
1066 						    port->pool_long->pkt_size);
1067 		port->pool_long->port_map &= ~BIT(port->id);
1068 		port->pool_long = NULL;
1069 
1070 		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
1071 						     port->pool_short->pkt_size);
1072 		port->pool_short->port_map &= ~BIT(port->id);
1073 		port->pool_short = NULL;
1074 
1075 		port->pkt_size =  pkt_size;
1076 
1077 		/* Add port to new short & long pool */
1078 		mvpp2_swf_bm_pool_init(port);
1079 
1080 		mvpp2_set_hw_csum(port, new_long_pool);
1081 	}
1082 
1083 out_set:
1084 	dev->mtu = mtu;
1085 	dev->wanted_features = dev->features;
1086 
1087 	netdev_update_features(dev);
1088 	return 0;
1089 }
1090 
1091 static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
1092 {
1093 	int i, sw_thread_mask = 0;
1094 
1095 	for (i = 0; i < port->nqvecs; i++)
1096 		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1097 
1098 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1099 		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
1100 }
1101 
1102 static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
1103 {
1104 	int i, sw_thread_mask = 0;
1105 
1106 	for (i = 0; i < port->nqvecs; i++)
1107 		sw_thread_mask |= port->qvecs[i].sw_thread_mask;
1108 
1109 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1110 		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
1111 }
1112 
1113 static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
1114 {
1115 	struct mvpp2_port *port = qvec->port;
1116 
1117 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1118 		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
1119 }
1120 
1121 static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
1122 {
1123 	struct mvpp2_port *port = qvec->port;
1124 
1125 	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
1126 		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
1127 }
1128 
1129 /* Mask the current thread's Rx/Tx interrupts
1130  * Called by on_each_cpu(), guaranteed to run with migration disabled,
1131  * using smp_processor_id() is OK.
1132  */
1133 static void mvpp2_interrupts_mask(void *arg)
1134 {
1135 	struct mvpp2_port *port = arg;
1136 
1137 	/* If the thread isn't used, don't do anything */
1138 	if (smp_processor_id() > port->priv->nthreads)
1139 		return;
1140 
1141 	mvpp2_thread_write(port->priv,
1142 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
1143 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
1144 }
1145 
1146 /* Unmask the current thread's Rx/Tx interrupts.
1147  * Called by on_each_cpu(), guaranteed to run with migration disabled,
1148  * using smp_processor_id() is OK.
1149  */
1150 static void mvpp2_interrupts_unmask(void *arg)
1151 {
1152 	struct mvpp2_port *port = arg;
1153 	u32 val;
1154 
1155 	/* If the thread isn't used, don't do anything */
1156 	if (smp_processor_id() > port->priv->nthreads)
1157 		return;
1158 
1159 	val = MVPP2_CAUSE_MISC_SUM_MASK |
1160 		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
1161 	if (port->has_tx_irqs)
1162 		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
1163 
1164 	mvpp2_thread_write(port->priv,
1165 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
1166 			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1167 }
1168 
1169 static void
1170 mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
1171 {
1172 	u32 val;
1173 	int i;
1174 
1175 	if (port->priv->hw_version != MVPP22)
1176 		return;
1177 
1178 	if (mask)
1179 		val = 0;
1180 	else
1181 		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);
1182 
1183 	for (i = 0; i < port->nqvecs; i++) {
1184 		struct mvpp2_queue_vector *v = port->qvecs + i;
1185 
1186 		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
1187 			continue;
1188 
1189 		mvpp2_thread_write(port->priv, v->sw_thread_id,
1190 				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
1191 	}
1192 }
1193 
1194 /* Only GOP port 0 has an XLG MAC */
1195 static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
1196 {
1197 	return port->gop_id == 0;
1198 }
1199 
1200 static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
1201 {
1202 	return !(port->priv->hw_version == MVPP22 && port->gop_id == 0);
1203 }
1204 
1205 /* Port configuration routines */
1206 static bool mvpp2_is_xlg(phy_interface_t interface)
1207 {
1208 	return interface == PHY_INTERFACE_MODE_10GBASER ||
1209 	       interface == PHY_INTERFACE_MODE_XAUI;
1210 }
1211 
1212 static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
1213 {
1214 	u32 old, val;
1215 
1216 	old = val = readl(ptr);
1217 	val &= ~mask;
1218 	val |= set;
1219 	if (old != val)
1220 		writel(val, ptr);
1221 }
1222 
1223 static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
1224 {
1225 	struct mvpp2 *priv = port->priv;
1226 	u32 val;
1227 
1228 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1229 	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
1230 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1231 
1232 	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1233 	if (port->gop_id == 2)
1234 		val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
1235 	else if (port->gop_id == 3)
1236 		val |= GENCONF_CTRL0_PORT1_RGMII_MII;
1237 	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1238 }
1239 
1240 static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
1241 {
1242 	struct mvpp2 *priv = port->priv;
1243 	u32 val;
1244 
1245 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1246 	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
1247 	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
1248 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1249 
1250 	if (port->gop_id > 1) {
1251 		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
1252 		if (port->gop_id == 2)
1253 			val &= ~GENCONF_CTRL0_PORT0_RGMII;
1254 		else if (port->gop_id == 3)
1255 			val &= ~GENCONF_CTRL0_PORT1_RGMII_MII;
1256 		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
1257 	}
1258 }
1259 
1260 static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
1261 {
1262 	struct mvpp2 *priv = port->priv;
1263 	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
1264 	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
1265 	u32 val;
1266 
1267 	val = readl(xpcs + MVPP22_XPCS_CFG0);
1268 	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
1269 		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
1270 	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
1271 	writel(val, xpcs + MVPP22_XPCS_CFG0);
1272 
1273 	val = readl(mpcs + MVPP22_MPCS_CTRL);
1274 	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
1275 	writel(val, mpcs + MVPP22_MPCS_CTRL);
1276 
1277 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
1278 	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
1279 	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
1280 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1281 }
1282 
1283 static int mvpp22_gop_init(struct mvpp2_port *port)
1284 {
1285 	struct mvpp2 *priv = port->priv;
1286 	u32 val;
1287 
1288 	if (!priv->sysctrl_base)
1289 		return 0;
1290 
1291 	switch (port->phy_interface) {
1292 	case PHY_INTERFACE_MODE_RGMII:
1293 	case PHY_INTERFACE_MODE_RGMII_ID:
1294 	case PHY_INTERFACE_MODE_RGMII_RXID:
1295 	case PHY_INTERFACE_MODE_RGMII_TXID:
1296 		if (!mvpp2_port_supports_rgmii(port))
1297 			goto invalid_conf;
1298 		mvpp22_gop_init_rgmii(port);
1299 		break;
1300 	case PHY_INTERFACE_MODE_SGMII:
1301 	case PHY_INTERFACE_MODE_1000BASEX:
1302 	case PHY_INTERFACE_MODE_2500BASEX:
1303 		mvpp22_gop_init_sgmii(port);
1304 		break;
1305 	case PHY_INTERFACE_MODE_10GBASER:
1306 		if (!mvpp2_port_supports_xlg(port))
1307 			goto invalid_conf;
1308 		mvpp22_gop_init_10gkr(port);
1309 		break;
1310 	default:
1311 		goto unsupported_conf;
1312 	}
1313 
1314 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
1315 	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
1316 	       GENCONF_PORT_CTRL1_EN(port->gop_id);
1317 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);
1318 
1319 	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
1320 	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
1321 	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);
1322 
1323 	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
1324 	val |= GENCONF_SOFT_RESET1_GOP;
1325 	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);
1326 
1327 unsupported_conf:
1328 	return 0;
1329 
1330 invalid_conf:
1331 	netdev_err(port->dev, "Invalid port configuration\n");
1332 	return -EINVAL;
1333 }
1334 
1335 static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
1336 {
1337 	u32 val;
1338 
1339 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1340 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1341 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1342 		/* Enable the GMAC link status irq for this port */
1343 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1344 		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1345 		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1346 	}
1347 
1348 	if (mvpp2_port_supports_xlg(port)) {
1349 		/* Enable the XLG/GIG irqs for this port */
1350 		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1351 		if (mvpp2_is_xlg(port->phy_interface))
1352 			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
1353 		else
1354 			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
1355 		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1356 	}
1357 }
1358 
1359 static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
1360 {
1361 	u32 val;
1362 
1363 	if (mvpp2_port_supports_xlg(port)) {
1364 		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
1365 		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
1366 			 MVPP22_XLG_EXT_INT_MASK_GIG);
1367 		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
1368 	}
1369 
1370 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
1371 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1372 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1373 		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
1374 		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
1375 		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
1376 	}
1377 }
1378 
1379 static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
1380 {
1381 	u32 val;
1382 
1383 	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
1384 		     MVPP22_GMAC_INT_SUM_MASK_PTP,
1385 		     MVPP22_GMAC_INT_SUM_MASK_PTP);
1386 
1387 	if (port->phylink ||
1388 	    phy_interface_mode_is_rgmii(port->phy_interface) ||
1389 	    phy_interface_mode_is_8023z(port->phy_interface) ||
1390 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
1391 		val = readl(port->base + MVPP22_GMAC_INT_MASK);
1392 		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
1393 		writel(val, port->base + MVPP22_GMAC_INT_MASK);
1394 	}
1395 
1396 	if (mvpp2_port_supports_xlg(port)) {
1397 		val = readl(port->base + MVPP22_XLG_INT_MASK);
1398 		val |= MVPP22_XLG_INT_MASK_LINK;
1399 		writel(val, port->base + MVPP22_XLG_INT_MASK);
1400 
1401 		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
1402 			     MVPP22_XLG_EXT_INT_MASK_PTP,
1403 			     MVPP22_XLG_EXT_INT_MASK_PTP);
1404 	}
1405 
1406 	mvpp22_gop_unmask_irq(port);
1407 }
1408 
1409 /* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
1410  *
1411  * The PHY mode used by the PPv2 driver comes from the network subsystem, while
1412  * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
1413  * differ.
1414  *
1415  * The COMPHY configures the serdes lanes regardless of the actual use of the
1416  * lanes by the physical layer. This is why configurations like
1417  * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
1418  */
1419 static int mvpp22_comphy_init(struct mvpp2_port *port)
1420 {
1421 	int ret;
1422 
1423 	if (!port->comphy)
1424 		return 0;
1425 
1426 	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
1427 			       port->phy_interface);
1428 	if (ret)
1429 		return ret;
1430 
1431 	return phy_power_on(port->comphy);
1432 }
1433 
1434 static void mvpp2_port_enable(struct mvpp2_port *port)
1435 {
1436 	u32 val;
1437 
1438 	if (mvpp2_port_supports_xlg(port) &&
1439 	    mvpp2_is_xlg(port->phy_interface)) {
1440 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1441 		val |= MVPP22_XLG_CTRL0_PORT_EN;
1442 		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
1443 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1444 	} else {
1445 		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1446 		val |= MVPP2_GMAC_PORT_EN_MASK;
1447 		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
1448 		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1449 	}
1450 }
1451 
1452 static void mvpp2_port_disable(struct mvpp2_port *port)
1453 {
1454 	u32 val;
1455 
1456 	if (mvpp2_port_supports_xlg(port) &&
1457 	    mvpp2_is_xlg(port->phy_interface)) {
1458 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
1459 		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
1460 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1461 	}
1462 
1463 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1464 	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
1465 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1466 }
1467 
1468 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
1469 static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
1470 {
1471 	u32 val;
1472 
1473 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
1474 		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
1475 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1476 }
1477 
1478 /* Configure loopback port */
1479 static void mvpp2_port_loopback_set(struct mvpp2_port *port,
1480 				    const struct phylink_link_state *state)
1481 {
1482 	u32 val;
1483 
1484 	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
1485 
1486 	if (state->speed == 1000)
1487 		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
1488 	else
1489 		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
1490 
1491 	if (phy_interface_mode_is_8023z(state->interface) ||
1492 	    state->interface == PHY_INTERFACE_MODE_SGMII)
1493 		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
1494 	else
1495 		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
1496 
1497 	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
1498 }
1499 
1500 enum {
1501 	ETHTOOL_XDP_REDIRECT,
1502 	ETHTOOL_XDP_PASS,
1503 	ETHTOOL_XDP_DROP,
1504 	ETHTOOL_XDP_TX,
1505 	ETHTOOL_XDP_TX_ERR,
1506 	ETHTOOL_XDP_XMIT,
1507 	ETHTOOL_XDP_XMIT_ERR,
1508 };
1509 
1510 struct mvpp2_ethtool_counter {
1511 	unsigned int offset;
1512 	const char string[ETH_GSTRING_LEN];
1513 	bool reg_is_64b;
1514 };
1515 
1516 static u64 mvpp2_read_count(struct mvpp2_port *port,
1517 			    const struct mvpp2_ethtool_counter *counter)
1518 {
1519 	u64 val;
1520 
1521 	val = readl(port->stats_base + counter->offset);
1522 	if (counter->reg_is_64b)
1523 		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;
1524 
1525 	return val;
1526 }
1527 
1528 /* Some counters are accessed indirectly by first writing an index to
1529  * MVPP2_CTRS_IDX. The index can represent various resources depending on the
1530  * register we access, it can be a hit counter for some classification tables,
1531  * a counter specific to a rxq, a txq or a buffer pool.
1532  */
1533 static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
1534 {
1535 	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
1536 	return mvpp2_read(priv, reg);
1537 }
1538 
1539 /* Due to the fact that software statistics and hardware statistics are, by
1540  * design, incremented at different moments in the chain of packet processing,
1541  * it is very likely that incoming packets could have been dropped after being
1542  * counted by hardware but before reaching software statistics (most probably
1543  * multicast packets), and in the oppposite way, during transmission, FCS bytes
1544  * are added in between as well as TSO skb will be split and header bytes added.
1545  * Hence, statistics gathered from userspace with ifconfig (software) and
1546  * ethtool (hardware) cannot be compared.
1547  */
1548 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1549 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1550 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1551 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1552 	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1553 	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1554 	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1555 	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1556 	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1557 	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1558 	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1559 	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1560 	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1561 	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1562 	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1563 	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1564 	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1565 	{ MVPP2_MIB_FC_SENT, "fc_sent" },
1566 	{ MVPP2_MIB_FC_RCVD, "fc_received" },
1567 	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1568 	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1569 	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1570 	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1571 	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1572 	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1573 	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1574 	{ MVPP2_MIB_COLLISION, "collision" },
1575 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
1576 };
1577 
1578 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1579 	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1580 	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1581 };
1582 
1583 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1584 	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1585 	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
1586 	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_euqueue_to_ddr" },
1587 	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1588 	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1589 	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1590 	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1591 	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1592 	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1593 };
1594 
1595 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1596 	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1597 	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1598 	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1599 	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1600 };
1601 
1602 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1603 	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1604 	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1605 	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1606 	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
1607 	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1608 	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1609 	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1610 };
1611 
1612 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1613 						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1614 						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
1615 						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
1616 						 ARRAY_SIZE(mvpp2_ethtool_xdp))
1617 
1618 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1619 				      u8 *data)
1620 {
1621 	struct mvpp2_port *port = netdev_priv(netdev);
1622 	int i, q;
1623 
1624 	if (sset != ETH_SS_STATS)
1625 		return;
1626 
1627 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
1628 		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
1629 			ETH_GSTRING_LEN);
1630 		data += ETH_GSTRING_LEN;
1631 	}
1632 
1633 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
1634 		strscpy(data, mvpp2_ethtool_port_regs[i].string,
1635 			ETH_GSTRING_LEN);
1636 		data += ETH_GSTRING_LEN;
1637 	}
1638 
1639 	for (q = 0; q < port->ntxqs; q++) {
1640 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
1641 			snprintf(data, ETH_GSTRING_LEN,
1642 				 mvpp2_ethtool_txq_regs[i].string, q);
1643 			data += ETH_GSTRING_LEN;
1644 		}
1645 	}
1646 
1647 	for (q = 0; q < port->nrxqs; q++) {
1648 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
1649 			snprintf(data, ETH_GSTRING_LEN,
1650 				 mvpp2_ethtool_rxq_regs[i].string,
1651 				 q);
1652 			data += ETH_GSTRING_LEN;
1653 		}
1654 	}
1655 
1656 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
1657 		strscpy(data, mvpp2_ethtool_xdp[i].string,
1658 			ETH_GSTRING_LEN);
1659 		data += ETH_GSTRING_LEN;
1660 	}
1661 }
1662 
1663 static void
1664 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
1665 {
1666 	unsigned int start;
1667 	unsigned int cpu;
1668 
1669 	/* Gather XDP Statistics */
1670 	for_each_possible_cpu(cpu) {
1671 		struct mvpp2_pcpu_stats *cpu_stats;
1672 		u64	xdp_redirect;
1673 		u64	xdp_pass;
1674 		u64	xdp_drop;
1675 		u64	xdp_xmit;
1676 		u64	xdp_xmit_err;
1677 		u64	xdp_tx;
1678 		u64	xdp_tx_err;
1679 
1680 		cpu_stats = per_cpu_ptr(port->stats, cpu);
1681 		do {
1682 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1683 			xdp_redirect = cpu_stats->xdp_redirect;
1684 			xdp_pass   = cpu_stats->xdp_pass;
1685 			xdp_drop = cpu_stats->xdp_drop;
1686 			xdp_xmit   = cpu_stats->xdp_xmit;
1687 			xdp_xmit_err   = cpu_stats->xdp_xmit_err;
1688 			xdp_tx   = cpu_stats->xdp_tx;
1689 			xdp_tx_err   = cpu_stats->xdp_tx_err;
1690 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1691 
1692 		xdp_stats->xdp_redirect += xdp_redirect;
1693 		xdp_stats->xdp_pass   += xdp_pass;
1694 		xdp_stats->xdp_drop += xdp_drop;
1695 		xdp_stats->xdp_xmit   += xdp_xmit;
1696 		xdp_stats->xdp_xmit_err   += xdp_xmit_err;
1697 		xdp_stats->xdp_tx   += xdp_tx;
1698 		xdp_stats->xdp_tx_err   += xdp_tx_err;
1699 	}
1700 }
1701 
1702 static void mvpp2_read_stats(struct mvpp2_port *port)
1703 {
1704 	struct mvpp2_pcpu_stats xdp_stats = {};
1705 	const struct mvpp2_ethtool_counter *s;
1706 	u64 *pstats;
1707 	int i, q;
1708 
1709 	pstats = port->ethtool_stats;
1710 
1711 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
1712 		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
1713 
1714 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
1715 		*pstats++ += mvpp2_read(port->priv,
1716 					mvpp2_ethtool_port_regs[i].offset +
1717 					4 * port->id);
1718 
1719 	for (q = 0; q < port->ntxqs; q++)
1720 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
1721 			*pstats++ += mvpp2_read_index(port->priv,
1722 						      MVPP22_CTRS_TX_CTR(port->id, q),
1723 						      mvpp2_ethtool_txq_regs[i].offset);
1724 
1725 	/* Rxqs are numbered from 0 from the user standpoint, but not from the
1726 	 * driver's. We need to add the  port->first_rxq offset.
1727 	 */
1728 	for (q = 0; q < port->nrxqs; q++)
1729 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
1730 			*pstats++ += mvpp2_read_index(port->priv,
1731 						      port->first_rxq + q,
1732 						      mvpp2_ethtool_rxq_regs[i].offset);
1733 
1734 	/* Gather XDP Statistics */
1735 	mvpp2_get_xdp_stats(port, &xdp_stats);
1736 
1737 	for (i = 0, s = mvpp2_ethtool_xdp;
1738 		 s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
1739 	     s++, i++) {
1740 		switch (s->offset) {
1741 		case ETHTOOL_XDP_REDIRECT:
1742 			*pstats++ = xdp_stats.xdp_redirect;
1743 			break;
1744 		case ETHTOOL_XDP_PASS:
1745 			*pstats++ = xdp_stats.xdp_pass;
1746 			break;
1747 		case ETHTOOL_XDP_DROP:
1748 			*pstats++ = xdp_stats.xdp_drop;
1749 			break;
1750 		case ETHTOOL_XDP_TX:
1751 			*pstats++ = xdp_stats.xdp_tx;
1752 			break;
1753 		case ETHTOOL_XDP_TX_ERR:
1754 			*pstats++ = xdp_stats.xdp_tx_err;
1755 			break;
1756 		case ETHTOOL_XDP_XMIT:
1757 			*pstats++ = xdp_stats.xdp_xmit;
1758 			break;
1759 		case ETHTOOL_XDP_XMIT_ERR:
1760 			*pstats++ = xdp_stats.xdp_xmit_err;
1761 			break;
1762 		}
1763 	}
1764 }
1765 
1766 static void mvpp2_gather_hw_statistics(struct work_struct *work)
1767 {
1768 	struct delayed_work *del_work = to_delayed_work(work);
1769 	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
1770 					       stats_work);
1771 
1772 	mutex_lock(&port->gather_stats_lock);
1773 
1774 	mvpp2_read_stats(port);
1775 
1776 	/* No need to read again the counters right after this function if it
1777 	 * was called asynchronously by the user (ie. use of ethtool).
1778 	 */
1779 	cancel_delayed_work(&port->stats_work);
1780 	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
1781 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
1782 
1783 	mutex_unlock(&port->gather_stats_lock);
1784 }
1785 
1786 static void mvpp2_ethtool_get_stats(struct net_device *dev,
1787 				    struct ethtool_stats *stats, u64 *data)
1788 {
1789 	struct mvpp2_port *port = netdev_priv(dev);
1790 
1791 	/* Update statistics for the given port, then take the lock to avoid
1792 	 * concurrent accesses on the ethtool_stats structure during its copy.
1793 	 */
1794 	mvpp2_gather_hw_statistics(&port->stats_work.work);
1795 
1796 	mutex_lock(&port->gather_stats_lock);
1797 	memcpy(data, port->ethtool_stats,
1798 	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
1799 	mutex_unlock(&port->gather_stats_lock);
1800 }
1801 
1802 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
1803 {
1804 	struct mvpp2_port *port = netdev_priv(dev);
1805 
1806 	if (sset == ETH_SS_STATS)
1807 		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
1808 
1809 	return -EOPNOTSUPP;
1810 }
1811 
1812 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
1813 {
1814 	u32 val;
1815 
1816 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
1817 	      MVPP2_GMAC_PORT_RESET_MASK;
1818 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
1819 
1820 	if (port->priv->hw_version == MVPP22 && port->gop_id == 0) {
1821 		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
1822 		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
1823 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
1824 	}
1825 }
1826 
1827 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
1828 {
1829 	struct mvpp2 *priv = port->priv;
1830 	void __iomem *mpcs, *xpcs;
1831 	u32 val;
1832 
1833 	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
1834 		return;
1835 
1836 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
1837 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
1838 
1839 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
1840 	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
1841 	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
1842 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1843 
1844 	val = readl(xpcs + MVPP22_XPCS_CFG0);
1845 	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
1846 }
1847 
1848 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
1849 {
1850 	struct mvpp2 *priv = port->priv;
1851 	void __iomem *mpcs, *xpcs;
1852 	u32 val;
1853 
1854 	if (port->priv->hw_version != MVPP22 || port->gop_id != 0)
1855 		return;
1856 
1857 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
1858 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
1859 
1860 	switch (port->phy_interface) {
1861 	case PHY_INTERFACE_MODE_10GBASER:
1862 		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
1863 		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
1864 		       MAC_CLK_RESET_SD_TX;
1865 		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
1866 		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
1867 		break;
1868 	case PHY_INTERFACE_MODE_XAUI:
1869 	case PHY_INTERFACE_MODE_RXAUI:
1870 		val = readl(xpcs + MVPP22_XPCS_CFG0);
1871 		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
1872 		break;
1873 	default:
1874 		break;
1875 	}
1876 }
1877 
1878 /* Change maximum receive size of the port */
1879 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
1880 {
1881 	u32 val;
1882 
1883 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
1884 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
1885 	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
1886 		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
1887 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
1888 }
1889 
1890 /* Change maximum receive size of the port */
1891 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
1892 {
1893 	u32 val;
1894 
1895 	val =  readl(port->base + MVPP22_XLG_CTRL1_REG);
1896 	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
1897 	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
1898 	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
1899 	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
1900 }
1901 
1902 /* Set defaults to the MVPP2 port */
1903 static void mvpp2_defaults_set(struct mvpp2_port *port)
1904 {
1905 	int tx_port_num, val, queue, lrxq;
1906 
1907 	if (port->priv->hw_version == MVPP21) {
1908 		/* Update TX FIFO MIN Threshold */
1909 		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
1910 		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
1911 		/* Min. TX threshold must be less than minimal packet length */
1912 		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
1913 		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
1914 	}
1915 
1916 	/* Disable Legacy WRR, Disable EJP, Release from reset */
1917 	tx_port_num = mvpp2_egress_port(port);
1918 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
1919 		    tx_port_num);
1920 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
1921 
1922 	/* Set TXQ scheduling to Round-Robin */
1923 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
1924 
1925 	/* Close bandwidth for all queues */
1926 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
1927 		mvpp2_write(port->priv,
1928 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
1929 
1930 	/* Set refill period to 1 usec, refill tokens
1931 	 * and bucket size to maximum
1932 	 */
1933 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
1934 		    port->priv->tclk / USEC_PER_SEC);
1935 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
1936 	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
1937 	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
1938 	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
1939 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
1940 	val = MVPP2_TXP_TOKEN_SIZE_MAX;
1941 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
1942 
1943 	/* Set MaximumLowLatencyPacketSize value to 256 */
1944 	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
1945 		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
1946 		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
1947 
1948 	/* Enable Rx cache snoop */
1949 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
1950 		queue = port->rxqs[lrxq]->id;
1951 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
1952 		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
1953 			   MVPP2_SNOOP_BUF_HDR_MASK;
1954 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
1955 	}
1956 
1957 	/* At default, mask all interrupts to all present cpus */
1958 	mvpp2_interrupts_disable(port);
1959 }
1960 
1961 /* Enable/disable receiving packets */
1962 static void mvpp2_ingress_enable(struct mvpp2_port *port)
1963 {
1964 	u32 val;
1965 	int lrxq, queue;
1966 
1967 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
1968 		queue = port->rxqs[lrxq]->id;
1969 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
1970 		val &= ~MVPP2_RXQ_DISABLE_MASK;
1971 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
1972 	}
1973 }
1974 
1975 static void mvpp2_ingress_disable(struct mvpp2_port *port)
1976 {
1977 	u32 val;
1978 	int lrxq, queue;
1979 
1980 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
1981 		queue = port->rxqs[lrxq]->id;
1982 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
1983 		val |= MVPP2_RXQ_DISABLE_MASK;
1984 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
1985 	}
1986 }
1987 
1988 /* Enable transmit via physical egress queue
1989  * - HW starts take descriptors from DRAM
1990  */
1991 static void mvpp2_egress_enable(struct mvpp2_port *port)
1992 {
1993 	u32 qmap;
1994 	int queue;
1995 	int tx_port_num = mvpp2_egress_port(port);
1996 
1997 	/* Enable all initialized TXs. */
1998 	qmap = 0;
1999 	for (queue = 0; queue < port->ntxqs; queue++) {
2000 		struct mvpp2_tx_queue *txq = port->txqs[queue];
2001 
2002 		if (txq->descs)
2003 			qmap |= (1 << queue);
2004 	}
2005 
2006 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2007 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2008 }
2009 
2010 /* Disable transmit via physical egress queue
2011  * - HW doesn't take descriptors from DRAM
2012  */
2013 static void mvpp2_egress_disable(struct mvpp2_port *port)
2014 {
2015 	u32 reg_data;
2016 	int delay;
2017 	int tx_port_num = mvpp2_egress_port(port);
2018 
2019 	/* Issue stop command for active channels only */
2020 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2021 	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2022 		    MVPP2_TXP_SCHED_ENQ_MASK;
2023 	if (reg_data != 0)
2024 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2025 			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2026 
2027 	/* Wait for all Tx activity to terminate. */
2028 	delay = 0;
2029 	do {
2030 		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2031 			netdev_warn(port->dev,
2032 				    "Tx stop timed out, status=0x%08x\n",
2033 				    reg_data);
2034 			break;
2035 		}
2036 		mdelay(1);
2037 		delay++;
2038 
2039 		/* Check port TX Command register that all
2040 		 * Tx queues are stopped
2041 		 */
2042 		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2043 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2044 }
2045 
2046 /* Rx descriptors helper methods */
2047 
2048 /* Get number of Rx descriptors occupied by received packets */
2049 static inline int
2050 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2051 {
2052 	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2053 
2054 	return val & MVPP2_RXQ_OCCUPIED_MASK;
2055 }
2056 
2057 /* Update Rx queue status with the number of occupied and available
2058  * Rx descriptor slots.
2059  */
2060 static inline void
2061 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2062 			int used_count, int free_count)
2063 {
2064 	/* Decrement the number of used descriptors and increment count
2065 	 * increment the number of free descriptors.
2066 	 */
2067 	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2068 
2069 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2070 }
2071 
2072 /* Get pointer to next RX descriptor to be processed by SW */
2073 static inline struct mvpp2_rx_desc *
2074 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2075 {
2076 	int rx_desc = rxq->next_desc_to_proc;
2077 
2078 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2079 	prefetch(rxq->descs + rxq->next_desc_to_proc);
2080 	return rxq->descs + rx_desc;
2081 }
2082 
2083 /* Set rx queue offset */
2084 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2085 				 int prxq, int offset)
2086 {
2087 	u32 val;
2088 
2089 	/* Convert offset from bytes to units of 32 bytes */
2090 	offset = offset >> 5;
2091 
2092 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2093 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2094 
2095 	/* Offset is in */
2096 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2097 		    MVPP2_RXQ_PACKET_OFFSET_MASK);
2098 
2099 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2100 }
2101 
2102 /* Tx descriptors helper methods */
2103 
2104 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2105 static struct mvpp2_tx_desc *
2106 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2107 {
2108 	int tx_desc = txq->next_desc_to_proc;
2109 
2110 	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2111 	return txq->descs + tx_desc;
2112 }
2113 
2114 /* Update HW with number of aggregated Tx descriptors to be sent
2115  *
2116  * Called only from mvpp2_tx(), so migration is disabled, using
2117  * smp_processor_id() is OK.
2118  */
2119 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2120 {
2121 	/* aggregated access - relevant TXQ number is written in TX desc */
2122 	mvpp2_thread_write(port->priv,
2123 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2124 			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2125 }
2126 
2127 /* Check if there are enough free descriptors in aggregated txq.
2128  * If not, update the number of occupied descriptors and repeat the check.
2129  *
2130  * Called only from mvpp2_tx(), so migration is disabled, using
2131  * smp_processor_id() is OK.
2132  */
2133 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2134 				     struct mvpp2_tx_queue *aggr_txq, int num)
2135 {
2136 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2137 		/* Update number of occupied aggregated Tx descriptors */
2138 		unsigned int thread =
2139 			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2140 		u32 val = mvpp2_read_relaxed(port->priv,
2141 					     MVPP2_AGGR_TXQ_STATUS_REG(thread));
2142 
2143 		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2144 
2145 		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2146 			return -ENOMEM;
2147 	}
2148 	return 0;
2149 }
2150 
2151 /* Reserved Tx descriptors allocation request
2152  *
2153  * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2154  * only by mvpp2_tx(), so migration is disabled, using
2155  * smp_processor_id() is OK.
2156  */
2157 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2158 					 struct mvpp2_tx_queue *txq, int num)
2159 {
2160 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2161 	struct mvpp2 *priv = port->priv;
2162 	u32 val;
2163 
2164 	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2165 	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2166 
2167 	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2168 
2169 	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2170 }
2171 
2172 /* Check if there are enough reserved descriptors for transmission.
2173  * If not, request chunk of reserved descriptors and check again.
2174  */
2175 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2176 					    struct mvpp2_tx_queue *txq,
2177 					    struct mvpp2_txq_pcpu *txq_pcpu,
2178 					    int num)
2179 {
2180 	int req, desc_count;
2181 	unsigned int thread;
2182 
2183 	if (txq_pcpu->reserved_num >= num)
2184 		return 0;
2185 
2186 	/* Not enough descriptors reserved! Update the reserved descriptor
2187 	 * count and check again.
2188 	 */
2189 
2190 	desc_count = 0;
2191 	/* Compute total of used descriptors */
2192 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2193 		struct mvpp2_txq_pcpu *txq_pcpu_aux;
2194 
2195 		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2196 		desc_count += txq_pcpu_aux->count;
2197 		desc_count += txq_pcpu_aux->reserved_num;
2198 	}
2199 
2200 	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2201 	desc_count += req;
2202 
2203 	if (desc_count >
2204 	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2205 		return -ENOMEM;
2206 
2207 	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2208 
2209 	/* OK, the descriptor could have been updated: check again. */
2210 	if (txq_pcpu->reserved_num < num)
2211 		return -ENOMEM;
2212 	return 0;
2213 }
2214 
2215 /* Release the last allocated Tx descriptor. Useful to handle DMA
2216  * mapping failures in the Tx path.
2217  */
2218 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2219 {
2220 	if (txq->next_desc_to_proc == 0)
2221 		txq->next_desc_to_proc = txq->last_desc - 1;
2222 	else
2223 		txq->next_desc_to_proc--;
2224 }
2225 
2226 /* Set Tx descriptors fields relevant for CSUM calculation */
2227 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2228 			       int ip_hdr_len, int l4_proto)
2229 {
2230 	u32 command;
2231 
2232 	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
2233 	 * G_L4_chk, L4_type required only for checksum calculation
2234 	 */
2235 	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2236 	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2237 	command |= MVPP2_TXD_IP_CSUM_DISABLE;
2238 
2239 	if (l3_proto == htons(ETH_P_IP)) {
2240 		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
2241 		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
2242 	} else {
2243 		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
2244 	}
2245 
2246 	if (l4_proto == IPPROTO_TCP) {
2247 		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
2248 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2249 	} else if (l4_proto == IPPROTO_UDP) {
2250 		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
2251 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2252 	} else {
2253 		command |= MVPP2_TXD_L4_CSUM_NOT;
2254 	}
2255 
2256 	return command;
2257 }
2258 
2259 /* Get number of sent descriptors and decrement counter.
2260  * The number of sent descriptors is returned.
2261  * Per-thread access
2262  *
2263  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2264  * (migration disabled) and from the TX completion tasklet (migration
2265  * disabled) so using smp_processor_id() is OK.
2266  */
2267 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2268 					   struct mvpp2_tx_queue *txq)
2269 {
2270 	u32 val;
2271 
2272 	/* Reading status reg resets transmitted descriptor counter */
2273 	val = mvpp2_thread_read_relaxed(port->priv,
2274 					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2275 					MVPP2_TXQ_SENT_REG(txq->id));
2276 
2277 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2278 		MVPP2_TRANSMITTED_COUNT_OFFSET;
2279 }
2280 
2281 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2282  * disabled, therefore using smp_processor_id() is OK.
2283  */
2284 static void mvpp2_txq_sent_counter_clear(void *arg)
2285 {
2286 	struct mvpp2_port *port = arg;
2287 	int queue;
2288 
2289 	/* If the thread isn't used, don't do anything */
2290 	if (smp_processor_id() > port->priv->nthreads)
2291 		return;
2292 
2293 	for (queue = 0; queue < port->ntxqs; queue++) {
2294 		int id = port->txqs[queue]->id;
2295 
2296 		mvpp2_thread_read(port->priv,
2297 				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2298 				  MVPP2_TXQ_SENT_REG(id));
2299 	}
2300 }
2301 
2302 /* Set max sizes for Tx queues */
2303 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2304 {
2305 	u32	val, size, mtu;
2306 	int	txq, tx_port_num;
2307 
2308 	mtu = port->pkt_size * 8;
2309 	if (mtu > MVPP2_TXP_MTU_MAX)
2310 		mtu = MVPP2_TXP_MTU_MAX;
2311 
2312 	/* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */
2313 	mtu = 3 * mtu;
2314 
2315 	/* Indirect access to registers */
2316 	tx_port_num = mvpp2_egress_port(port);
2317 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2318 
2319 	/* Set MTU */
2320 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2321 	val &= ~MVPP2_TXP_MTU_MAX;
2322 	val |= mtu;
2323 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2324 
2325 	/* TXP token size and all TXQs token size must be larger that MTU */
2326 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2327 	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2328 	if (size < mtu) {
2329 		size = mtu;
2330 		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2331 		val |= size;
2332 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2333 	}
2334 
2335 	for (txq = 0; txq < port->ntxqs; txq++) {
2336 		val = mvpp2_read(port->priv,
2337 				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2338 		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2339 
2340 		if (size < mtu) {
2341 			size = mtu;
2342 			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2343 			val |= size;
2344 			mvpp2_write(port->priv,
2345 				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2346 				    val);
2347 		}
2348 	}
2349 }
2350 
2351 /* Set the number of packets that will be received before Rx interrupt
2352  * will be generated by HW.
2353  */
2354 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2355 				   struct mvpp2_rx_queue *rxq)
2356 {
2357 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2358 
2359 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2360 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2361 
2362 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2363 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2364 			   rxq->pkts_coal);
2365 
2366 	put_cpu();
2367 }
2368 
2369 /* For some reason in the LSP this is done on each CPU. Why ? */
2370 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2371 				   struct mvpp2_tx_queue *txq)
2372 {
2373 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2374 	u32 val;
2375 
2376 	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2377 		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2378 
2379 	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2380 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2381 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2382 
2383 	put_cpu();
2384 }
2385 
2386 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2387 {
2388 	u64 tmp = (u64)clk_hz * usec;
2389 
2390 	do_div(tmp, USEC_PER_SEC);
2391 
2392 	return tmp > U32_MAX ? U32_MAX : tmp;
2393 }
2394 
2395 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2396 {
2397 	u64 tmp = (u64)cycles * USEC_PER_SEC;
2398 
2399 	do_div(tmp, clk_hz);
2400 
2401 	return tmp > U32_MAX ? U32_MAX : tmp;
2402 }
2403 
2404 /* Set the time delay in usec before Rx interrupt */
2405 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2406 				   struct mvpp2_rx_queue *rxq)
2407 {
2408 	unsigned long freq = port->priv->tclk;
2409 	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2410 
2411 	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2412 		rxq->time_coal =
2413 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2414 
2415 		/* re-evaluate to get actual register value */
2416 		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2417 	}
2418 
2419 	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2420 }
2421 
2422 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2423 {
2424 	unsigned long freq = port->priv->tclk;
2425 	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2426 
2427 	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2428 		port->tx_time_coal =
2429 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2430 
2431 		/* re-evaluate to get actual register value */
2432 		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2433 	}
2434 
2435 	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2436 }
2437 
2438 /* Free Tx queue skbuffs */
2439 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2440 				struct mvpp2_tx_queue *txq,
2441 				struct mvpp2_txq_pcpu *txq_pcpu, int num)
2442 {
2443 	struct xdp_frame_bulk bq;
2444 	int i;
2445 
2446 	xdp_frame_bulk_init(&bq);
2447 
2448 	rcu_read_lock(); /* need for xdp_return_frame_bulk */
2449 
2450 	for (i = 0; i < num; i++) {
2451 		struct mvpp2_txq_pcpu_buf *tx_buf =
2452 			txq_pcpu->buffs + txq_pcpu->txq_get_index;
2453 
2454 		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2455 		    tx_buf->type != MVPP2_TYPE_XDP_TX)
2456 			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2457 					 tx_buf->size, DMA_TO_DEVICE);
2458 		if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2459 			dev_kfree_skb_any(tx_buf->skb);
2460 		else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2461 			 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2462 			xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2463 
2464 		mvpp2_txq_inc_get(txq_pcpu);
2465 	}
2466 	xdp_flush_frame_bulk(&bq);
2467 
2468 	rcu_read_unlock();
2469 }
2470 
2471 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2472 							u32 cause)
2473 {
2474 	int queue = fls(cause) - 1;
2475 
2476 	return port->rxqs[queue];
2477 }
2478 
2479 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2480 							u32 cause)
2481 {
2482 	int queue = fls(cause) - 1;
2483 
2484 	return port->txqs[queue];
2485 }
2486 
2487 /* Handle end of transmission */
2488 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2489 			   struct mvpp2_txq_pcpu *txq_pcpu)
2490 {
2491 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2492 	int tx_done;
2493 
2494 	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
2495 		netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
2496 
2497 	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2498 	if (!tx_done)
2499 		return;
2500 	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2501 
2502 	txq_pcpu->count -= tx_done;
2503 
2504 	if (netif_tx_queue_stopped(nq))
2505 		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2506 			netif_tx_wake_queue(nq);
2507 }
2508 
2509 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2510 				  unsigned int thread)
2511 {
2512 	struct mvpp2_tx_queue *txq;
2513 	struct mvpp2_txq_pcpu *txq_pcpu;
2514 	unsigned int tx_todo = 0;
2515 
2516 	while (cause) {
2517 		txq = mvpp2_get_tx_queue(port, cause);
2518 		if (!txq)
2519 			break;
2520 
2521 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2522 
2523 		if (txq_pcpu->count) {
2524 			mvpp2_txq_done(port, txq, txq_pcpu);
2525 			tx_todo += txq_pcpu->count;
2526 		}
2527 
2528 		cause &= ~(1 << txq->log_id);
2529 	}
2530 	return tx_todo;
2531 }
2532 
2533 /* Rx/Tx queue initialization/cleanup methods */
2534 
2535 /* Allocate and initialize descriptors for aggr TXQ */
2536 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2537 			       struct mvpp2_tx_queue *aggr_txq,
2538 			       unsigned int thread, struct mvpp2 *priv)
2539 {
2540 	u32 txq_dma;
2541 
2542 	/* Allocate memory for TX descriptors */
2543 	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2544 					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2545 					     &aggr_txq->descs_dma, GFP_KERNEL);
2546 	if (!aggr_txq->descs)
2547 		return -ENOMEM;
2548 
2549 	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2550 
2551 	/* Aggr TXQ no reset WA */
2552 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2553 						 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2554 
2555 	/* Set Tx descriptors queue starting address indirect
2556 	 * access
2557 	 */
2558 	if (priv->hw_version == MVPP21)
2559 		txq_dma = aggr_txq->descs_dma;
2560 	else
2561 		txq_dma = aggr_txq->descs_dma >>
2562 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2563 
2564 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2565 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2566 		    MVPP2_AGGR_TXQ_SIZE);
2567 
2568 	return 0;
2569 }
2570 
2571 /* Create a specified Rx queue */
2572 static int mvpp2_rxq_init(struct mvpp2_port *port,
2573 			  struct mvpp2_rx_queue *rxq)
2574 {
2575 	struct mvpp2 *priv = port->priv;
2576 	unsigned int thread;
2577 	u32 rxq_dma;
2578 	int err;
2579 
2580 	rxq->size = port->rx_ring_size;
2581 
2582 	/* Allocate memory for RX descriptors */
2583 	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2584 					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2585 					&rxq->descs_dma, GFP_KERNEL);
2586 	if (!rxq->descs)
2587 		return -ENOMEM;
2588 
2589 	rxq->last_desc = rxq->size - 1;
2590 
2591 	/* Zero occupied and non-occupied counters - direct access */
2592 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2593 
2594 	/* Set Rx descriptors queue starting address - indirect access */
2595 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2596 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2597 	if (port->priv->hw_version == MVPP21)
2598 		rxq_dma = rxq->descs_dma;
2599 	else
2600 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2601 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2602 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2603 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2604 	put_cpu();
2605 
2606 	/* Set Offset */
2607 	mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2608 
2609 	/* Set coalescing pkts and time */
2610 	mvpp2_rx_pkts_coal_set(port, rxq);
2611 	mvpp2_rx_time_coal_set(port, rxq);
2612 
2613 	/* Add number of descriptors ready for receiving packets */
2614 	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2615 
2616 	if (priv->percpu_pools) {
2617 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id);
2618 		if (err < 0)
2619 			goto err_free_dma;
2620 
2621 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id);
2622 		if (err < 0)
2623 			goto err_unregister_rxq_short;
2624 
2625 		/* Every RXQ has a pool for short and another for long packets */
2626 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2627 						 MEM_TYPE_PAGE_POOL,
2628 						 priv->page_pool[rxq->logic_rxq]);
2629 		if (err < 0)
2630 			goto err_unregister_rxq_long;
2631 
2632 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2633 						 MEM_TYPE_PAGE_POOL,
2634 						 priv->page_pool[rxq->logic_rxq +
2635 								 port->nrxqs]);
2636 		if (err < 0)
2637 			goto err_unregister_mem_rxq_short;
2638 	}
2639 
2640 	return 0;
2641 
2642 err_unregister_mem_rxq_short:
2643 	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2644 err_unregister_rxq_long:
2645 	xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2646 err_unregister_rxq_short:
2647 	xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2648 err_free_dma:
2649 	dma_free_coherent(port->dev->dev.parent,
2650 			  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2651 			  rxq->descs, rxq->descs_dma);
2652 	return err;
2653 }
2654 
2655 /* Push packets received by the RXQ to BM pool */
2656 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
2657 				struct mvpp2_rx_queue *rxq)
2658 {
2659 	int rx_received, i;
2660 
2661 	rx_received = mvpp2_rxq_received(port, rxq->id);
2662 	if (!rx_received)
2663 		return;
2664 
2665 	for (i = 0; i < rx_received; i++) {
2666 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
2667 		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
2668 		int pool;
2669 
2670 		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
2671 			MVPP2_RXD_BM_POOL_ID_OFFS;
2672 
2673 		mvpp2_bm_pool_put(port, pool,
2674 				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
2675 				  mvpp2_rxdesc_cookie_get(port, rx_desc));
2676 	}
2677 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
2678 }
2679 
2680 /* Cleanup Rx queue */
2681 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
2682 			     struct mvpp2_rx_queue *rxq)
2683 {
2684 	unsigned int thread;
2685 
2686 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
2687 		xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2688 
2689 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
2690 		xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2691 
2692 	mvpp2_rxq_drop_pkts(port, rxq);
2693 
2694 	if (rxq->descs)
2695 		dma_free_coherent(port->dev->dev.parent,
2696 				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2697 				  rxq->descs,
2698 				  rxq->descs_dma);
2699 
2700 	rxq->descs             = NULL;
2701 	rxq->last_desc         = 0;
2702 	rxq->next_desc_to_proc = 0;
2703 	rxq->descs_dma         = 0;
2704 
2705 	/* Clear Rx descriptors queue starting address and size;
2706 	 * free descriptor number
2707 	 */
2708 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2709 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2710 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2711 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
2712 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
2713 	put_cpu();
2714 }
2715 
2716 /* Create and initialize a Tx queue */
2717 static int mvpp2_txq_init(struct mvpp2_port *port,
2718 			  struct mvpp2_tx_queue *txq)
2719 {
2720 	u32 val;
2721 	unsigned int thread;
2722 	int desc, desc_per_txq, tx_port_num;
2723 	struct mvpp2_txq_pcpu *txq_pcpu;
2724 
2725 	txq->size = port->tx_ring_size;
2726 
2727 	/* Allocate memory for Tx descriptors */
2728 	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
2729 				txq->size * MVPP2_DESC_ALIGNED_SIZE,
2730 				&txq->descs_dma, GFP_KERNEL);
2731 	if (!txq->descs)
2732 		return -ENOMEM;
2733 
2734 	txq->last_desc = txq->size - 1;
2735 
2736 	/* Set Tx descriptors queue starting address - indirect access */
2737 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2738 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2739 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
2740 			   txq->descs_dma);
2741 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
2742 			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
2743 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
2744 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
2745 			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
2746 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
2747 	val &= ~MVPP2_TXQ_PENDING_MASK;
2748 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
2749 
2750 	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
2751 	 * for each existing TXQ.
2752 	 * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT
2753 	 * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS
2754 	 */
2755 	desc_per_txq = 16;
2756 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
2757 	       (txq->log_id * desc_per_txq);
2758 
2759 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
2760 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
2761 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
2762 	put_cpu();
2763 
2764 	/* WRR / EJP configuration - indirect access */
2765 	tx_port_num = mvpp2_egress_port(port);
2766 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2767 
2768 	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
2769 	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
2770 	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
2771 	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
2772 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
2773 
2774 	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
2775 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
2776 		    val);
2777 
2778 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2779 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2780 		txq_pcpu->size = txq->size;
2781 		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
2782 						sizeof(*txq_pcpu->buffs),
2783 						GFP_KERNEL);
2784 		if (!txq_pcpu->buffs)
2785 			return -ENOMEM;
2786 
2787 		txq_pcpu->count = 0;
2788 		txq_pcpu->reserved_num = 0;
2789 		txq_pcpu->txq_put_index = 0;
2790 		txq_pcpu->txq_get_index = 0;
2791 		txq_pcpu->tso_headers = NULL;
2792 
2793 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
2794 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
2795 
2796 		txq_pcpu->tso_headers =
2797 			dma_alloc_coherent(port->dev->dev.parent,
2798 					   txq_pcpu->size * TSO_HEADER_SIZE,
2799 					   &txq_pcpu->tso_headers_dma,
2800 					   GFP_KERNEL);
2801 		if (!txq_pcpu->tso_headers)
2802 			return -ENOMEM;
2803 	}
2804 
2805 	return 0;
2806 }
2807 
2808 /* Free allocated TXQ resources */
2809 static void mvpp2_txq_deinit(struct mvpp2_port *port,
2810 			     struct mvpp2_tx_queue *txq)
2811 {
2812 	struct mvpp2_txq_pcpu *txq_pcpu;
2813 	unsigned int thread;
2814 
2815 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2816 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2817 		kfree(txq_pcpu->buffs);
2818 
2819 		if (txq_pcpu->tso_headers)
2820 			dma_free_coherent(port->dev->dev.parent,
2821 					  txq_pcpu->size * TSO_HEADER_SIZE,
2822 					  txq_pcpu->tso_headers,
2823 					  txq_pcpu->tso_headers_dma);
2824 
2825 		txq_pcpu->tso_headers = NULL;
2826 	}
2827 
2828 	if (txq->descs)
2829 		dma_free_coherent(port->dev->dev.parent,
2830 				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
2831 				  txq->descs, txq->descs_dma);
2832 
2833 	txq->descs             = NULL;
2834 	txq->last_desc         = 0;
2835 	txq->next_desc_to_proc = 0;
2836 	txq->descs_dma         = 0;
2837 
2838 	/* Set minimum bandwidth for disabled TXQs */
2839 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
2840 
2841 	/* Set Tx descriptors queue starting address and size */
2842 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2843 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2844 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
2845 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
2846 	put_cpu();
2847 }
2848 
2849 /* Cleanup Tx ports */
2850 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
2851 {
2852 	struct mvpp2_txq_pcpu *txq_pcpu;
2853 	int delay, pending;
2854 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2855 	u32 val;
2856 
2857 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2858 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
2859 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
2860 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2861 
2862 	/* The napi queue has been stopped so wait for all packets
2863 	 * to be transmitted.
2864 	 */
2865 	delay = 0;
2866 	do {
2867 		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
2868 			netdev_warn(port->dev,
2869 				    "port %d: cleaning queue %d timed out\n",
2870 				    port->id, txq->log_id);
2871 			break;
2872 		}
2873 		mdelay(1);
2874 		delay++;
2875 
2876 		pending = mvpp2_thread_read(port->priv, thread,
2877 					    MVPP2_TXQ_PENDING_REG);
2878 		pending &= MVPP2_TXQ_PENDING_MASK;
2879 	} while (pending);
2880 
2881 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
2882 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
2883 	put_cpu();
2884 
2885 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2886 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2887 
2888 		/* Release all packets */
2889 		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
2890 
2891 		/* Reset queue */
2892 		txq_pcpu->count = 0;
2893 		txq_pcpu->txq_put_index = 0;
2894 		txq_pcpu->txq_get_index = 0;
2895 	}
2896 }
2897 
2898 /* Cleanup all Tx queues */
2899 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
2900 {
2901 	struct mvpp2_tx_queue *txq;
2902 	int queue;
2903 	u32 val;
2904 
2905 	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
2906 
2907 	/* Reset Tx ports and delete Tx queues */
2908 	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
2909 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2910 
2911 	for (queue = 0; queue < port->ntxqs; queue++) {
2912 		txq = port->txqs[queue];
2913 		mvpp2_txq_clean(port, txq);
2914 		mvpp2_txq_deinit(port, txq);
2915 	}
2916 
2917 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2918 
2919 	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
2920 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
2921 }
2922 
2923 /* Cleanup all Rx queues */
2924 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
2925 {
2926 	int queue;
2927 
2928 	for (queue = 0; queue < port->nrxqs; queue++)
2929 		mvpp2_rxq_deinit(port, port->rxqs[queue]);
2930 }
2931 
2932 /* Init all Rx queues for port */
2933 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
2934 {
2935 	int queue, err;
2936 
2937 	for (queue = 0; queue < port->nrxqs; queue++) {
2938 		err = mvpp2_rxq_init(port, port->rxqs[queue]);
2939 		if (err)
2940 			goto err_cleanup;
2941 	}
2942 	return 0;
2943 
2944 err_cleanup:
2945 	mvpp2_cleanup_rxqs(port);
2946 	return err;
2947 }
2948 
2949 /* Init all tx queues for port */
2950 static int mvpp2_setup_txqs(struct mvpp2_port *port)
2951 {
2952 	struct mvpp2_tx_queue *txq;
2953 	int queue, err;
2954 
2955 	for (queue = 0; queue < port->ntxqs; queue++) {
2956 		txq = port->txqs[queue];
2957 		err = mvpp2_txq_init(port, txq);
2958 		if (err)
2959 			goto err_cleanup;
2960 
2961 		/* Assign this queue to a CPU */
2962 		if (queue < num_possible_cpus())
2963 			netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
2964 	}
2965 
2966 	if (port->has_tx_irqs) {
2967 		mvpp2_tx_time_coal_set(port);
2968 		for (queue = 0; queue < port->ntxqs; queue++) {
2969 			txq = port->txqs[queue];
2970 			mvpp2_tx_pkts_coal_set(port, txq);
2971 		}
2972 	}
2973 
2974 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
2975 	return 0;
2976 
2977 err_cleanup:
2978 	mvpp2_cleanup_txqs(port);
2979 	return err;
2980 }
2981 
2982 /* The callback for per-port interrupt */
2983 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
2984 {
2985 	struct mvpp2_queue_vector *qv = dev_id;
2986 
2987 	mvpp2_qvec_interrupt_disable(qv);
2988 
2989 	napi_schedule(&qv->napi);
2990 
2991 	return IRQ_HANDLED;
2992 }
2993 
2994 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
2995 {
2996 	struct skb_shared_hwtstamps shhwtstamps;
2997 	struct mvpp2_hwtstamp_queue *queue;
2998 	struct sk_buff *skb;
2999 	void __iomem *ptp_q;
3000 	unsigned int id;
3001 	u32 r0, r1, r2;
3002 
3003 	ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3004 	if (nq)
3005 		ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3006 
3007 	queue = &port->tx_hwtstamp_queue[nq];
3008 
3009 	while (1) {
3010 		r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3011 		if (!r0)
3012 			break;
3013 
3014 		r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3015 		r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3016 
3017 		id = (r0 >> 1) & 31;
3018 
3019 		skb = queue->skb[id];
3020 		queue->skb[id] = NULL;
3021 		if (skb) {
3022 			u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3023 
3024 			mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3025 			skb_tstamp_tx(skb, &shhwtstamps);
3026 			dev_kfree_skb_any(skb);
3027 		}
3028 	}
3029 }
3030 
3031 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3032 {
3033 	void __iomem *ptp;
3034 	u32 val;
3035 
3036 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3037 	val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3038 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3039 		mvpp2_isr_handle_ptp_queue(port, 0);
3040 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3041 		mvpp2_isr_handle_ptp_queue(port, 1);
3042 }
3043 
3044 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3045 {
3046 	struct net_device *dev = port->dev;
3047 
3048 	if (port->phylink) {
3049 		phylink_mac_change(port->phylink, link);
3050 		return;
3051 	}
3052 
3053 	if (!netif_running(dev))
3054 		return;
3055 
3056 	if (link) {
3057 		mvpp2_interrupts_enable(port);
3058 
3059 		mvpp2_egress_enable(port);
3060 		mvpp2_ingress_enable(port);
3061 		netif_carrier_on(dev);
3062 		netif_tx_wake_all_queues(dev);
3063 	} else {
3064 		netif_tx_stop_all_queues(dev);
3065 		netif_carrier_off(dev);
3066 		mvpp2_ingress_disable(port);
3067 		mvpp2_egress_disable(port);
3068 
3069 		mvpp2_interrupts_disable(port);
3070 	}
3071 }
3072 
3073 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3074 {
3075 	bool link;
3076 	u32 val;
3077 
3078 	val = readl(port->base + MVPP22_XLG_INT_STAT);
3079 	if (val & MVPP22_XLG_INT_STAT_LINK) {
3080 		val = readl(port->base + MVPP22_XLG_STATUS);
3081 		link = (val & MVPP22_XLG_STATUS_LINK_UP);
3082 		mvpp2_isr_handle_link(port, link);
3083 	}
3084 }
3085 
3086 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3087 {
3088 	bool link;
3089 	u32 val;
3090 
3091 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3092 	    phy_interface_mode_is_8023z(port->phy_interface) ||
3093 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3094 		val = readl(port->base + MVPP22_GMAC_INT_STAT);
3095 		if (val & MVPP22_GMAC_INT_STAT_LINK) {
3096 			val = readl(port->base + MVPP2_GMAC_STATUS0);
3097 			link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3098 			mvpp2_isr_handle_link(port, link);
3099 		}
3100 	}
3101 }
3102 
3103 /* Per-port interrupt for link status changes */
3104 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3105 {
3106 	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3107 	u32 val;
3108 
3109 	mvpp22_gop_mask_irq(port);
3110 
3111 	if (mvpp2_port_supports_xlg(port) &&
3112 	    mvpp2_is_xlg(port->phy_interface)) {
3113 		/* Check the external status register */
3114 		val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3115 		if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3116 			mvpp2_isr_handle_xlg(port);
3117 		if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3118 			mvpp2_isr_handle_ptp(port);
3119 	} else {
3120 		/* If it's not the XLG, we must be using the GMAC.
3121 		 * Check the summary status.
3122 		 */
3123 		val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3124 		if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3125 			mvpp2_isr_handle_gmac_internal(port);
3126 		if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3127 			mvpp2_isr_handle_ptp(port);
3128 	}
3129 
3130 	mvpp22_gop_unmask_irq(port);
3131 	return IRQ_HANDLED;
3132 }
3133 
3134 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3135 {
3136 	struct net_device *dev;
3137 	struct mvpp2_port *port;
3138 	struct mvpp2_port_pcpu *port_pcpu;
3139 	unsigned int tx_todo, cause;
3140 
3141 	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3142 	dev = port_pcpu->dev;
3143 
3144 	if (!netif_running(dev))
3145 		return HRTIMER_NORESTART;
3146 
3147 	port_pcpu->timer_scheduled = false;
3148 	port = netdev_priv(dev);
3149 
3150 	/* Process all the Tx queues */
3151 	cause = (1 << port->ntxqs) - 1;
3152 	tx_todo = mvpp2_tx_done(port, cause,
3153 				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3154 
3155 	/* Set the timer in case not all the packets were processed */
3156 	if (tx_todo && !port_pcpu->timer_scheduled) {
3157 		port_pcpu->timer_scheduled = true;
3158 		hrtimer_forward_now(&port_pcpu->tx_done_timer,
3159 				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3160 
3161 		return HRTIMER_RESTART;
3162 	}
3163 	return HRTIMER_NORESTART;
3164 }
3165 
3166 /* Main RX/TX processing routines */
3167 
3168 /* Display more error info */
3169 static void mvpp2_rx_error(struct mvpp2_port *port,
3170 			   struct mvpp2_rx_desc *rx_desc)
3171 {
3172 	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3173 	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3174 	char *err_str = NULL;
3175 
3176 	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3177 	case MVPP2_RXD_ERR_CRC:
3178 		err_str = "crc";
3179 		break;
3180 	case MVPP2_RXD_ERR_OVERRUN:
3181 		err_str = "overrun";
3182 		break;
3183 	case MVPP2_RXD_ERR_RESOURCE:
3184 		err_str = "resource";
3185 		break;
3186 	}
3187 	if (err_str && net_ratelimit())
3188 		netdev_err(port->dev,
3189 			   "bad rx status %08x (%s error), size=%zu\n",
3190 			   status, err_str, sz);
3191 }
3192 
3193 /* Handle RX checksum offload */
3194 static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
3195 			  struct sk_buff *skb)
3196 {
3197 	if (((status & MVPP2_RXD_L3_IP4) &&
3198 	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3199 	    (status & MVPP2_RXD_L3_IP6))
3200 		if (((status & MVPP2_RXD_L4_UDP) ||
3201 		     (status & MVPP2_RXD_L4_TCP)) &&
3202 		     (status & MVPP2_RXD_L4_CSUM_OK)) {
3203 			skb->csum = 0;
3204 			skb->ip_summed = CHECKSUM_UNNECESSARY;
3205 			return;
3206 		}
3207 
3208 	skb->ip_summed = CHECKSUM_NONE;
3209 }
3210 
3211 /* Allocate a new skb and add it to BM pool */
3212 static int mvpp2_rx_refill(struct mvpp2_port *port,
3213 			   struct mvpp2_bm_pool *bm_pool,
3214 			   struct page_pool *page_pool, int pool)
3215 {
3216 	dma_addr_t dma_addr;
3217 	phys_addr_t phys_addr;
3218 	void *buf;
3219 
3220 	buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3221 			      &dma_addr, &phys_addr, GFP_ATOMIC);
3222 	if (!buf)
3223 		return -ENOMEM;
3224 
3225 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3226 
3227 	return 0;
3228 }
3229 
3230 /* Handle tx checksum */
3231 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3232 {
3233 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3234 		int ip_hdr_len = 0;
3235 		u8 l4_proto;
3236 		__be16 l3_proto = vlan_get_protocol(skb);
3237 
3238 		if (l3_proto == htons(ETH_P_IP)) {
3239 			struct iphdr *ip4h = ip_hdr(skb);
3240 
3241 			/* Calculate IPv4 checksum and L4 checksum */
3242 			ip_hdr_len = ip4h->ihl;
3243 			l4_proto = ip4h->protocol;
3244 		} else if (l3_proto == htons(ETH_P_IPV6)) {
3245 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
3246 
3247 			/* Read l4_protocol from one of IPv6 extra headers */
3248 			if (skb_network_header_len(skb) > 0)
3249 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
3250 			l4_proto = ip6h->nexthdr;
3251 		} else {
3252 			return MVPP2_TXD_L4_CSUM_NOT;
3253 		}
3254 
3255 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
3256 					   l3_proto, ip_hdr_len, l4_proto);
3257 	}
3258 
3259 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3260 }
3261 
3262 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3263 {
3264 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3265 	struct mvpp2_tx_queue *aggr_txq;
3266 	struct mvpp2_txq_pcpu *txq_pcpu;
3267 	struct mvpp2_tx_queue *txq;
3268 	struct netdev_queue *nq;
3269 
3270 	txq = port->txqs[txq_id];
3271 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3272 	nq = netdev_get_tx_queue(port->dev, txq_id);
3273 	aggr_txq = &port->priv->aggr_txqs[thread];
3274 
3275 	txq_pcpu->reserved_num -= nxmit;
3276 	txq_pcpu->count += nxmit;
3277 	aggr_txq->count += nxmit;
3278 
3279 	/* Enable transmit */
3280 	wmb();
3281 	mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3282 
3283 	if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3284 		netif_tx_stop_queue(nq);
3285 
3286 	/* Finalize TX processing */
3287 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3288 		mvpp2_txq_done(port, txq, txq_pcpu);
3289 }
3290 
3291 static int
3292 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3293 		       struct xdp_frame *xdpf, bool dma_map)
3294 {
3295 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3296 	u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3297 		     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3298 	enum mvpp2_tx_buf_type buf_type;
3299 	struct mvpp2_txq_pcpu *txq_pcpu;
3300 	struct mvpp2_tx_queue *aggr_txq;
3301 	struct mvpp2_tx_desc *tx_desc;
3302 	struct mvpp2_tx_queue *txq;
3303 	int ret = MVPP2_XDP_TX;
3304 	dma_addr_t dma_addr;
3305 
3306 	txq = port->txqs[txq_id];
3307 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3308 	aggr_txq = &port->priv->aggr_txqs[thread];
3309 
3310 	/* Check number of available descriptors */
3311 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3312 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3313 		ret = MVPP2_XDP_DROPPED;
3314 		goto out;
3315 	}
3316 
3317 	/* Get a descriptor for the first part of the packet */
3318 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3319 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3320 	mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3321 
3322 	if (dma_map) {
3323 		/* XDP_REDIRECT or AF_XDP */
3324 		dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3325 					  xdpf->len, DMA_TO_DEVICE);
3326 
3327 		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3328 			mvpp2_txq_desc_put(txq);
3329 			ret = MVPP2_XDP_DROPPED;
3330 			goto out;
3331 		}
3332 
3333 		buf_type = MVPP2_TYPE_XDP_NDO;
3334 	} else {
3335 		/* XDP_TX */
3336 		struct page *page = virt_to_page(xdpf->data);
3337 
3338 		dma_addr = page_pool_get_dma_addr(page) +
3339 			   sizeof(*xdpf) + xdpf->headroom;
3340 		dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3341 					   xdpf->len, DMA_BIDIRECTIONAL);
3342 
3343 		buf_type = MVPP2_TYPE_XDP_TX;
3344 	}
3345 
3346 	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3347 
3348 	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3349 	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3350 
3351 out:
3352 	return ret;
3353 }
3354 
3355 static int
3356 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3357 {
3358 	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3359 	struct xdp_frame *xdpf;
3360 	u16 txq_id;
3361 	int ret;
3362 
3363 	xdpf = xdp_convert_buff_to_frame(xdp);
3364 	if (unlikely(!xdpf))
3365 		return MVPP2_XDP_DROPPED;
3366 
3367 	/* The first of the TX queues are used for XPS,
3368 	 * the second half for XDP_TX
3369 	 */
3370 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3371 
3372 	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3373 	if (ret == MVPP2_XDP_TX) {
3374 		u64_stats_update_begin(&stats->syncp);
3375 		stats->tx_bytes += xdpf->len;
3376 		stats->tx_packets++;
3377 		stats->xdp_tx++;
3378 		u64_stats_update_end(&stats->syncp);
3379 
3380 		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3381 	} else {
3382 		u64_stats_update_begin(&stats->syncp);
3383 		stats->xdp_tx_err++;
3384 		u64_stats_update_end(&stats->syncp);
3385 	}
3386 
3387 	return ret;
3388 }
3389 
3390 static int
3391 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3392 	       struct xdp_frame **frames, u32 flags)
3393 {
3394 	struct mvpp2_port *port = netdev_priv(dev);
3395 	int i, nxmit_byte = 0, nxmit = num_frame;
3396 	struct mvpp2_pcpu_stats *stats;
3397 	u16 txq_id;
3398 	u32 ret;
3399 
3400 	if (unlikely(test_bit(0, &port->state)))
3401 		return -ENETDOWN;
3402 
3403 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3404 		return -EINVAL;
3405 
3406 	/* The first of the TX queues are used for XPS,
3407 	 * the second half for XDP_TX
3408 	 */
3409 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3410 
3411 	for (i = 0; i < num_frame; i++) {
3412 		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3413 		if (ret == MVPP2_XDP_TX) {
3414 			nxmit_byte += frames[i]->len;
3415 		} else {
3416 			xdp_return_frame_rx_napi(frames[i]);
3417 			nxmit--;
3418 		}
3419 	}
3420 
3421 	if (likely(nxmit > 0))
3422 		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3423 
3424 	stats = this_cpu_ptr(port->stats);
3425 	u64_stats_update_begin(&stats->syncp);
3426 	stats->tx_bytes += nxmit_byte;
3427 	stats->tx_packets += nxmit;
3428 	stats->xdp_xmit += nxmit;
3429 	stats->xdp_xmit_err += num_frame - nxmit;
3430 	u64_stats_update_end(&stats->syncp);
3431 
3432 	return nxmit;
3433 }
3434 
3435 static int
3436 mvpp2_run_xdp(struct mvpp2_port *port, struct mvpp2_rx_queue *rxq,
3437 	      struct bpf_prog *prog, struct xdp_buff *xdp,
3438 	      struct page_pool *pp, struct mvpp2_pcpu_stats *stats)
3439 {
3440 	unsigned int len, sync, err;
3441 	struct page *page;
3442 	u32 ret, act;
3443 
3444 	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3445 	act = bpf_prog_run_xdp(prog, xdp);
3446 
3447 	/* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */
3448 	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3449 	sync = max(sync, len);
3450 
3451 	switch (act) {
3452 	case XDP_PASS:
3453 		stats->xdp_pass++;
3454 		ret = MVPP2_XDP_PASS;
3455 		break;
3456 	case XDP_REDIRECT:
3457 		err = xdp_do_redirect(port->dev, xdp, prog);
3458 		if (unlikely(err)) {
3459 			ret = MVPP2_XDP_DROPPED;
3460 			page = virt_to_head_page(xdp->data);
3461 			page_pool_put_page(pp, page, sync, true);
3462 		} else {
3463 			ret = MVPP2_XDP_REDIR;
3464 			stats->xdp_redirect++;
3465 		}
3466 		break;
3467 	case XDP_TX:
3468 		ret = mvpp2_xdp_xmit_back(port, xdp);
3469 		if (ret != MVPP2_XDP_TX) {
3470 			page = virt_to_head_page(xdp->data);
3471 			page_pool_put_page(pp, page, sync, true);
3472 		}
3473 		break;
3474 	default:
3475 		bpf_warn_invalid_xdp_action(act);
3476 		fallthrough;
3477 	case XDP_ABORTED:
3478 		trace_xdp_exception(port->dev, prog, act);
3479 		fallthrough;
3480 	case XDP_DROP:
3481 		page = virt_to_head_page(xdp->data);
3482 		page_pool_put_page(pp, page, sync, true);
3483 		ret = MVPP2_XDP_DROPPED;
3484 		stats->xdp_drop++;
3485 		break;
3486 	}
3487 
3488 	return ret;
3489 }
3490 
3491 /* Main rx processing */
3492 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3493 		    int rx_todo, struct mvpp2_rx_queue *rxq)
3494 {
3495 	struct net_device *dev = port->dev;
3496 	struct mvpp2_pcpu_stats ps = {};
3497 	enum dma_data_direction dma_dir;
3498 	struct bpf_prog *xdp_prog;
3499 	struct xdp_buff xdp;
3500 	int rx_received;
3501 	int rx_done = 0;
3502 	u32 xdp_ret = 0;
3503 
3504 	rcu_read_lock();
3505 
3506 	xdp_prog = READ_ONCE(port->xdp_prog);
3507 
3508 	/* Get number of received packets and clamp the to-do */
3509 	rx_received = mvpp2_rxq_received(port, rxq->id);
3510 	if (rx_todo > rx_received)
3511 		rx_todo = rx_received;
3512 
3513 	while (rx_done < rx_todo) {
3514 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3515 		struct mvpp2_bm_pool *bm_pool;
3516 		struct page_pool *pp = NULL;
3517 		struct sk_buff *skb;
3518 		unsigned int frag_size;
3519 		dma_addr_t dma_addr;
3520 		phys_addr_t phys_addr;
3521 		u32 rx_status, timestamp;
3522 		int pool, rx_bytes, err, ret;
3523 		void *data;
3524 
3525 		rx_done++;
3526 		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3527 		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
3528 		rx_bytes -= MVPP2_MH_SIZE;
3529 		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3530 		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3531 		data = (void *)phys_to_virt(phys_addr);
3532 
3533 		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3534 			MVPP2_RXD_BM_POOL_ID_OFFS;
3535 		bm_pool = &port->priv->bm_pools[pool];
3536 
3537 		/* In case of an error, release the requested buffer pointer
3538 		 * to the Buffer Manager. This request process is controlled
3539 		 * by the hardware, and the information about the buffer is
3540 		 * comprised by the RX descriptor.
3541 		 */
3542 		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3543 			goto err_drop_frame;
3544 
3545 		if (port->priv->percpu_pools) {
3546 			pp = port->priv->page_pool[pool];
3547 			dma_dir = page_pool_get_dma_dir(pp);
3548 		} else {
3549 			dma_dir = DMA_FROM_DEVICE;
3550 		}
3551 
3552 		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3553 					rx_bytes + MVPP2_MH_SIZE,
3554 					dma_dir);
3555 
3556 		/* Prefetch header */
3557 		prefetch(data);
3558 
3559 		if (bm_pool->frag_size > PAGE_SIZE)
3560 			frag_size = 0;
3561 		else
3562 			frag_size = bm_pool->frag_size;
3563 
3564 		if (xdp_prog) {
3565 			xdp.data_hard_start = data;
3566 			xdp.data = data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM;
3567 			xdp.data_end = xdp.data + rx_bytes;
3568 			xdp.frame_sz = PAGE_SIZE;
3569 
3570 			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3571 				xdp.rxq = &rxq->xdp_rxq_short;
3572 			else
3573 				xdp.rxq = &rxq->xdp_rxq_long;
3574 
3575 			xdp_set_data_meta_invalid(&xdp);
3576 
3577 			ret = mvpp2_run_xdp(port, rxq, xdp_prog, &xdp, pp, &ps);
3578 
3579 			if (ret) {
3580 				xdp_ret |= ret;
3581 				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3582 				if (err) {
3583 					netdev_err(port->dev, "failed to refill BM pools\n");
3584 					goto err_drop_frame;
3585 				}
3586 
3587 				ps.rx_packets++;
3588 				ps.rx_bytes += rx_bytes;
3589 				continue;
3590 			}
3591 		}
3592 
3593 		skb = build_skb(data, frag_size);
3594 		if (!skb) {
3595 			netdev_warn(port->dev, "skb build failed\n");
3596 			goto err_drop_frame;
3597 		}
3598 
3599 		/* If we have RX hardware timestamping enabled, grab the
3600 		 * timestamp from the queue and convert.
3601 		 */
3602 		if (mvpp22_rx_hwtstamping(port)) {
3603 			timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
3604 			mvpp22_tai_tstamp(port->priv->tai, timestamp,
3605 					 skb_hwtstamps(skb));
3606 		}
3607 
3608 		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3609 		if (err) {
3610 			netdev_err(port->dev, "failed to refill BM pools\n");
3611 			dev_kfree_skb_any(skb);
3612 			goto err_drop_frame;
3613 		}
3614 
3615 		if (pp)
3616 			page_pool_release_page(pp, virt_to_page(data));
3617 		else
3618 			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
3619 					       bm_pool->buf_size, DMA_FROM_DEVICE,
3620 					       DMA_ATTR_SKIP_CPU_SYNC);
3621 
3622 		ps.rx_packets++;
3623 		ps.rx_bytes += rx_bytes;
3624 
3625 		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3626 		skb_put(skb, rx_bytes);
3627 		skb->protocol = eth_type_trans(skb, dev);
3628 		mvpp2_rx_csum(port, rx_status, skb);
3629 
3630 		napi_gro_receive(napi, skb);
3631 		continue;
3632 
3633 err_drop_frame:
3634 		dev->stats.rx_errors++;
3635 		mvpp2_rx_error(port, rx_desc);
3636 		/* Return the buffer to the pool */
3637 		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3638 	}
3639 
3640 	rcu_read_unlock();
3641 
3642 	if (xdp_ret & MVPP2_XDP_REDIR)
3643 		xdp_do_flush_map();
3644 
3645 	if (ps.rx_packets) {
3646 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3647 
3648 		u64_stats_update_begin(&stats->syncp);
3649 		stats->rx_packets += ps.rx_packets;
3650 		stats->rx_bytes   += ps.rx_bytes;
3651 		/* xdp */
3652 		stats->xdp_redirect += ps.xdp_redirect;
3653 		stats->xdp_pass += ps.xdp_pass;
3654 		stats->xdp_drop += ps.xdp_drop;
3655 		u64_stats_update_end(&stats->syncp);
3656 	}
3657 
3658 	/* Update Rx queue management counters */
3659 	wmb();
3660 	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
3661 
3662 	return rx_todo;
3663 }
3664 
3665 static inline void
3666 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
3667 		  struct mvpp2_tx_desc *desc)
3668 {
3669 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3670 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3671 
3672 	dma_addr_t buf_dma_addr =
3673 		mvpp2_txdesc_dma_addr_get(port, desc);
3674 	size_t buf_sz =
3675 		mvpp2_txdesc_size_get(port, desc);
3676 	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
3677 		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
3678 				 buf_sz, DMA_TO_DEVICE);
3679 	mvpp2_txq_desc_put(txq);
3680 }
3681 
3682 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
3683 				   struct mvpp2_tx_desc *desc)
3684 {
3685 	/* We only need to clear the low bits */
3686 	if (port->priv->hw_version != MVPP21)
3687 		desc->pp22.ptp_descriptor &=
3688 			cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3689 }
3690 
3691 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
3692 			       struct mvpp2_tx_desc *tx_desc,
3693 			       struct sk_buff *skb)
3694 {
3695 	struct mvpp2_hwtstamp_queue *queue;
3696 	unsigned int mtype, type, i;
3697 	struct ptp_header *hdr;
3698 	u64 ptpdesc;
3699 
3700 	if (port->priv->hw_version == MVPP21 ||
3701 	    port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
3702 		return false;
3703 
3704 	type = ptp_classify_raw(skb);
3705 	if (!type)
3706 		return false;
3707 
3708 	hdr = ptp_parse_header(skb, type);
3709 	if (!hdr)
3710 		return false;
3711 
3712 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3713 
3714 	ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
3715 		  MVPP22_PTP_ACTION_CAPTURE;
3716 	queue = &port->tx_hwtstamp_queue[0];
3717 
3718 	switch (type & PTP_CLASS_VMASK) {
3719 	case PTP_CLASS_V1:
3720 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
3721 		break;
3722 
3723 	case PTP_CLASS_V2:
3724 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
3725 		mtype = hdr->tsmt & 15;
3726 		/* Direct PTP Sync messages to queue 1 */
3727 		if (mtype == 0) {
3728 			ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
3729 			queue = &port->tx_hwtstamp_queue[1];
3730 		}
3731 		break;
3732 	}
3733 
3734 	/* Take a reference on the skb and insert into our queue */
3735 	i = queue->next;
3736 	queue->next = (i + 1) & 31;
3737 	if (queue->skb[i])
3738 		dev_kfree_skb_any(queue->skb[i]);
3739 	queue->skb[i] = skb_get(skb);
3740 
3741 	ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
3742 
3743 	/*
3744 	 * 3:0		- PTPAction
3745 	 * 6:4		- PTPPacketFormat
3746 	 * 7		- PTP_CF_WraparoundCheckEn
3747 	 * 9:8		- IngressTimestampSeconds[1:0]
3748 	 * 10		- Reserved
3749 	 * 11		- MACTimestampingEn
3750 	 * 17:12	- PTP_TimestampQueueEntryID[5:0]
3751 	 * 18		- PTPTimestampQueueSelect
3752 	 * 19		- UDPChecksumUpdateEn
3753 	 * 27:20	- TimestampOffset
3754 	 *			PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
3755 	 *			NTPTs, Y.1731 - L3 to timestamp entry
3756 	 * 35:28	- UDP Checksum Offset
3757 	 *
3758 	 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
3759 	 */
3760 	tx_desc->pp22.ptp_descriptor &=
3761 		cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
3762 	tx_desc->pp22.ptp_descriptor |=
3763 		cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
3764 	tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
3765 	tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
3766 
3767 	return true;
3768 }
3769 
3770 /* Handle tx fragmentation processing */
3771 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
3772 				 struct mvpp2_tx_queue *aggr_txq,
3773 				 struct mvpp2_tx_queue *txq)
3774 {
3775 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3776 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3777 	struct mvpp2_tx_desc *tx_desc;
3778 	int i;
3779 	dma_addr_t buf_dma_addr;
3780 
3781 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3782 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3783 		void *addr = skb_frag_address(frag);
3784 
3785 		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3786 		mvpp2_txdesc_clear_ptp(port, tx_desc);
3787 		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3788 		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
3789 
3790 		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
3791 					      skb_frag_size(frag),
3792 					      DMA_TO_DEVICE);
3793 		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
3794 			mvpp2_txq_desc_put(txq);
3795 			goto cleanup;
3796 		}
3797 
3798 		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3799 
3800 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
3801 			/* Last descriptor */
3802 			mvpp2_txdesc_cmd_set(port, tx_desc,
3803 					     MVPP2_TXD_L_DESC);
3804 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3805 		} else {
3806 			/* Descriptor in the middle: Not First, Not Last */
3807 			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3808 			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3809 		}
3810 	}
3811 
3812 	return 0;
3813 cleanup:
3814 	/* Release all descriptors that were used to map fragments of
3815 	 * this packet, as well as the corresponding DMA mappings
3816 	 */
3817 	for (i = i - 1; i >= 0; i--) {
3818 		tx_desc = txq->descs + i;
3819 		tx_desc_unmap_put(port, txq, tx_desc);
3820 	}
3821 
3822 	return -ENOMEM;
3823 }
3824 
3825 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
3826 				     struct net_device *dev,
3827 				     struct mvpp2_tx_queue *txq,
3828 				     struct mvpp2_tx_queue *aggr_txq,
3829 				     struct mvpp2_txq_pcpu *txq_pcpu,
3830 				     int hdr_sz)
3831 {
3832 	struct mvpp2_port *port = netdev_priv(dev);
3833 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3834 	dma_addr_t addr;
3835 
3836 	mvpp2_txdesc_clear_ptp(port, tx_desc);
3837 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3838 	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
3839 
3840 	addr = txq_pcpu->tso_headers_dma +
3841 	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3842 	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
3843 
3844 	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
3845 					    MVPP2_TXD_F_DESC |
3846 					    MVPP2_TXD_PADDING_DISABLE);
3847 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3848 }
3849 
3850 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
3851 				     struct net_device *dev, struct tso_t *tso,
3852 				     struct mvpp2_tx_queue *txq,
3853 				     struct mvpp2_tx_queue *aggr_txq,
3854 				     struct mvpp2_txq_pcpu *txq_pcpu,
3855 				     int sz, bool left, bool last)
3856 {
3857 	struct mvpp2_port *port = netdev_priv(dev);
3858 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3859 	dma_addr_t buf_dma_addr;
3860 
3861 	mvpp2_txdesc_clear_ptp(port, tx_desc);
3862 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3863 	mvpp2_txdesc_size_set(port, tx_desc, sz);
3864 
3865 	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
3866 				      DMA_TO_DEVICE);
3867 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3868 		mvpp2_txq_desc_put(txq);
3869 		return -ENOMEM;
3870 	}
3871 
3872 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3873 
3874 	if (!left) {
3875 		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
3876 		if (last) {
3877 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
3878 			return 0;
3879 		}
3880 	} else {
3881 		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
3882 	}
3883 
3884 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
3885 	return 0;
3886 }
3887 
3888 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
3889 			struct mvpp2_tx_queue *txq,
3890 			struct mvpp2_tx_queue *aggr_txq,
3891 			struct mvpp2_txq_pcpu *txq_pcpu)
3892 {
3893 	struct mvpp2_port *port = netdev_priv(dev);
3894 	int hdr_sz, i, len, descs = 0;
3895 	struct tso_t tso;
3896 
3897 	/* Check number of available descriptors */
3898 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
3899 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
3900 					     tso_count_descs(skb)))
3901 		return 0;
3902 
3903 	hdr_sz = tso_start(skb, &tso);
3904 
3905 	len = skb->len - hdr_sz;
3906 	while (len > 0) {
3907 		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
3908 		char *hdr = txq_pcpu->tso_headers +
3909 			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
3910 
3911 		len -= left;
3912 		descs++;
3913 
3914 		tso_build_hdr(skb, hdr, &tso, left, len == 0);
3915 		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
3916 
3917 		while (left > 0) {
3918 			int sz = min_t(int, tso.size, left);
3919 			left -= sz;
3920 			descs++;
3921 
3922 			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
3923 					       txq_pcpu, sz, left, len == 0))
3924 				goto release;
3925 			tso_build_data(skb, &tso, sz);
3926 		}
3927 	}
3928 
3929 	return descs;
3930 
3931 release:
3932 	for (i = descs - 1; i >= 0; i--) {
3933 		struct mvpp2_tx_desc *tx_desc = txq->descs + i;
3934 		tx_desc_unmap_put(port, txq, tx_desc);
3935 	}
3936 	return 0;
3937 }
3938 
3939 /* Main tx processing */
3940 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
3941 {
3942 	struct mvpp2_port *port = netdev_priv(dev);
3943 	struct mvpp2_tx_queue *txq, *aggr_txq;
3944 	struct mvpp2_txq_pcpu *txq_pcpu;
3945 	struct mvpp2_tx_desc *tx_desc;
3946 	dma_addr_t buf_dma_addr;
3947 	unsigned long flags = 0;
3948 	unsigned int thread;
3949 	int frags = 0;
3950 	u16 txq_id;
3951 	u32 tx_cmd;
3952 
3953 	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3954 
3955 	txq_id = skb_get_queue_mapping(skb);
3956 	txq = port->txqs[txq_id];
3957 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3958 	aggr_txq = &port->priv->aggr_txqs[thread];
3959 
3960 	if (test_bit(thread, &port->priv->lock_map))
3961 		spin_lock_irqsave(&port->tx_lock[thread], flags);
3962 
3963 	if (skb_is_gso(skb)) {
3964 		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
3965 		goto out;
3966 	}
3967 	frags = skb_shinfo(skb)->nr_frags + 1;
3968 
3969 	/* Check number of available descriptors */
3970 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
3971 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
3972 		frags = 0;
3973 		goto out;
3974 	}
3975 
3976 	/* Get a descriptor for the first part of the packet */
3977 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3978 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
3979 	    !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
3980 		mvpp2_txdesc_clear_ptp(port, tx_desc);
3981 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3982 	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
3983 
3984 	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
3985 				      skb_headlen(skb), DMA_TO_DEVICE);
3986 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
3987 		mvpp2_txq_desc_put(txq);
3988 		frags = 0;
3989 		goto out;
3990 	}
3991 
3992 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
3993 
3994 	tx_cmd = mvpp2_skb_tx_csum(port, skb);
3995 
3996 	if (frags == 1) {
3997 		/* First and Last descriptor */
3998 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3999 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4000 		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4001 	} else {
4002 		/* First but not Last */
4003 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4004 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4005 		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4006 
4007 		/* Continue with other skb fragments */
4008 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4009 			tx_desc_unmap_put(port, txq, tx_desc);
4010 			frags = 0;
4011 		}
4012 	}
4013 
4014 out:
4015 	if (frags > 0) {
4016 		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4017 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4018 
4019 		txq_pcpu->reserved_num -= frags;
4020 		txq_pcpu->count += frags;
4021 		aggr_txq->count += frags;
4022 
4023 		/* Enable transmit */
4024 		wmb();
4025 		mvpp2_aggr_txq_pend_desc_add(port, frags);
4026 
4027 		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4028 			netif_tx_stop_queue(nq);
4029 
4030 		u64_stats_update_begin(&stats->syncp);
4031 		stats->tx_packets++;
4032 		stats->tx_bytes += skb->len;
4033 		u64_stats_update_end(&stats->syncp);
4034 	} else {
4035 		dev->stats.tx_dropped++;
4036 		dev_kfree_skb_any(skb);
4037 	}
4038 
4039 	/* Finalize TX processing */
4040 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4041 		mvpp2_txq_done(port, txq, txq_pcpu);
4042 
4043 	/* Set the timer in case not all frags were processed */
4044 	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4045 	    txq_pcpu->count > 0) {
4046 		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4047 
4048 		if (!port_pcpu->timer_scheduled) {
4049 			port_pcpu->timer_scheduled = true;
4050 			hrtimer_start(&port_pcpu->tx_done_timer,
4051 				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4052 				      HRTIMER_MODE_REL_PINNED_SOFT);
4053 		}
4054 	}
4055 
4056 	if (test_bit(thread, &port->priv->lock_map))
4057 		spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4058 
4059 	return NETDEV_TX_OK;
4060 }
4061 
4062 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4063 {
4064 	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4065 		netdev_err(dev, "FCS error\n");
4066 	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4067 		netdev_err(dev, "rx fifo overrun error\n");
4068 	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4069 		netdev_err(dev, "tx fifo underrun error\n");
4070 }
4071 
4072 static int mvpp2_poll(struct napi_struct *napi, int budget)
4073 {
4074 	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4075 	int rx_done = 0;
4076 	struct mvpp2_port *port = netdev_priv(napi->dev);
4077 	struct mvpp2_queue_vector *qv;
4078 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4079 
4080 	qv = container_of(napi, struct mvpp2_queue_vector, napi);
4081 
4082 	/* Rx/Tx cause register
4083 	 *
4084 	 * Bits 0-15: each bit indicates received packets on the Rx queue
4085 	 * (bit 0 is for Rx queue 0).
4086 	 *
4087 	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4088 	 * (bit 16 is for Tx queue 0).
4089 	 *
4090 	 * Each CPU has its own Rx/Tx cause register
4091 	 */
4092 	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4093 						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4094 
4095 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4096 	if (cause_misc) {
4097 		mvpp2_cause_error(port->dev, cause_misc);
4098 
4099 		/* Clear the cause register */
4100 		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4101 		mvpp2_thread_write(port->priv, thread,
4102 				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4103 				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4104 	}
4105 
4106 	if (port->has_tx_irqs) {
4107 		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4108 		if (cause_tx) {
4109 			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4110 			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4111 		}
4112 	}
4113 
4114 	/* Process RX packets */
4115 	cause_rx = cause_rx_tx &
4116 		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
4117 	cause_rx <<= qv->first_rxq;
4118 	cause_rx |= qv->pending_cause_rx;
4119 	while (cause_rx && budget > 0) {
4120 		int count;
4121 		struct mvpp2_rx_queue *rxq;
4122 
4123 		rxq = mvpp2_get_rx_queue(port, cause_rx);
4124 		if (!rxq)
4125 			break;
4126 
4127 		count = mvpp2_rx(port, napi, budget, rxq);
4128 		rx_done += count;
4129 		budget -= count;
4130 		if (budget > 0) {
4131 			/* Clear the bit associated to this Rx queue
4132 			 * so that next iteration will continue from
4133 			 * the next Rx queue.
4134 			 */
4135 			cause_rx &= ~(1 << rxq->logic_rxq);
4136 		}
4137 	}
4138 
4139 	if (budget > 0) {
4140 		cause_rx = 0;
4141 		napi_complete_done(napi, rx_done);
4142 
4143 		mvpp2_qvec_interrupt_enable(qv);
4144 	}
4145 	qv->pending_cause_rx = cause_rx;
4146 	return rx_done;
4147 }
4148 
4149 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
4150 {
4151 	u32 ctrl3;
4152 
4153 	/* Set the GMAC & XLG MAC in reset */
4154 	mvpp2_mac_reset_assert(port);
4155 
4156 	/* Set the MPCS and XPCS in reset */
4157 	mvpp22_pcs_reset_assert(port);
4158 
4159 	/* comphy reconfiguration */
4160 	mvpp22_comphy_init(port);
4161 
4162 	/* gop reconfiguration */
4163 	mvpp22_gop_init(port);
4164 
4165 	mvpp22_pcs_reset_deassert(port);
4166 
4167 	if (mvpp2_port_supports_xlg(port)) {
4168 		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4169 		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4170 
4171 		if (mvpp2_is_xlg(port->phy_interface))
4172 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4173 		else
4174 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4175 
4176 		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4177 	}
4178 
4179 	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
4180 		mvpp2_xlg_max_rx_size_set(port);
4181 	else
4182 		mvpp2_gmac_max_rx_size_set(port);
4183 }
4184 
4185 /* Set hw internals when starting port */
4186 static void mvpp2_start_dev(struct mvpp2_port *port)
4187 {
4188 	int i;
4189 
4190 	mvpp2_txp_max_tx_size_set(port);
4191 
4192 	for (i = 0; i < port->nqvecs; i++)
4193 		napi_enable(&port->qvecs[i].napi);
4194 
4195 	/* Enable interrupts on all threads */
4196 	mvpp2_interrupts_enable(port);
4197 
4198 	if (port->priv->hw_version == MVPP22)
4199 		mvpp22_mode_reconfigure(port);
4200 
4201 	if (port->phylink) {
4202 		phylink_start(port->phylink);
4203 	} else {
4204 		mvpp2_acpi_start(port);
4205 	}
4206 
4207 	netif_tx_start_all_queues(port->dev);
4208 
4209 	clear_bit(0, &port->state);
4210 }
4211 
4212 /* Set hw internals when stopping port */
4213 static void mvpp2_stop_dev(struct mvpp2_port *port)
4214 {
4215 	int i;
4216 
4217 	set_bit(0, &port->state);
4218 
4219 	/* Disable interrupts on all threads */
4220 	mvpp2_interrupts_disable(port);
4221 
4222 	for (i = 0; i < port->nqvecs; i++)
4223 		napi_disable(&port->qvecs[i].napi);
4224 
4225 	if (port->phylink)
4226 		phylink_stop(port->phylink);
4227 	phy_power_off(port->comphy);
4228 }
4229 
4230 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4231 				       struct ethtool_ringparam *ring)
4232 {
4233 	u16 new_rx_pending = ring->rx_pending;
4234 	u16 new_tx_pending = ring->tx_pending;
4235 
4236 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
4237 		return -EINVAL;
4238 
4239 	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4240 		new_rx_pending = MVPP2_MAX_RXD_MAX;
4241 	else if (!IS_ALIGNED(ring->rx_pending, 16))
4242 		new_rx_pending = ALIGN(ring->rx_pending, 16);
4243 
4244 	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4245 		new_tx_pending = MVPP2_MAX_TXD_MAX;
4246 	else if (!IS_ALIGNED(ring->tx_pending, 32))
4247 		new_tx_pending = ALIGN(ring->tx_pending, 32);
4248 
4249 	/* The Tx ring size cannot be smaller than the minimum number of
4250 	 * descriptors needed for TSO.
4251 	 */
4252 	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4253 		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4254 
4255 	if (ring->rx_pending != new_rx_pending) {
4256 		netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
4257 			    ring->rx_pending, new_rx_pending);
4258 		ring->rx_pending = new_rx_pending;
4259 	}
4260 
4261 	if (ring->tx_pending != new_tx_pending) {
4262 		netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
4263 			    ring->tx_pending, new_tx_pending);
4264 		ring->tx_pending = new_tx_pending;
4265 	}
4266 
4267 	return 0;
4268 }
4269 
4270 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4271 {
4272 	u32 mac_addr_l, mac_addr_m, mac_addr_h;
4273 
4274 	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4275 	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4276 	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4277 	addr[0] = (mac_addr_h >> 24) & 0xFF;
4278 	addr[1] = (mac_addr_h >> 16) & 0xFF;
4279 	addr[2] = (mac_addr_h >> 8) & 0xFF;
4280 	addr[3] = mac_addr_h & 0xFF;
4281 	addr[4] = mac_addr_m & 0xFF;
4282 	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4283 }
4284 
4285 static int mvpp2_irqs_init(struct mvpp2_port *port)
4286 {
4287 	int err, i;
4288 
4289 	for (i = 0; i < port->nqvecs; i++) {
4290 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4291 
4292 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4293 			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4294 			if (!qv->mask) {
4295 				err = -ENOMEM;
4296 				goto err;
4297 			}
4298 
4299 			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4300 		}
4301 
4302 		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4303 		if (err)
4304 			goto err;
4305 
4306 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4307 			unsigned int cpu;
4308 
4309 			for_each_present_cpu(cpu) {
4310 				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4311 				    qv->sw_thread_id)
4312 					cpumask_set_cpu(cpu, qv->mask);
4313 			}
4314 
4315 			irq_set_affinity_hint(qv->irq, qv->mask);
4316 		}
4317 	}
4318 
4319 	return 0;
4320 err:
4321 	for (i = 0; i < port->nqvecs; i++) {
4322 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4323 
4324 		irq_set_affinity_hint(qv->irq, NULL);
4325 		kfree(qv->mask);
4326 		qv->mask = NULL;
4327 		free_irq(qv->irq, qv);
4328 	}
4329 
4330 	return err;
4331 }
4332 
4333 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4334 {
4335 	int i;
4336 
4337 	for (i = 0; i < port->nqvecs; i++) {
4338 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4339 
4340 		irq_set_affinity_hint(qv->irq, NULL);
4341 		kfree(qv->mask);
4342 		qv->mask = NULL;
4343 		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4344 		free_irq(qv->irq, qv);
4345 	}
4346 }
4347 
4348 static bool mvpp22_rss_is_supported(void)
4349 {
4350 	return queue_mode == MVPP2_QDIST_MULTI_MODE;
4351 }
4352 
4353 static int mvpp2_open(struct net_device *dev)
4354 {
4355 	struct mvpp2_port *port = netdev_priv(dev);
4356 	struct mvpp2 *priv = port->priv;
4357 	unsigned char mac_bcast[ETH_ALEN] = {
4358 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4359 	bool valid = false;
4360 	int err;
4361 
4362 	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4363 	if (err) {
4364 		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4365 		return err;
4366 	}
4367 	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4368 	if (err) {
4369 		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4370 		return err;
4371 	}
4372 	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4373 	if (err) {
4374 		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4375 		return err;
4376 	}
4377 	err = mvpp2_prs_def_flow(port);
4378 	if (err) {
4379 		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4380 		return err;
4381 	}
4382 
4383 	/* Allocate the Rx/Tx queues */
4384 	err = mvpp2_setup_rxqs(port);
4385 	if (err) {
4386 		netdev_err(port->dev, "cannot allocate Rx queues\n");
4387 		return err;
4388 	}
4389 
4390 	err = mvpp2_setup_txqs(port);
4391 	if (err) {
4392 		netdev_err(port->dev, "cannot allocate Tx queues\n");
4393 		goto err_cleanup_rxqs;
4394 	}
4395 
4396 	err = mvpp2_irqs_init(port);
4397 	if (err) {
4398 		netdev_err(port->dev, "cannot init IRQs\n");
4399 		goto err_cleanup_txqs;
4400 	}
4401 
4402 	/* Phylink isn't supported yet in ACPI mode */
4403 	if (port->of_node) {
4404 		err = phylink_of_phy_connect(port->phylink, port->of_node, 0);
4405 		if (err) {
4406 			netdev_err(port->dev, "could not attach PHY (%d)\n",
4407 				   err);
4408 			goto err_free_irq;
4409 		}
4410 
4411 		valid = true;
4412 	}
4413 
4414 	if (priv->hw_version == MVPP22 && port->port_irq) {
4415 		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4416 				  dev->name, port);
4417 		if (err) {
4418 			netdev_err(port->dev,
4419 				   "cannot request port link/ptp IRQ %d\n",
4420 				   port->port_irq);
4421 			goto err_free_irq;
4422 		}
4423 
4424 		mvpp22_gop_setup_irq(port);
4425 
4426 		/* In default link is down */
4427 		netif_carrier_off(port->dev);
4428 
4429 		valid = true;
4430 	} else {
4431 		port->port_irq = 0;
4432 	}
4433 
4434 	if (!valid) {
4435 		netdev_err(port->dev,
4436 			   "invalid configuration: no dt or link IRQ");
4437 		goto err_free_irq;
4438 	}
4439 
4440 	/* Unmask interrupts on all CPUs */
4441 	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4442 	mvpp2_shared_interrupt_mask_unmask(port, false);
4443 
4444 	mvpp2_start_dev(port);
4445 
4446 	/* Start hardware statistics gathering */
4447 	queue_delayed_work(priv->stats_queue, &port->stats_work,
4448 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
4449 
4450 	return 0;
4451 
4452 err_free_irq:
4453 	mvpp2_irqs_deinit(port);
4454 err_cleanup_txqs:
4455 	mvpp2_cleanup_txqs(port);
4456 err_cleanup_rxqs:
4457 	mvpp2_cleanup_rxqs(port);
4458 	return err;
4459 }
4460 
4461 static int mvpp2_stop(struct net_device *dev)
4462 {
4463 	struct mvpp2_port *port = netdev_priv(dev);
4464 	struct mvpp2_port_pcpu *port_pcpu;
4465 	unsigned int thread;
4466 
4467 	mvpp2_stop_dev(port);
4468 
4469 	/* Mask interrupts on all threads */
4470 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
4471 	mvpp2_shared_interrupt_mask_unmask(port, true);
4472 
4473 	if (port->phylink)
4474 		phylink_disconnect_phy(port->phylink);
4475 	if (port->port_irq)
4476 		free_irq(port->port_irq, port);
4477 
4478 	mvpp2_irqs_deinit(port);
4479 	if (!port->has_tx_irqs) {
4480 		for (thread = 0; thread < port->priv->nthreads; thread++) {
4481 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
4482 
4483 			hrtimer_cancel(&port_pcpu->tx_done_timer);
4484 			port_pcpu->timer_scheduled = false;
4485 		}
4486 	}
4487 	mvpp2_cleanup_rxqs(port);
4488 	mvpp2_cleanup_txqs(port);
4489 
4490 	cancel_delayed_work_sync(&port->stats_work);
4491 
4492 	mvpp2_mac_reset_assert(port);
4493 	mvpp22_pcs_reset_assert(port);
4494 
4495 	return 0;
4496 }
4497 
4498 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4499 					struct netdev_hw_addr_list *list)
4500 {
4501 	struct netdev_hw_addr *ha;
4502 	int ret;
4503 
4504 	netdev_hw_addr_list_for_each(ha, list) {
4505 		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4506 		if (ret)
4507 			return ret;
4508 	}
4509 
4510 	return 0;
4511 }
4512 
4513 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4514 {
4515 	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4516 		mvpp2_prs_vid_enable_filtering(port);
4517 	else
4518 		mvpp2_prs_vid_disable_filtering(port);
4519 
4520 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4521 				  MVPP2_PRS_L2_UNI_CAST, enable);
4522 
4523 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4524 				  MVPP2_PRS_L2_MULTI_CAST, enable);
4525 }
4526 
4527 static void mvpp2_set_rx_mode(struct net_device *dev)
4528 {
4529 	struct mvpp2_port *port = netdev_priv(dev);
4530 
4531 	/* Clear the whole UC and MC list */
4532 	mvpp2_prs_mac_del_all(port);
4533 
4534 	if (dev->flags & IFF_PROMISC) {
4535 		mvpp2_set_rx_promisc(port, true);
4536 		return;
4537 	}
4538 
4539 	mvpp2_set_rx_promisc(port, false);
4540 
4541 	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4542 	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4543 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4544 					  MVPP2_PRS_L2_UNI_CAST, true);
4545 
4546 	if (dev->flags & IFF_ALLMULTI) {
4547 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4548 					  MVPP2_PRS_L2_MULTI_CAST, true);
4549 		return;
4550 	}
4551 
4552 	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4553 	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4554 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4555 					  MVPP2_PRS_L2_MULTI_CAST, true);
4556 }
4557 
4558 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4559 {
4560 	const struct sockaddr *addr = p;
4561 	int err;
4562 
4563 	if (!is_valid_ether_addr(addr->sa_data))
4564 		return -EADDRNOTAVAIL;
4565 
4566 	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4567 	if (err) {
4568 		/* Reconfigure parser accept the original MAC address */
4569 		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4570 		netdev_err(dev, "failed to change MAC address\n");
4571 	}
4572 	return err;
4573 }
4574 
4575 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4576  * then bring up again all ports.
4577  */
4578 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4579 {
4580 	int numbufs = MVPP2_BM_POOLS_NUM, i;
4581 	struct mvpp2_port *port = NULL;
4582 	bool status[MVPP2_MAX_PORTS];
4583 
4584 	for (i = 0; i < priv->port_count; i++) {
4585 		port = priv->port_list[i];
4586 		status[i] = netif_running(port->dev);
4587 		if (status[i])
4588 			mvpp2_stop(port->dev);
4589 	}
4590 
4591 	/* nrxqs is the same for all ports */
4592 	if (priv->percpu_pools)
4593 		numbufs = port->nrxqs * 2;
4594 
4595 	for (i = 0; i < numbufs; i++)
4596 		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4597 
4598 	devm_kfree(port->dev->dev.parent, priv->bm_pools);
4599 	priv->percpu_pools = percpu;
4600 	mvpp2_bm_init(port->dev->dev.parent, priv);
4601 
4602 	for (i = 0; i < priv->port_count; i++) {
4603 		port = priv->port_list[i];
4604 		mvpp2_swf_bm_pool_init(port);
4605 		if (status[i])
4606 			mvpp2_open(port->dev);
4607 	}
4608 
4609 	return 0;
4610 }
4611 
4612 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
4613 {
4614 	struct mvpp2_port *port = netdev_priv(dev);
4615 	bool running = netif_running(dev);
4616 	struct mvpp2 *priv = port->priv;
4617 	int err;
4618 
4619 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
4620 		netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
4621 			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
4622 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
4623 	}
4624 
4625 	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
4626 		if (port->xdp_prog) {
4627 			netdev_err(dev, "Jumbo frames are not supported with XDP\n");
4628 			return -EINVAL;
4629 		}
4630 		if (priv->percpu_pools) {
4631 			netdev_warn(dev, "mtu %d too high, switching to shared buffers", mtu);
4632 			mvpp2_bm_switch_buffers(priv, false);
4633 		}
4634 	} else {
4635 		bool jumbo = false;
4636 		int i;
4637 
4638 		for (i = 0; i < priv->port_count; i++)
4639 			if (priv->port_list[i] != port &&
4640 			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
4641 			    MVPP2_BM_LONG_PKT_SIZE) {
4642 				jumbo = true;
4643 				break;
4644 			}
4645 
4646 		/* No port is using jumbo frames */
4647 		if (!jumbo) {
4648 			dev_info(port->dev->dev.parent,
4649 				 "all ports have a low MTU, switching to per-cpu buffers");
4650 			mvpp2_bm_switch_buffers(priv, true);
4651 		}
4652 	}
4653 
4654 	if (running)
4655 		mvpp2_stop_dev(port);
4656 
4657 	err = mvpp2_bm_update_mtu(dev, mtu);
4658 	if (err) {
4659 		netdev_err(dev, "failed to change MTU\n");
4660 		/* Reconfigure BM to the original MTU */
4661 		mvpp2_bm_update_mtu(dev, dev->mtu);
4662 	} else {
4663 		port->pkt_size =  MVPP2_RX_PKT_SIZE(mtu);
4664 	}
4665 
4666 	if (running) {
4667 		mvpp2_start_dev(port);
4668 		mvpp2_egress_enable(port);
4669 		mvpp2_ingress_enable(port);
4670 	}
4671 
4672 	return err;
4673 }
4674 
4675 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
4676 {
4677 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
4678 	struct mvpp2 *priv = port->priv;
4679 	int err = -1, i;
4680 
4681 	if (!priv->percpu_pools)
4682 		return err;
4683 
4684 	if (!priv->page_pool[0])
4685 		return -ENOMEM;
4686 
4687 	for (i = 0; i < priv->port_count; i++) {
4688 		port = priv->port_list[i];
4689 		if (port->xdp_prog) {
4690 			dma_dir = DMA_BIDIRECTIONAL;
4691 			break;
4692 		}
4693 	}
4694 
4695 	/* All pools are equal in terms of DMA direction */
4696 	if (priv->page_pool[0]->p.dma_dir != dma_dir)
4697 		err = mvpp2_bm_switch_buffers(priv, true);
4698 
4699 	return err;
4700 }
4701 
4702 static void
4703 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
4704 {
4705 	struct mvpp2_port *port = netdev_priv(dev);
4706 	unsigned int start;
4707 	unsigned int cpu;
4708 
4709 	for_each_possible_cpu(cpu) {
4710 		struct mvpp2_pcpu_stats *cpu_stats;
4711 		u64 rx_packets;
4712 		u64 rx_bytes;
4713 		u64 tx_packets;
4714 		u64 tx_bytes;
4715 
4716 		cpu_stats = per_cpu_ptr(port->stats, cpu);
4717 		do {
4718 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
4719 			rx_packets = cpu_stats->rx_packets;
4720 			rx_bytes   = cpu_stats->rx_bytes;
4721 			tx_packets = cpu_stats->tx_packets;
4722 			tx_bytes   = cpu_stats->tx_bytes;
4723 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
4724 
4725 		stats->rx_packets += rx_packets;
4726 		stats->rx_bytes   += rx_bytes;
4727 		stats->tx_packets += tx_packets;
4728 		stats->tx_bytes   += tx_bytes;
4729 	}
4730 
4731 	stats->rx_errors	= dev->stats.rx_errors;
4732 	stats->rx_dropped	= dev->stats.rx_dropped;
4733 	stats->tx_dropped	= dev->stats.tx_dropped;
4734 }
4735 
4736 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4737 {
4738 	struct hwtstamp_config config;
4739 	void __iomem *ptp;
4740 	u32 gcr, int_mask;
4741 
4742 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
4743 		return -EFAULT;
4744 
4745 	if (config.flags)
4746 		return -EINVAL;
4747 
4748 	if (config.tx_type != HWTSTAMP_TX_OFF &&
4749 	    config.tx_type != HWTSTAMP_TX_ON)
4750 		return -ERANGE;
4751 
4752 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
4753 
4754 	int_mask = gcr = 0;
4755 	if (config.tx_type != HWTSTAMP_TX_OFF) {
4756 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
4757 		int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
4758 			    MVPP22_PTP_INT_MASK_QUEUE0;
4759 	}
4760 
4761 	/* It seems we must also release the TX reset when enabling the TSU */
4762 	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
4763 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
4764 		       MVPP22_PTP_GCR_TX_RESET;
4765 
4766 	if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
4767 		mvpp22_tai_start(port->priv->tai);
4768 
4769 	if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
4770 		config.rx_filter = HWTSTAMP_FILTER_ALL;
4771 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
4772 			     MVPP22_PTP_GCR_RX_RESET |
4773 			     MVPP22_PTP_GCR_TX_RESET |
4774 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4775 		port->rx_hwtstamp = true;
4776 	} else {
4777 		port->rx_hwtstamp = false;
4778 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
4779 			     MVPP22_PTP_GCR_RX_RESET |
4780 			     MVPP22_PTP_GCR_TX_RESET |
4781 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
4782 	}
4783 
4784 	mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
4785 		     MVPP22_PTP_INT_MASK_QUEUE1 |
4786 		     MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
4787 
4788 	if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
4789 		mvpp22_tai_stop(port->priv->tai);
4790 
4791 	port->tx_hwtstamp_type = config.tx_type;
4792 
4793 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4794 		return -EFAULT;
4795 
4796 	return 0;
4797 }
4798 
4799 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
4800 {
4801 	struct hwtstamp_config config;
4802 
4803 	memset(&config, 0, sizeof(config));
4804 
4805 	config.tx_type = port->tx_hwtstamp_type;
4806 	config.rx_filter = port->rx_hwtstamp ?
4807 		HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
4808 
4809 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
4810 		return -EFAULT;
4811 
4812 	return 0;
4813 }
4814 
4815 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
4816 				     struct ethtool_ts_info *info)
4817 {
4818 	struct mvpp2_port *port = netdev_priv(dev);
4819 
4820 	if (!port->hwtstamp)
4821 		return -EOPNOTSUPP;
4822 
4823 	info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
4824 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
4825 				SOF_TIMESTAMPING_RX_SOFTWARE |
4826 				SOF_TIMESTAMPING_SOFTWARE |
4827 				SOF_TIMESTAMPING_TX_HARDWARE |
4828 				SOF_TIMESTAMPING_RX_HARDWARE |
4829 				SOF_TIMESTAMPING_RAW_HARDWARE;
4830 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
4831 			 BIT(HWTSTAMP_TX_ON);
4832 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
4833 			   BIT(HWTSTAMP_FILTER_ALL);
4834 
4835 	return 0;
4836 }
4837 
4838 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
4839 {
4840 	struct mvpp2_port *port = netdev_priv(dev);
4841 
4842 	switch (cmd) {
4843 	case SIOCSHWTSTAMP:
4844 		if (port->hwtstamp)
4845 			return mvpp2_set_ts_config(port, ifr);
4846 		break;
4847 
4848 	case SIOCGHWTSTAMP:
4849 		if (port->hwtstamp)
4850 			return mvpp2_get_ts_config(port, ifr);
4851 		break;
4852 	}
4853 
4854 	if (!port->phylink)
4855 		return -ENOTSUPP;
4856 
4857 	return phylink_mii_ioctl(port->phylink, ifr, cmd);
4858 }
4859 
4860 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
4861 {
4862 	struct mvpp2_port *port = netdev_priv(dev);
4863 	int ret;
4864 
4865 	ret = mvpp2_prs_vid_entry_add(port, vid);
4866 	if (ret)
4867 		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
4868 			   MVPP2_PRS_VLAN_FILT_MAX - 1);
4869 	return ret;
4870 }
4871 
4872 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
4873 {
4874 	struct mvpp2_port *port = netdev_priv(dev);
4875 
4876 	mvpp2_prs_vid_entry_remove(port, vid);
4877 	return 0;
4878 }
4879 
4880 static int mvpp2_set_features(struct net_device *dev,
4881 			      netdev_features_t features)
4882 {
4883 	netdev_features_t changed = dev->features ^ features;
4884 	struct mvpp2_port *port = netdev_priv(dev);
4885 
4886 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
4887 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
4888 			mvpp2_prs_vid_enable_filtering(port);
4889 		} else {
4890 			/* Invalidate all registered VID filters for this
4891 			 * port
4892 			 */
4893 			mvpp2_prs_vid_remove_all(port);
4894 
4895 			mvpp2_prs_vid_disable_filtering(port);
4896 		}
4897 	}
4898 
4899 	if (changed & NETIF_F_RXHASH) {
4900 		if (features & NETIF_F_RXHASH)
4901 			mvpp22_port_rss_enable(port);
4902 		else
4903 			mvpp22_port_rss_disable(port);
4904 	}
4905 
4906 	return 0;
4907 }
4908 
4909 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
4910 {
4911 	struct bpf_prog *prog = bpf->prog, *old_prog;
4912 	bool running = netif_running(port->dev);
4913 	bool reset = !prog != !port->xdp_prog;
4914 
4915 	if (port->dev->mtu > ETH_DATA_LEN) {
4916 		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
4917 		return -EOPNOTSUPP;
4918 	}
4919 
4920 	if (!port->priv->percpu_pools) {
4921 		NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
4922 		return -EOPNOTSUPP;
4923 	}
4924 
4925 	if (port->ntxqs < num_possible_cpus() * 2) {
4926 		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
4927 		return -EOPNOTSUPP;
4928 	}
4929 
4930 	/* device is up and bpf is added/removed, must setup the RX queues */
4931 	if (running && reset)
4932 		mvpp2_stop(port->dev);
4933 
4934 	old_prog = xchg(&port->xdp_prog, prog);
4935 	if (old_prog)
4936 		bpf_prog_put(old_prog);
4937 
4938 	/* bpf is just replaced, RXQ and MTU are already setup */
4939 	if (!reset)
4940 		return 0;
4941 
4942 	/* device was up, restore the link */
4943 	if (running)
4944 		mvpp2_open(port->dev);
4945 
4946 	/* Check Page Pool DMA Direction */
4947 	mvpp2_check_pagepool_dma(port);
4948 
4949 	return 0;
4950 }
4951 
4952 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
4953 {
4954 	struct mvpp2_port *port = netdev_priv(dev);
4955 
4956 	switch (xdp->command) {
4957 	case XDP_SETUP_PROG:
4958 		return mvpp2_xdp_setup(port, xdp);
4959 	default:
4960 		return -EINVAL;
4961 	}
4962 }
4963 
4964 /* Ethtool methods */
4965 
4966 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
4967 {
4968 	struct mvpp2_port *port = netdev_priv(dev);
4969 
4970 	if (!port->phylink)
4971 		return -ENOTSUPP;
4972 
4973 	return phylink_ethtool_nway_reset(port->phylink);
4974 }
4975 
4976 /* Set interrupt coalescing for ethtools */
4977 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
4978 				      struct ethtool_coalesce *c)
4979 {
4980 	struct mvpp2_port *port = netdev_priv(dev);
4981 	int queue;
4982 
4983 	for (queue = 0; queue < port->nrxqs; queue++) {
4984 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
4985 
4986 		rxq->time_coal = c->rx_coalesce_usecs;
4987 		rxq->pkts_coal = c->rx_max_coalesced_frames;
4988 		mvpp2_rx_pkts_coal_set(port, rxq);
4989 		mvpp2_rx_time_coal_set(port, rxq);
4990 	}
4991 
4992 	if (port->has_tx_irqs) {
4993 		port->tx_time_coal = c->tx_coalesce_usecs;
4994 		mvpp2_tx_time_coal_set(port);
4995 	}
4996 
4997 	for (queue = 0; queue < port->ntxqs; queue++) {
4998 		struct mvpp2_tx_queue *txq = port->txqs[queue];
4999 
5000 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
5001 
5002 		if (port->has_tx_irqs)
5003 			mvpp2_tx_pkts_coal_set(port, txq);
5004 	}
5005 
5006 	return 0;
5007 }
5008 
5009 /* get coalescing for ethtools */
5010 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5011 				      struct ethtool_coalesce *c)
5012 {
5013 	struct mvpp2_port *port = netdev_priv(dev);
5014 
5015 	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
5016 	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5017 	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5018 	c->tx_coalesce_usecs       = port->tx_time_coal;
5019 	return 0;
5020 }
5021 
5022 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5023 				      struct ethtool_drvinfo *drvinfo)
5024 {
5025 	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5026 		sizeof(drvinfo->driver));
5027 	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5028 		sizeof(drvinfo->version));
5029 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5030 		sizeof(drvinfo->bus_info));
5031 }
5032 
5033 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5034 					struct ethtool_ringparam *ring)
5035 {
5036 	struct mvpp2_port *port = netdev_priv(dev);
5037 
5038 	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5039 	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5040 	ring->rx_pending = port->rx_ring_size;
5041 	ring->tx_pending = port->tx_ring_size;
5042 }
5043 
5044 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5045 				       struct ethtool_ringparam *ring)
5046 {
5047 	struct mvpp2_port *port = netdev_priv(dev);
5048 	u16 prev_rx_ring_size = port->rx_ring_size;
5049 	u16 prev_tx_ring_size = port->tx_ring_size;
5050 	int err;
5051 
5052 	err = mvpp2_check_ringparam_valid(dev, ring);
5053 	if (err)
5054 		return err;
5055 
5056 	if (!netif_running(dev)) {
5057 		port->rx_ring_size = ring->rx_pending;
5058 		port->tx_ring_size = ring->tx_pending;
5059 		return 0;
5060 	}
5061 
5062 	/* The interface is running, so we have to force a
5063 	 * reallocation of the queues
5064 	 */
5065 	mvpp2_stop_dev(port);
5066 	mvpp2_cleanup_rxqs(port);
5067 	mvpp2_cleanup_txqs(port);
5068 
5069 	port->rx_ring_size = ring->rx_pending;
5070 	port->tx_ring_size = ring->tx_pending;
5071 
5072 	err = mvpp2_setup_rxqs(port);
5073 	if (err) {
5074 		/* Reallocate Rx queues with the original ring size */
5075 		port->rx_ring_size = prev_rx_ring_size;
5076 		ring->rx_pending = prev_rx_ring_size;
5077 		err = mvpp2_setup_rxqs(port);
5078 		if (err)
5079 			goto err_out;
5080 	}
5081 	err = mvpp2_setup_txqs(port);
5082 	if (err) {
5083 		/* Reallocate Tx queues with the original ring size */
5084 		port->tx_ring_size = prev_tx_ring_size;
5085 		ring->tx_pending = prev_tx_ring_size;
5086 		err = mvpp2_setup_txqs(port);
5087 		if (err)
5088 			goto err_clean_rxqs;
5089 	}
5090 
5091 	mvpp2_start_dev(port);
5092 	mvpp2_egress_enable(port);
5093 	mvpp2_ingress_enable(port);
5094 
5095 	return 0;
5096 
5097 err_clean_rxqs:
5098 	mvpp2_cleanup_rxqs(port);
5099 err_out:
5100 	netdev_err(dev, "failed to change ring parameters");
5101 	return err;
5102 }
5103 
5104 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5105 					  struct ethtool_pauseparam *pause)
5106 {
5107 	struct mvpp2_port *port = netdev_priv(dev);
5108 
5109 	if (!port->phylink)
5110 		return;
5111 
5112 	phylink_ethtool_get_pauseparam(port->phylink, pause);
5113 }
5114 
5115 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5116 					 struct ethtool_pauseparam *pause)
5117 {
5118 	struct mvpp2_port *port = netdev_priv(dev);
5119 
5120 	if (!port->phylink)
5121 		return -ENOTSUPP;
5122 
5123 	return phylink_ethtool_set_pauseparam(port->phylink, pause);
5124 }
5125 
5126 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5127 					    struct ethtool_link_ksettings *cmd)
5128 {
5129 	struct mvpp2_port *port = netdev_priv(dev);
5130 
5131 	if (!port->phylink)
5132 		return -ENOTSUPP;
5133 
5134 	return phylink_ethtool_ksettings_get(port->phylink, cmd);
5135 }
5136 
5137 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5138 					    const struct ethtool_link_ksettings *cmd)
5139 {
5140 	struct mvpp2_port *port = netdev_priv(dev);
5141 
5142 	if (!port->phylink)
5143 		return -ENOTSUPP;
5144 
5145 	return phylink_ethtool_ksettings_set(port->phylink, cmd);
5146 }
5147 
5148 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5149 				   struct ethtool_rxnfc *info, u32 *rules)
5150 {
5151 	struct mvpp2_port *port = netdev_priv(dev);
5152 	int ret = 0, i, loc = 0;
5153 
5154 	if (!mvpp22_rss_is_supported())
5155 		return -EOPNOTSUPP;
5156 
5157 	switch (info->cmd) {
5158 	case ETHTOOL_GRXFH:
5159 		ret = mvpp2_ethtool_rxfh_get(port, info);
5160 		break;
5161 	case ETHTOOL_GRXRINGS:
5162 		info->data = port->nrxqs;
5163 		break;
5164 	case ETHTOOL_GRXCLSRLCNT:
5165 		info->rule_cnt = port->n_rfs_rules;
5166 		break;
5167 	case ETHTOOL_GRXCLSRULE:
5168 		ret = mvpp2_ethtool_cls_rule_get(port, info);
5169 		break;
5170 	case ETHTOOL_GRXCLSRLALL:
5171 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5172 			if (port->rfs_rules[i])
5173 				rules[loc++] = i;
5174 		}
5175 		break;
5176 	default:
5177 		return -ENOTSUPP;
5178 	}
5179 
5180 	return ret;
5181 }
5182 
5183 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5184 				   struct ethtool_rxnfc *info)
5185 {
5186 	struct mvpp2_port *port = netdev_priv(dev);
5187 	int ret = 0;
5188 
5189 	if (!mvpp22_rss_is_supported())
5190 		return -EOPNOTSUPP;
5191 
5192 	switch (info->cmd) {
5193 	case ETHTOOL_SRXFH:
5194 		ret = mvpp2_ethtool_rxfh_set(port, info);
5195 		break;
5196 	case ETHTOOL_SRXCLSRLINS:
5197 		ret = mvpp2_ethtool_cls_rule_ins(port, info);
5198 		break;
5199 	case ETHTOOL_SRXCLSRLDEL:
5200 		ret = mvpp2_ethtool_cls_rule_del(port, info);
5201 		break;
5202 	default:
5203 		return -EOPNOTSUPP;
5204 	}
5205 	return ret;
5206 }
5207 
5208 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5209 {
5210 	return mvpp22_rss_is_supported() ? MVPP22_RSS_TABLE_ENTRIES : 0;
5211 }
5212 
5213 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
5214 				  u8 *hfunc)
5215 {
5216 	struct mvpp2_port *port = netdev_priv(dev);
5217 	int ret = 0;
5218 
5219 	if (!mvpp22_rss_is_supported())
5220 		return -EOPNOTSUPP;
5221 
5222 	if (indir)
5223 		ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
5224 
5225 	if (hfunc)
5226 		*hfunc = ETH_RSS_HASH_CRC32;
5227 
5228 	return ret;
5229 }
5230 
5231 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
5232 				  const u8 *key, const u8 hfunc)
5233 {
5234 	struct mvpp2_port *port = netdev_priv(dev);
5235 	int ret = 0;
5236 
5237 	if (!mvpp22_rss_is_supported())
5238 		return -EOPNOTSUPP;
5239 
5240 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5241 		return -EOPNOTSUPP;
5242 
5243 	if (key)
5244 		return -EOPNOTSUPP;
5245 
5246 	if (indir)
5247 		ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
5248 
5249 	return ret;
5250 }
5251 
5252 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5253 					  u8 *key, u8 *hfunc, u32 rss_context)
5254 {
5255 	struct mvpp2_port *port = netdev_priv(dev);
5256 	int ret = 0;
5257 
5258 	if (!mvpp22_rss_is_supported())
5259 		return -EOPNOTSUPP;
5260 	if (rss_context >= MVPP22_N_RSS_TABLES)
5261 		return -EINVAL;
5262 
5263 	if (hfunc)
5264 		*hfunc = ETH_RSS_HASH_CRC32;
5265 
5266 	if (indir)
5267 		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5268 
5269 	return ret;
5270 }
5271 
5272 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5273 					  const u32 *indir, const u8 *key,
5274 					  const u8 hfunc, u32 *rss_context,
5275 					  bool delete)
5276 {
5277 	struct mvpp2_port *port = netdev_priv(dev);
5278 	int ret;
5279 
5280 	if (!mvpp22_rss_is_supported())
5281 		return -EOPNOTSUPP;
5282 
5283 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5284 		return -EOPNOTSUPP;
5285 
5286 	if (key)
5287 		return -EOPNOTSUPP;
5288 
5289 	if (delete)
5290 		return mvpp22_port_rss_ctx_delete(port, *rss_context);
5291 
5292 	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5293 		ret = mvpp22_port_rss_ctx_create(port, rss_context);
5294 		if (ret)
5295 			return ret;
5296 	}
5297 
5298 	return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5299 }
5300 /* Device ops */
5301 
5302 static const struct net_device_ops mvpp2_netdev_ops = {
5303 	.ndo_open		= mvpp2_open,
5304 	.ndo_stop		= mvpp2_stop,
5305 	.ndo_start_xmit		= mvpp2_tx,
5306 	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
5307 	.ndo_set_mac_address	= mvpp2_set_mac_address,
5308 	.ndo_change_mtu		= mvpp2_change_mtu,
5309 	.ndo_get_stats64	= mvpp2_get_stats64,
5310 	.ndo_do_ioctl		= mvpp2_ioctl,
5311 	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
5312 	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
5313 	.ndo_set_features	= mvpp2_set_features,
5314 	.ndo_bpf		= mvpp2_xdp,
5315 	.ndo_xdp_xmit		= mvpp2_xdp_xmit,
5316 };
5317 
5318 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5319 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5320 				     ETHTOOL_COALESCE_MAX_FRAMES,
5321 	.nway_reset		= mvpp2_ethtool_nway_reset,
5322 	.get_link		= ethtool_op_get_link,
5323 	.get_ts_info		= mvpp2_ethtool_get_ts_info,
5324 	.set_coalesce		= mvpp2_ethtool_set_coalesce,
5325 	.get_coalesce		= mvpp2_ethtool_get_coalesce,
5326 	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
5327 	.get_ringparam		= mvpp2_ethtool_get_ringparam,
5328 	.set_ringparam		= mvpp2_ethtool_set_ringparam,
5329 	.get_strings		= mvpp2_ethtool_get_strings,
5330 	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
5331 	.get_sset_count		= mvpp2_ethtool_get_sset_count,
5332 	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
5333 	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
5334 	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
5335 	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
5336 	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
5337 	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
5338 	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
5339 	.get_rxfh		= mvpp2_ethtool_get_rxfh,
5340 	.set_rxfh		= mvpp2_ethtool_set_rxfh,
5341 	.get_rxfh_context	= mvpp2_ethtool_get_rxfh_context,
5342 	.set_rxfh_context	= mvpp2_ethtool_set_rxfh_context,
5343 };
5344 
5345 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5346  * had a single IRQ defined per-port.
5347  */
5348 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5349 					   struct device_node *port_node)
5350 {
5351 	struct mvpp2_queue_vector *v = &port->qvecs[0];
5352 
5353 	v->first_rxq = 0;
5354 	v->nrxqs = port->nrxqs;
5355 	v->type = MVPP2_QUEUE_VECTOR_SHARED;
5356 	v->sw_thread_id = 0;
5357 	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5358 	v->port = port;
5359 	v->irq = irq_of_parse_and_map(port_node, 0);
5360 	if (v->irq <= 0)
5361 		return -EINVAL;
5362 	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5363 		       NAPI_POLL_WEIGHT);
5364 
5365 	port->nqvecs = 1;
5366 
5367 	return 0;
5368 }
5369 
5370 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5371 					  struct device_node *port_node)
5372 {
5373 	struct mvpp2 *priv = port->priv;
5374 	struct mvpp2_queue_vector *v;
5375 	int i, ret;
5376 
5377 	switch (queue_mode) {
5378 	case MVPP2_QDIST_SINGLE_MODE:
5379 		port->nqvecs = priv->nthreads + 1;
5380 		break;
5381 	case MVPP2_QDIST_MULTI_MODE:
5382 		port->nqvecs = priv->nthreads;
5383 		break;
5384 	}
5385 
5386 	for (i = 0; i < port->nqvecs; i++) {
5387 		char irqname[16];
5388 
5389 		v = port->qvecs + i;
5390 
5391 		v->port = port;
5392 		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5393 		v->sw_thread_id = i;
5394 		v->sw_thread_mask = BIT(i);
5395 
5396 		if (port->flags & MVPP2_F_DT_COMPAT)
5397 			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5398 		else
5399 			snprintf(irqname, sizeof(irqname), "hif%d", i);
5400 
5401 		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5402 			v->first_rxq = i;
5403 			v->nrxqs = 1;
5404 		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5405 			   i == (port->nqvecs - 1)) {
5406 			v->first_rxq = 0;
5407 			v->nrxqs = port->nrxqs;
5408 			v->type = MVPP2_QUEUE_VECTOR_SHARED;
5409 
5410 			if (port->flags & MVPP2_F_DT_COMPAT)
5411 				strncpy(irqname, "rx-shared", sizeof(irqname));
5412 		}
5413 
5414 		if (port_node)
5415 			v->irq = of_irq_get_byname(port_node, irqname);
5416 		else
5417 			v->irq = fwnode_irq_get(port->fwnode, i);
5418 		if (v->irq <= 0) {
5419 			ret = -EINVAL;
5420 			goto err;
5421 		}
5422 
5423 		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5424 			       NAPI_POLL_WEIGHT);
5425 	}
5426 
5427 	return 0;
5428 
5429 err:
5430 	for (i = 0; i < port->nqvecs; i++)
5431 		irq_dispose_mapping(port->qvecs[i].irq);
5432 	return ret;
5433 }
5434 
5435 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5436 				    struct device_node *port_node)
5437 {
5438 	if (port->has_tx_irqs)
5439 		return mvpp2_multi_queue_vectors_init(port, port_node);
5440 	else
5441 		return mvpp2_simple_queue_vectors_init(port, port_node);
5442 }
5443 
5444 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5445 {
5446 	int i;
5447 
5448 	for (i = 0; i < port->nqvecs; i++)
5449 		irq_dispose_mapping(port->qvecs[i].irq);
5450 }
5451 
5452 /* Configure Rx queue group interrupt for this port */
5453 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5454 {
5455 	struct mvpp2 *priv = port->priv;
5456 	u32 val;
5457 	int i;
5458 
5459 	if (priv->hw_version == MVPP21) {
5460 		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5461 			    port->nrxqs);
5462 		return;
5463 	}
5464 
5465 	/* Handle the more complicated PPv2.2 case */
5466 	for (i = 0; i < port->nqvecs; i++) {
5467 		struct mvpp2_queue_vector *qv = port->qvecs + i;
5468 
5469 		if (!qv->nrxqs)
5470 			continue;
5471 
5472 		val = qv->sw_thread_id;
5473 		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5474 		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5475 
5476 		val = qv->first_rxq;
5477 		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5478 		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5479 	}
5480 }
5481 
5482 /* Initialize port HW */
5483 static int mvpp2_port_init(struct mvpp2_port *port)
5484 {
5485 	struct device *dev = port->dev->dev.parent;
5486 	struct mvpp2 *priv = port->priv;
5487 	struct mvpp2_txq_pcpu *txq_pcpu;
5488 	unsigned int thread;
5489 	int queue, err;
5490 
5491 	/* Checks for hardware constraints */
5492 	if (port->first_rxq + port->nrxqs >
5493 	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
5494 		return -EINVAL;
5495 
5496 	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5497 		return -EINVAL;
5498 
5499 	/* Disable port */
5500 	mvpp2_egress_disable(port);
5501 	mvpp2_port_disable(port);
5502 
5503 	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5504 
5505 	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5506 				  GFP_KERNEL);
5507 	if (!port->txqs)
5508 		return -ENOMEM;
5509 
5510 	/* Associate physical Tx queues to this port and initialize.
5511 	 * The mapping is predefined.
5512 	 */
5513 	for (queue = 0; queue < port->ntxqs; queue++) {
5514 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5515 		struct mvpp2_tx_queue *txq;
5516 
5517 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5518 		if (!txq) {
5519 			err = -ENOMEM;
5520 			goto err_free_percpu;
5521 		}
5522 
5523 		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5524 		if (!txq->pcpu) {
5525 			err = -ENOMEM;
5526 			goto err_free_percpu;
5527 		}
5528 
5529 		txq->id = queue_phy_id;
5530 		txq->log_id = queue;
5531 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5532 		for (thread = 0; thread < priv->nthreads; thread++) {
5533 			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5534 			txq_pcpu->thread = thread;
5535 		}
5536 
5537 		port->txqs[queue] = txq;
5538 	}
5539 
5540 	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5541 				  GFP_KERNEL);
5542 	if (!port->rxqs) {
5543 		err = -ENOMEM;
5544 		goto err_free_percpu;
5545 	}
5546 
5547 	/* Allocate and initialize Rx queue for this port */
5548 	for (queue = 0; queue < port->nrxqs; queue++) {
5549 		struct mvpp2_rx_queue *rxq;
5550 
5551 		/* Map physical Rx queue to port's logical Rx queue */
5552 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5553 		if (!rxq) {
5554 			err = -ENOMEM;
5555 			goto err_free_percpu;
5556 		}
5557 		/* Map this Rx queue to a physical queue */
5558 		rxq->id = port->first_rxq + queue;
5559 		rxq->port = port->id;
5560 		rxq->logic_rxq = queue;
5561 
5562 		port->rxqs[queue] = rxq;
5563 	}
5564 
5565 	mvpp2_rx_irqs_setup(port);
5566 
5567 	/* Create Rx descriptor rings */
5568 	for (queue = 0; queue < port->nrxqs; queue++) {
5569 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5570 
5571 		rxq->size = port->rx_ring_size;
5572 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5573 		rxq->time_coal = MVPP2_RX_COAL_USEC;
5574 	}
5575 
5576 	mvpp2_ingress_disable(port);
5577 
5578 	/* Port default configuration */
5579 	mvpp2_defaults_set(port);
5580 
5581 	/* Port's classifier configuration */
5582 	mvpp2_cls_oversize_rxq_set(port);
5583 	mvpp2_cls_port_config(port);
5584 
5585 	if (mvpp22_rss_is_supported())
5586 		mvpp22_port_rss_init(port);
5587 
5588 	/* Provide an initial Rx packet size */
5589 	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5590 
5591 	/* Initialize pools for swf */
5592 	err = mvpp2_swf_bm_pool_init(port);
5593 	if (err)
5594 		goto err_free_percpu;
5595 
5596 	/* Clear all port stats */
5597 	mvpp2_read_stats(port);
5598 	memset(port->ethtool_stats, 0,
5599 	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
5600 
5601 	return 0;
5602 
5603 err_free_percpu:
5604 	for (queue = 0; queue < port->ntxqs; queue++) {
5605 		if (!port->txqs[queue])
5606 			continue;
5607 		free_percpu(port->txqs[queue]->pcpu);
5608 	}
5609 	return err;
5610 }
5611 
5612 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
5613 					   unsigned long *flags)
5614 {
5615 	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
5616 			  "tx-cpu3" };
5617 	int i;
5618 
5619 	for (i = 0; i < 5; i++)
5620 		if (of_property_match_string(port_node, "interrupt-names",
5621 					     irqs[i]) < 0)
5622 			return false;
5623 
5624 	*flags |= MVPP2_F_DT_COMPAT;
5625 	return true;
5626 }
5627 
5628 /* Checks if the port dt description has the required Tx interrupts:
5629  * - PPv2.1: there are no such interrupts.
5630  * - PPv2.2:
5631  *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
5632  *   - The new ones have: "hifX" with X in [0..8]
5633  *
5634  * All those variants are supported to keep the backward compatibility.
5635  */
5636 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
5637 				struct device_node *port_node,
5638 				unsigned long *flags)
5639 {
5640 	char name[5];
5641 	int i;
5642 
5643 	/* ACPI */
5644 	if (!port_node)
5645 		return true;
5646 
5647 	if (priv->hw_version == MVPP21)
5648 		return false;
5649 
5650 	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
5651 		return true;
5652 
5653 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
5654 		snprintf(name, 5, "hif%d", i);
5655 		if (of_property_match_string(port_node, "interrupt-names",
5656 					     name) < 0)
5657 			return false;
5658 	}
5659 
5660 	return true;
5661 }
5662 
5663 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
5664 				     struct fwnode_handle *fwnode,
5665 				     char **mac_from)
5666 {
5667 	struct mvpp2_port *port = netdev_priv(dev);
5668 	char hw_mac_addr[ETH_ALEN] = {0};
5669 	char fw_mac_addr[ETH_ALEN];
5670 
5671 	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
5672 		*mac_from = "firmware node";
5673 		ether_addr_copy(dev->dev_addr, fw_mac_addr);
5674 		return;
5675 	}
5676 
5677 	if (priv->hw_version == MVPP21) {
5678 		mvpp21_get_mac_address(port, hw_mac_addr);
5679 		if (is_valid_ether_addr(hw_mac_addr)) {
5680 			*mac_from = "hardware";
5681 			ether_addr_copy(dev->dev_addr, hw_mac_addr);
5682 			return;
5683 		}
5684 	}
5685 
5686 	*mac_from = "random";
5687 	eth_hw_addr_random(dev);
5688 }
5689 
5690 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
5691 {
5692 	return container_of(config, struct mvpp2_port, phylink_config);
5693 }
5694 
5695 static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
5696 {
5697 	return container_of(pcs, struct mvpp2_port, phylink_pcs);
5698 }
5699 
5700 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
5701 				    struct phylink_link_state *state)
5702 {
5703 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5704 	u32 val;
5705 
5706 	state->speed = SPEED_10000;
5707 	state->duplex = 1;
5708 	state->an_complete = 1;
5709 
5710 	val = readl(port->base + MVPP22_XLG_STATUS);
5711 	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
5712 
5713 	state->pause = 0;
5714 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5715 	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
5716 		state->pause |= MLO_PAUSE_TX;
5717 	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
5718 		state->pause |= MLO_PAUSE_RX;
5719 }
5720 
5721 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
5722 				unsigned int mode,
5723 				phy_interface_t interface,
5724 				const unsigned long *advertising,
5725 				bool permit_pause_to_mac)
5726 {
5727 	return 0;
5728 }
5729 
5730 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
5731 	.pcs_get_state = mvpp2_xlg_pcs_get_state,
5732 	.pcs_config = mvpp2_xlg_pcs_config,
5733 };
5734 
5735 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
5736 				     struct phylink_link_state *state)
5737 {
5738 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5739 	u32 val;
5740 
5741 	val = readl(port->base + MVPP2_GMAC_STATUS0);
5742 
5743 	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
5744 	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
5745 	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
5746 
5747 	switch (port->phy_interface) {
5748 	case PHY_INTERFACE_MODE_1000BASEX:
5749 		state->speed = SPEED_1000;
5750 		break;
5751 	case PHY_INTERFACE_MODE_2500BASEX:
5752 		state->speed = SPEED_2500;
5753 		break;
5754 	default:
5755 		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
5756 			state->speed = SPEED_1000;
5757 		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
5758 			state->speed = SPEED_100;
5759 		else
5760 			state->speed = SPEED_10;
5761 	}
5762 
5763 	state->pause = 0;
5764 	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
5765 		state->pause |= MLO_PAUSE_RX;
5766 	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
5767 		state->pause |= MLO_PAUSE_TX;
5768 }
5769 
5770 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
5771 				 phy_interface_t interface,
5772 				 const unsigned long *advertising,
5773 				 bool permit_pause_to_mac)
5774 {
5775 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5776 	u32 mask, val, an, old_an, changed;
5777 
5778 	mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
5779 	       MVPP2_GMAC_IN_BAND_AUTONEG |
5780 	       MVPP2_GMAC_AN_SPEED_EN |
5781 	       MVPP2_GMAC_FLOW_CTRL_AUTONEG |
5782 	       MVPP2_GMAC_AN_DUPLEX_EN;
5783 
5784 	if (phylink_autoneg_inband(mode)) {
5785 		mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
5786 			MVPP2_GMAC_CONFIG_GMII_SPEED |
5787 			MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5788 		val = MVPP2_GMAC_IN_BAND_AUTONEG;
5789 
5790 		if (interface == PHY_INTERFACE_MODE_SGMII) {
5791 			/* SGMII mode receives the speed and duplex from PHY */
5792 			val |= MVPP2_GMAC_AN_SPEED_EN |
5793 			       MVPP2_GMAC_AN_DUPLEX_EN;
5794 		} else {
5795 			/* 802.3z mode has fixed speed and duplex */
5796 			val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
5797 			       MVPP2_GMAC_CONFIG_FULL_DUPLEX;
5798 
5799 			/* The FLOW_CTRL_AUTONEG bit selects either the hardware
5800 			 * automatically or the bits in MVPP22_GMAC_CTRL_4_REG
5801 			 * manually controls the GMAC pause modes.
5802 			 */
5803 			if (permit_pause_to_mac)
5804 				val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
5805 
5806 			/* Configure advertisement bits */
5807 			mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
5808 			if (phylink_test(advertising, Pause))
5809 				val |= MVPP2_GMAC_FC_ADV_EN;
5810 			if (phylink_test(advertising, Asym_Pause))
5811 				val |= MVPP2_GMAC_FC_ADV_ASM_EN;
5812 		}
5813 	} else {
5814 		val = 0;
5815 	}
5816 
5817 	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5818 	an = (an & ~mask) | val;
5819 	changed = an ^ old_an;
5820 	if (changed)
5821 		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5822 
5823 	/* We are only interested in the advertisement bits changing */
5824 	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
5825 }
5826 
5827 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
5828 {
5829 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
5830 	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5831 
5832 	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
5833 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5834 	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
5835 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5836 }
5837 
5838 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
5839 	.pcs_get_state = mvpp2_gmac_pcs_get_state,
5840 	.pcs_config = mvpp2_gmac_pcs_config,
5841 	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
5842 };
5843 
5844 static void mvpp2_phylink_validate(struct phylink_config *config,
5845 				   unsigned long *supported,
5846 				   struct phylink_link_state *state)
5847 {
5848 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
5849 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
5850 
5851 	/* Invalid combinations */
5852 	switch (state->interface) {
5853 	case PHY_INTERFACE_MODE_10GBASER:
5854 	case PHY_INTERFACE_MODE_XAUI:
5855 		if (!mvpp2_port_supports_xlg(port))
5856 			goto empty_set;
5857 		break;
5858 	case PHY_INTERFACE_MODE_RGMII:
5859 	case PHY_INTERFACE_MODE_RGMII_ID:
5860 	case PHY_INTERFACE_MODE_RGMII_RXID:
5861 	case PHY_INTERFACE_MODE_RGMII_TXID:
5862 		if (!mvpp2_port_supports_rgmii(port))
5863 			goto empty_set;
5864 		break;
5865 	default:
5866 		break;
5867 	}
5868 
5869 	phylink_set(mask, Autoneg);
5870 	phylink_set_port_modes(mask);
5871 	phylink_set(mask, Pause);
5872 	phylink_set(mask, Asym_Pause);
5873 
5874 	switch (state->interface) {
5875 	case PHY_INTERFACE_MODE_10GBASER:
5876 	case PHY_INTERFACE_MODE_XAUI:
5877 	case PHY_INTERFACE_MODE_NA:
5878 		if (mvpp2_port_supports_xlg(port)) {
5879 			phylink_set(mask, 10000baseT_Full);
5880 			phylink_set(mask, 10000baseCR_Full);
5881 			phylink_set(mask, 10000baseSR_Full);
5882 			phylink_set(mask, 10000baseLR_Full);
5883 			phylink_set(mask, 10000baseLRM_Full);
5884 			phylink_set(mask, 10000baseER_Full);
5885 			phylink_set(mask, 10000baseKR_Full);
5886 		}
5887 		if (state->interface != PHY_INTERFACE_MODE_NA)
5888 			break;
5889 		fallthrough;
5890 	case PHY_INTERFACE_MODE_RGMII:
5891 	case PHY_INTERFACE_MODE_RGMII_ID:
5892 	case PHY_INTERFACE_MODE_RGMII_RXID:
5893 	case PHY_INTERFACE_MODE_RGMII_TXID:
5894 	case PHY_INTERFACE_MODE_SGMII:
5895 		phylink_set(mask, 10baseT_Half);
5896 		phylink_set(mask, 10baseT_Full);
5897 		phylink_set(mask, 100baseT_Half);
5898 		phylink_set(mask, 100baseT_Full);
5899 		phylink_set(mask, 1000baseT_Full);
5900 		phylink_set(mask, 1000baseX_Full);
5901 		if (state->interface != PHY_INTERFACE_MODE_NA)
5902 			break;
5903 		fallthrough;
5904 	case PHY_INTERFACE_MODE_1000BASEX:
5905 	case PHY_INTERFACE_MODE_2500BASEX:
5906 		if (port->comphy ||
5907 		    state->interface != PHY_INTERFACE_MODE_2500BASEX) {
5908 			phylink_set(mask, 1000baseT_Full);
5909 			phylink_set(mask, 1000baseX_Full);
5910 		}
5911 		if (port->comphy ||
5912 		    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
5913 			phylink_set(mask, 2500baseT_Full);
5914 			phylink_set(mask, 2500baseX_Full);
5915 		}
5916 		break;
5917 	default:
5918 		goto empty_set;
5919 	}
5920 
5921 	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
5922 	bitmap_and(state->advertising, state->advertising, mask,
5923 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
5924 
5925 	phylink_helper_basex_speed(state);
5926 	return;
5927 
5928 empty_set:
5929 	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
5930 }
5931 
5932 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
5933 			     const struct phylink_link_state *state)
5934 {
5935 	u32 val;
5936 
5937 	mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
5938 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS,
5939 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS);
5940 	mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
5941 		     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
5942 		     MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
5943 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
5944 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
5945 
5946 	/* Wait for reset to deassert */
5947 	do {
5948 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5949 	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
5950 }
5951 
5952 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
5953 			      const struct phylink_link_state *state)
5954 {
5955 	u32 old_ctrl0, ctrl0;
5956 	u32 old_ctrl2, ctrl2;
5957 	u32 old_ctrl4, ctrl4;
5958 
5959 	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
5960 	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
5961 	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
5962 
5963 	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
5964 	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK);
5965 
5966 	/* Configure port type */
5967 	if (phy_interface_mode_is_8023z(state->interface)) {
5968 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
5969 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5970 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5971 			 MVPP22_CTRL4_DP_CLK_SEL |
5972 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5973 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5974 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
5975 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
5976 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
5977 			 MVPP22_CTRL4_DP_CLK_SEL |
5978 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5979 	} else if (phy_interface_mode_is_rgmii(state->interface)) {
5980 		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
5981 		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
5982 			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
5983 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
5984 	}
5985 
5986 	/* Configure negotiation style */
5987 	if (!phylink_autoneg_inband(mode)) {
5988 		/* Phy or fixed speed - no in-band AN, nothing to do, leave the
5989 		 * configured speed, duplex and flow control as-is.
5990 		 */
5991 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
5992 		/* SGMII in-band mode receives the speed and duplex from
5993 		 * the PHY. Flow control information is not received. */
5994 	} else if (phy_interface_mode_is_8023z(state->interface)) {
5995 		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
5996 		 * they negotiate duplex: they are always operating with a fixed
5997 		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
5998 		 * speed and full duplex here.
5999 		 */
6000 		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6001 	}
6002 
6003 	if (old_ctrl0 != ctrl0)
6004 		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6005 	if (old_ctrl2 != ctrl2)
6006 		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6007 	if (old_ctrl4 != ctrl4)
6008 		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6009 }
6010 
6011 static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
6012 			      phy_interface_t interface)
6013 {
6014 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6015 
6016 	/* Check for invalid configuration */
6017 	if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6018 		netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6019 		return -EINVAL;
6020 	}
6021 
6022 	if (port->phy_interface != interface ||
6023 	    phylink_autoneg_inband(mode)) {
6024 		/* Force the link down when changing the interface or if in
6025 		 * in-band mode to ensure we do not change the configuration
6026 		 * while the hardware is indicating link is up. We force both
6027 		 * XLG and GMAC down to ensure that they're both in a known
6028 		 * state.
6029 		 */
6030 		mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6031 			     MVPP2_GMAC_FORCE_LINK_PASS |
6032 			     MVPP2_GMAC_FORCE_LINK_DOWN,
6033 			     MVPP2_GMAC_FORCE_LINK_DOWN);
6034 
6035 		if (mvpp2_port_supports_xlg(port))
6036 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6037 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6038 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6039 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6040 	}
6041 
6042 	/* Make sure the port is disabled when reconfiguring the mode */
6043 	mvpp2_port_disable(port);
6044 
6045 	if (port->phy_interface != interface) {
6046 		/* Place GMAC into reset */
6047 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6048 			     MVPP2_GMAC_PORT_RESET_MASK,
6049 			     MVPP2_GMAC_PORT_RESET_MASK);
6050 
6051 		if (port->priv->hw_version == MVPP22) {
6052 			mvpp22_gop_mask_irq(port);
6053 
6054 			phy_power_off(port->comphy);
6055 		}
6056 	}
6057 
6058 	/* Select the appropriate PCS operations depending on the
6059 	 * configured interface mode. We will only switch to a mode
6060 	 * that the validate() checks have already passed.
6061 	 */
6062 	if (mvpp2_is_xlg(interface))
6063 		port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
6064 	else
6065 		port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
6066 
6067 	return 0;
6068 }
6069 
6070 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6071 			     phy_interface_t interface)
6072 {
6073 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6074 	int ret;
6075 
6076 	ret = mvpp2__mac_prepare(config, mode, interface);
6077 	if (ret == 0)
6078 		phylink_set_pcs(port->phylink, &port->phylink_pcs);
6079 
6080 	return ret;
6081 }
6082 
6083 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6084 			     const struct phylink_link_state *state)
6085 {
6086 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6087 
6088 	/* mac (re)configuration */
6089 	if (mvpp2_is_xlg(state->interface))
6090 		mvpp2_xlg_config(port, mode, state);
6091 	else if (phy_interface_mode_is_rgmii(state->interface) ||
6092 		 phy_interface_mode_is_8023z(state->interface) ||
6093 		 state->interface == PHY_INTERFACE_MODE_SGMII)
6094 		mvpp2_gmac_config(port, mode, state);
6095 
6096 	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6097 		mvpp2_port_loopback_set(port, state);
6098 }
6099 
6100 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6101 			    phy_interface_t interface)
6102 {
6103 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6104 
6105 	if (port->priv->hw_version == MVPP22 &&
6106 	    port->phy_interface != interface) {
6107 		port->phy_interface = interface;
6108 
6109 		/* Reconfigure the serdes lanes */
6110 		mvpp22_mode_reconfigure(port);
6111 
6112 		/* Unmask interrupts */
6113 		mvpp22_gop_unmask_irq(port);
6114 	}
6115 
6116 	if (!mvpp2_is_xlg(interface)) {
6117 		/* Release GMAC reset and wait */
6118 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6119 			     MVPP2_GMAC_PORT_RESET_MASK, 0);
6120 
6121 		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6122 		       MVPP2_GMAC_PORT_RESET_MASK)
6123 			continue;
6124 	}
6125 
6126 	mvpp2_port_enable(port);
6127 
6128 	/* Allow the link to come up if in in-band mode, otherwise the
6129 	 * link is forced via mac_link_down()/mac_link_up()
6130 	 */
6131 	if (phylink_autoneg_inband(mode)) {
6132 		if (mvpp2_is_xlg(interface))
6133 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6134 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6135 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6136 		else
6137 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6138 				     MVPP2_GMAC_FORCE_LINK_PASS |
6139 				     MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6140 	}
6141 
6142 	return 0;
6143 }
6144 
6145 static void mvpp2_mac_link_up(struct phylink_config *config,
6146 			      struct phy_device *phy,
6147 			      unsigned int mode, phy_interface_t interface,
6148 			      int speed, int duplex,
6149 			      bool tx_pause, bool rx_pause)
6150 {
6151 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6152 	u32 val;
6153 
6154 	if (mvpp2_is_xlg(interface)) {
6155 		if (!phylink_autoneg_inband(mode)) {
6156 			val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6157 			if (tx_pause)
6158 				val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6159 			if (rx_pause)
6160 				val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6161 
6162 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6163 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6164 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6165 				     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6166 				     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6167 		}
6168 	} else {
6169 		if (!phylink_autoneg_inband(mode)) {
6170 			val = MVPP2_GMAC_FORCE_LINK_PASS;
6171 
6172 			if (speed == SPEED_1000 || speed == SPEED_2500)
6173 				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6174 			else if (speed == SPEED_100)
6175 				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6176 
6177 			if (duplex == DUPLEX_FULL)
6178 				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6179 
6180 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6181 				     MVPP2_GMAC_FORCE_LINK_DOWN |
6182 				     MVPP2_GMAC_FORCE_LINK_PASS |
6183 				     MVPP2_GMAC_CONFIG_MII_SPEED |
6184 				     MVPP2_GMAC_CONFIG_GMII_SPEED |
6185 				     MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6186 		}
6187 
6188 		/* We can always update the flow control enable bits;
6189 		 * these will only be effective if flow control AN
6190 		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6191 		 */
6192 		val = 0;
6193 		if (tx_pause)
6194 			val |= MVPP22_CTRL4_TX_FC_EN;
6195 		if (rx_pause)
6196 			val |= MVPP22_CTRL4_RX_FC_EN;
6197 
6198 		mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6199 			     MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6200 			     val);
6201 	}
6202 
6203 	mvpp2_port_enable(port);
6204 
6205 	mvpp2_egress_enable(port);
6206 	mvpp2_ingress_enable(port);
6207 	netif_tx_wake_all_queues(port->dev);
6208 }
6209 
6210 static void mvpp2_mac_link_down(struct phylink_config *config,
6211 				unsigned int mode, phy_interface_t interface)
6212 {
6213 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6214 	u32 val;
6215 
6216 	if (!phylink_autoneg_inband(mode)) {
6217 		if (mvpp2_is_xlg(interface)) {
6218 			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6219 			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6220 			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6221 			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6222 		} else {
6223 			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6224 			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6225 			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6226 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6227 		}
6228 	}
6229 
6230 	netif_tx_stop_all_queues(port->dev);
6231 	mvpp2_egress_disable(port);
6232 	mvpp2_ingress_disable(port);
6233 
6234 	mvpp2_port_disable(port);
6235 }
6236 
6237 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6238 	.validate = mvpp2_phylink_validate,
6239 	.mac_prepare = mvpp2_mac_prepare,
6240 	.mac_config = mvpp2_mac_config,
6241 	.mac_finish = mvpp2_mac_finish,
6242 	.mac_link_up = mvpp2_mac_link_up,
6243 	.mac_link_down = mvpp2_mac_link_down,
6244 };
6245 
6246 /* Work-around for ACPI */
6247 static void mvpp2_acpi_start(struct mvpp2_port *port)
6248 {
6249 	/* Phylink isn't used as of now for ACPI, so the MAC has to be
6250 	 * configured manually when the interface is started. This will
6251 	 * be removed as soon as the phylink ACPI support lands in.
6252 	 */
6253 	struct phylink_link_state state = {
6254 		.interface = port->phy_interface,
6255 	};
6256 	mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6257 			   port->phy_interface);
6258 	mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6259 	port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
6260 					  port->phy_interface,
6261 					  state.advertising, false);
6262 	mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6263 			 port->phy_interface);
6264 	mvpp2_mac_link_up(&port->phylink_config, NULL,
6265 			  MLO_AN_INBAND, port->phy_interface,
6266 			  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6267 }
6268 
6269 /* Ports initialization */
6270 static int mvpp2_port_probe(struct platform_device *pdev,
6271 			    struct fwnode_handle *port_fwnode,
6272 			    struct mvpp2 *priv)
6273 {
6274 	struct phy *comphy = NULL;
6275 	struct mvpp2_port *port;
6276 	struct mvpp2_port_pcpu *port_pcpu;
6277 	struct device_node *port_node = to_of_node(port_fwnode);
6278 	netdev_features_t features;
6279 	struct net_device *dev;
6280 	struct phylink *phylink;
6281 	char *mac_from = "";
6282 	unsigned int ntxqs, nrxqs, thread;
6283 	unsigned long flags = 0;
6284 	bool has_tx_irqs;
6285 	u32 id;
6286 	int phy_mode;
6287 	int err, i;
6288 
6289 	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6290 	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6291 		dev_err(&pdev->dev,
6292 			"not enough IRQs to support multi queue mode\n");
6293 		return -EINVAL;
6294 	}
6295 
6296 	ntxqs = MVPP2_MAX_TXQ;
6297 	nrxqs = mvpp2_get_nrxqs(priv);
6298 
6299 	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6300 	if (!dev)
6301 		return -ENOMEM;
6302 
6303 	phy_mode = fwnode_get_phy_mode(port_fwnode);
6304 	if (phy_mode < 0) {
6305 		dev_err(&pdev->dev, "incorrect phy mode\n");
6306 		err = phy_mode;
6307 		goto err_free_netdev;
6308 	}
6309 
6310 	/*
6311 	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6312 	 * Existing usage of 10GBASE-KR is not correct; no backplane
6313 	 * negotiation is done, and this driver does not actually support
6314 	 * 10GBASE-KR.
6315 	 */
6316 	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6317 		phy_mode = PHY_INTERFACE_MODE_10GBASER;
6318 
6319 	if (port_node) {
6320 		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6321 		if (IS_ERR(comphy)) {
6322 			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6323 				err = -EPROBE_DEFER;
6324 				goto err_free_netdev;
6325 			}
6326 			comphy = NULL;
6327 		}
6328 	}
6329 
6330 	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6331 		err = -EINVAL;
6332 		dev_err(&pdev->dev, "missing port-id value\n");
6333 		goto err_free_netdev;
6334 	}
6335 
6336 	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6337 	dev->watchdog_timeo = 5 * HZ;
6338 	dev->netdev_ops = &mvpp2_netdev_ops;
6339 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
6340 
6341 	port = netdev_priv(dev);
6342 	port->dev = dev;
6343 	port->fwnode = port_fwnode;
6344 	port->has_phy = !!of_find_property(port_node, "phy", NULL);
6345 	port->ntxqs = ntxqs;
6346 	port->nrxqs = nrxqs;
6347 	port->priv = priv;
6348 	port->has_tx_irqs = has_tx_irqs;
6349 	port->flags = flags;
6350 
6351 	err = mvpp2_queue_vectors_init(port, port_node);
6352 	if (err)
6353 		goto err_free_netdev;
6354 
6355 	if (port_node)
6356 		port->port_irq = of_irq_get_byname(port_node, "link");
6357 	else
6358 		port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6359 	if (port->port_irq == -EPROBE_DEFER) {
6360 		err = -EPROBE_DEFER;
6361 		goto err_deinit_qvecs;
6362 	}
6363 	if (port->port_irq <= 0)
6364 		/* the link irq is optional */
6365 		port->port_irq = 0;
6366 
6367 	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6368 		port->flags |= MVPP2_F_LOOPBACK;
6369 
6370 	port->id = id;
6371 	if (priv->hw_version == MVPP21)
6372 		port->first_rxq = port->id * port->nrxqs;
6373 	else
6374 		port->first_rxq = port->id * priv->max_port_rxqs;
6375 
6376 	port->of_node = port_node;
6377 	port->phy_interface = phy_mode;
6378 	port->comphy = comphy;
6379 
6380 	if (priv->hw_version == MVPP21) {
6381 		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6382 		if (IS_ERR(port->base)) {
6383 			err = PTR_ERR(port->base);
6384 			goto err_free_irq;
6385 		}
6386 
6387 		port->stats_base = port->priv->lms_base +
6388 				   MVPP21_MIB_COUNTERS_OFFSET +
6389 				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6390 	} else {
6391 		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6392 					     &port->gop_id)) {
6393 			err = -EINVAL;
6394 			dev_err(&pdev->dev, "missing gop-port-id value\n");
6395 			goto err_deinit_qvecs;
6396 		}
6397 
6398 		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6399 		port->stats_base = port->priv->iface_base +
6400 				   MVPP22_MIB_COUNTERS_OFFSET +
6401 				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6402 
6403 		/* We may want a property to describe whether we should use
6404 		 * MAC hardware timestamping.
6405 		 */
6406 		if (priv->tai)
6407 			port->hwtstamp = true;
6408 	}
6409 
6410 	/* Alloc per-cpu and ethtool stats */
6411 	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6412 	if (!port->stats) {
6413 		err = -ENOMEM;
6414 		goto err_free_irq;
6415 	}
6416 
6417 	port->ethtool_stats = devm_kcalloc(&pdev->dev,
6418 					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6419 					   sizeof(u64), GFP_KERNEL);
6420 	if (!port->ethtool_stats) {
6421 		err = -ENOMEM;
6422 		goto err_free_stats;
6423 	}
6424 
6425 	mutex_init(&port->gather_stats_lock);
6426 	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6427 
6428 	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6429 
6430 	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6431 	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6432 	SET_NETDEV_DEV(dev, &pdev->dev);
6433 
6434 	err = mvpp2_port_init(port);
6435 	if (err < 0) {
6436 		dev_err(&pdev->dev, "failed to init port %d\n", id);
6437 		goto err_free_stats;
6438 	}
6439 
6440 	mvpp2_port_periodic_xon_disable(port);
6441 
6442 	mvpp2_mac_reset_assert(port);
6443 	mvpp22_pcs_reset_assert(port);
6444 
6445 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6446 	if (!port->pcpu) {
6447 		err = -ENOMEM;
6448 		goto err_free_txq_pcpu;
6449 	}
6450 
6451 	if (!port->has_tx_irqs) {
6452 		for (thread = 0; thread < priv->nthreads; thread++) {
6453 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
6454 
6455 			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6456 				     HRTIMER_MODE_REL_PINNED_SOFT);
6457 			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6458 			port_pcpu->timer_scheduled = false;
6459 			port_pcpu->dev = dev;
6460 		}
6461 	}
6462 
6463 	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6464 		   NETIF_F_TSO;
6465 	dev->features = features | NETIF_F_RXCSUM;
6466 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6467 			    NETIF_F_HW_VLAN_CTAG_FILTER;
6468 
6469 	if (mvpp22_rss_is_supported()) {
6470 		dev->hw_features |= NETIF_F_RXHASH;
6471 		dev->features |= NETIF_F_NTUPLE;
6472 	}
6473 
6474 	if (!port->priv->percpu_pools)
6475 		mvpp2_set_hw_csum(port, port->pool_long->id);
6476 
6477 	dev->vlan_features |= features;
6478 	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
6479 	dev->priv_flags |= IFF_UNICAST_FLT;
6480 
6481 	/* MTU range: 68 - 9704 */
6482 	dev->min_mtu = ETH_MIN_MTU;
6483 	/* 9704 == 9728 - 20 and rounding to 8 */
6484 	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6485 	dev->dev.of_node = port_node;
6486 
6487 	/* Phylink isn't used w/ ACPI as of now */
6488 	if (port_node) {
6489 		port->phylink_config.dev = &dev->dev;
6490 		port->phylink_config.type = PHYLINK_NETDEV;
6491 
6492 		phylink = phylink_create(&port->phylink_config, port_fwnode,
6493 					 phy_mode, &mvpp2_phylink_ops);
6494 		if (IS_ERR(phylink)) {
6495 			err = PTR_ERR(phylink);
6496 			goto err_free_port_pcpu;
6497 		}
6498 		port->phylink = phylink;
6499 	} else {
6500 		port->phylink = NULL;
6501 	}
6502 
6503 	/* Cycle the comphy to power it down, saving 270mW per port -
6504 	 * don't worry about an error powering it up. When the comphy
6505 	 * driver does this, we can remove this code.
6506 	 */
6507 	if (port->comphy) {
6508 		err = mvpp22_comphy_init(port);
6509 		if (err == 0)
6510 			phy_power_off(port->comphy);
6511 	}
6512 
6513 	err = register_netdev(dev);
6514 	if (err < 0) {
6515 		dev_err(&pdev->dev, "failed to register netdev\n");
6516 		goto err_phylink;
6517 	}
6518 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6519 
6520 	priv->port_list[priv->port_count++] = port;
6521 
6522 	return 0;
6523 
6524 err_phylink:
6525 	if (port->phylink)
6526 		phylink_destroy(port->phylink);
6527 err_free_port_pcpu:
6528 	free_percpu(port->pcpu);
6529 err_free_txq_pcpu:
6530 	for (i = 0; i < port->ntxqs; i++)
6531 		free_percpu(port->txqs[i]->pcpu);
6532 err_free_stats:
6533 	free_percpu(port->stats);
6534 err_free_irq:
6535 	if (port->port_irq)
6536 		irq_dispose_mapping(port->port_irq);
6537 err_deinit_qvecs:
6538 	mvpp2_queue_vectors_deinit(port);
6539 err_free_netdev:
6540 	free_netdev(dev);
6541 	return err;
6542 }
6543 
6544 /* Ports removal routine */
6545 static void mvpp2_port_remove(struct mvpp2_port *port)
6546 {
6547 	int i;
6548 
6549 	unregister_netdev(port->dev);
6550 	if (port->phylink)
6551 		phylink_destroy(port->phylink);
6552 	free_percpu(port->pcpu);
6553 	free_percpu(port->stats);
6554 	for (i = 0; i < port->ntxqs; i++)
6555 		free_percpu(port->txqs[i]->pcpu);
6556 	mvpp2_queue_vectors_deinit(port);
6557 	if (port->port_irq)
6558 		irq_dispose_mapping(port->port_irq);
6559 	free_netdev(port->dev);
6560 }
6561 
6562 /* Initialize decoding windows */
6563 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
6564 				    struct mvpp2 *priv)
6565 {
6566 	u32 win_enable;
6567 	int i;
6568 
6569 	for (i = 0; i < 6; i++) {
6570 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
6571 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
6572 
6573 		if (i < 4)
6574 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
6575 	}
6576 
6577 	win_enable = 0;
6578 
6579 	for (i = 0; i < dram->num_cs; i++) {
6580 		const struct mbus_dram_window *cs = dram->cs + i;
6581 
6582 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
6583 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
6584 			    dram->mbus_dram_target_id);
6585 
6586 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
6587 			    (cs->size - 1) & 0xffff0000);
6588 
6589 		win_enable |= (1 << i);
6590 	}
6591 
6592 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
6593 }
6594 
6595 /* Initialize Rx FIFO's */
6596 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
6597 {
6598 	int port;
6599 
6600 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6601 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6602 			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6603 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6604 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6605 	}
6606 
6607 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6608 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
6609 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6610 }
6611 
6612 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
6613 {
6614 	int port;
6615 
6616 	/* The FIFO size parameters are set depending on the maximum speed a
6617 	 * given port can handle:
6618 	 * - Port 0: 10Gbps
6619 	 * - Port 1: 2.5Gbps
6620 	 * - Ports 2 and 3: 1Gbps
6621 	 */
6622 
6623 	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(0),
6624 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
6625 	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(0),
6626 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_32KB);
6627 
6628 	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(1),
6629 		    MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
6630 	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(1),
6631 		    MVPP2_RX_FIFO_PORT_ATTR_SIZE_8KB);
6632 
6633 	for (port = 2; port < MVPP2_MAX_PORTS; port++) {
6634 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
6635 			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
6636 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
6637 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
6638 	}
6639 
6640 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
6641 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
6642 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
6643 }
6644 
6645 /* Initialize Tx FIFO's: the total FIFO size is 19kB on PPv2.2 and 10G
6646  * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
6647  * configure its Tx FIFO size to 10kB and the others ports Tx FIFO size to 3kB.
6648  */
6649 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
6650 {
6651 	int port, size, thrs;
6652 
6653 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
6654 		if (port == 0) {
6655 			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
6656 			thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
6657 		} else {
6658 			size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
6659 			thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
6660 		}
6661 		mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
6662 		mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
6663 	}
6664 }
6665 
6666 static void mvpp2_axi_init(struct mvpp2 *priv)
6667 {
6668 	u32 val, rdval, wrval;
6669 
6670 	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
6671 
6672 	/* AXI Bridge Configuration */
6673 
6674 	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
6675 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
6676 	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6677 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
6678 
6679 	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
6680 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
6681 	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6682 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
6683 
6684 	/* BM */
6685 	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
6686 	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
6687 
6688 	/* Descriptors */
6689 	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
6690 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
6691 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
6692 	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
6693 
6694 	/* Buffer Data */
6695 	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
6696 	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
6697 
6698 	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
6699 		<< MVPP22_AXI_CODE_CACHE_OFFS;
6700 	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
6701 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
6702 	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
6703 	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
6704 
6705 	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
6706 		<< MVPP22_AXI_CODE_CACHE_OFFS;
6707 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6708 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
6709 
6710 	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
6711 
6712 	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
6713 		<< MVPP22_AXI_CODE_CACHE_OFFS;
6714 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
6715 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
6716 
6717 	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
6718 }
6719 
6720 /* Initialize network controller common part HW */
6721 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
6722 {
6723 	const struct mbus_dram_target_info *dram_target_info;
6724 	int err, i;
6725 	u32 val;
6726 
6727 	/* MBUS windows configuration */
6728 	dram_target_info = mv_mbus_dram_info();
6729 	if (dram_target_info)
6730 		mvpp2_conf_mbus_windows(dram_target_info, priv);
6731 
6732 	if (priv->hw_version == MVPP22)
6733 		mvpp2_axi_init(priv);
6734 
6735 	/* Disable HW PHY polling */
6736 	if (priv->hw_version == MVPP21) {
6737 		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6738 		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
6739 		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
6740 	} else {
6741 		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6742 		val &= ~MVPP22_SMI_POLLING_EN;
6743 		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
6744 	}
6745 
6746 	/* Allocate and initialize aggregated TXQs */
6747 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
6748 				       sizeof(*priv->aggr_txqs),
6749 				       GFP_KERNEL);
6750 	if (!priv->aggr_txqs)
6751 		return -ENOMEM;
6752 
6753 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6754 		priv->aggr_txqs[i].id = i;
6755 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
6756 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
6757 		if (err < 0)
6758 			return err;
6759 	}
6760 
6761 	/* Fifo Init */
6762 	if (priv->hw_version == MVPP21) {
6763 		mvpp2_rx_fifo_init(priv);
6764 	} else {
6765 		mvpp22_rx_fifo_init(priv);
6766 		mvpp22_tx_fifo_init(priv);
6767 	}
6768 
6769 	if (priv->hw_version == MVPP21)
6770 		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
6771 		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
6772 
6773 	/* Allow cache snoop when transmiting packets */
6774 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
6775 
6776 	/* Buffer Manager initialization */
6777 	err = mvpp2_bm_init(&pdev->dev, priv);
6778 	if (err < 0)
6779 		return err;
6780 
6781 	/* Parser default initialization */
6782 	err = mvpp2_prs_default_init(pdev, priv);
6783 	if (err < 0)
6784 		return err;
6785 
6786 	/* Classifier default initialization */
6787 	mvpp2_cls_init(priv);
6788 
6789 	return 0;
6790 }
6791 
6792 static int mvpp2_probe(struct platform_device *pdev)
6793 {
6794 	const struct acpi_device_id *acpi_id;
6795 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
6796 	struct fwnode_handle *port_fwnode;
6797 	struct mvpp2 *priv;
6798 	struct resource *res;
6799 	void __iomem *base;
6800 	int i, shared;
6801 	int err;
6802 
6803 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
6804 	if (!priv)
6805 		return -ENOMEM;
6806 
6807 	if (has_acpi_companion(&pdev->dev)) {
6808 		acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table,
6809 					    &pdev->dev);
6810 		if (!acpi_id)
6811 			return -EINVAL;
6812 		priv->hw_version = (unsigned long)acpi_id->driver_data;
6813 	} else {
6814 		priv->hw_version =
6815 			(unsigned long)of_device_get_match_data(&pdev->dev);
6816 	}
6817 
6818 	/* multi queue mode isn't supported on PPV2.1, fallback to single
6819 	 * mode
6820 	 */
6821 	if (priv->hw_version == MVPP21)
6822 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
6823 
6824 	base = devm_platform_ioremap_resource(pdev, 0);
6825 	if (IS_ERR(base))
6826 		return PTR_ERR(base);
6827 
6828 	if (priv->hw_version == MVPP21) {
6829 		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
6830 		if (IS_ERR(priv->lms_base))
6831 			return PTR_ERR(priv->lms_base);
6832 	} else {
6833 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
6834 		if (has_acpi_companion(&pdev->dev)) {
6835 			/* In case the MDIO memory region is declared in
6836 			 * the ACPI, it can already appear as 'in-use'
6837 			 * in the OS. Because it is overlapped by second
6838 			 * region of the network controller, make
6839 			 * sure it is released, before requesting it again.
6840 			 * The care is taken by mvpp2 driver to avoid
6841 			 * concurrent access to this memory region.
6842 			 */
6843 			release_resource(res);
6844 		}
6845 		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
6846 		if (IS_ERR(priv->iface_base))
6847 			return PTR_ERR(priv->iface_base);
6848 	}
6849 
6850 	if (priv->hw_version == MVPP22 && dev_of_node(&pdev->dev)) {
6851 		priv->sysctrl_base =
6852 			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
6853 							"marvell,system-controller");
6854 		if (IS_ERR(priv->sysctrl_base))
6855 			/* The system controller regmap is optional for dt
6856 			 * compatibility reasons. When not provided, the
6857 			 * configuration of the GoP relies on the
6858 			 * firmware/bootloader.
6859 			 */
6860 			priv->sysctrl_base = NULL;
6861 	}
6862 
6863 	if (priv->hw_version == MVPP22 &&
6864 	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
6865 		priv->percpu_pools = 1;
6866 
6867 	mvpp2_setup_bm_pool();
6868 
6869 
6870 	priv->nthreads = min_t(unsigned int, num_present_cpus(),
6871 			       MVPP2_MAX_THREADS);
6872 
6873 	shared = num_present_cpus() - priv->nthreads;
6874 	if (shared > 0)
6875 		bitmap_fill(&priv->lock_map,
6876 			    min_t(int, shared, MVPP2_MAX_THREADS));
6877 
6878 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
6879 		u32 addr_space_sz;
6880 
6881 		addr_space_sz = (priv->hw_version == MVPP21 ?
6882 				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
6883 		priv->swth_base[i] = base + i * addr_space_sz;
6884 	}
6885 
6886 	if (priv->hw_version == MVPP21)
6887 		priv->max_port_rxqs = 8;
6888 	else
6889 		priv->max_port_rxqs = 32;
6890 
6891 	if (dev_of_node(&pdev->dev)) {
6892 		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
6893 		if (IS_ERR(priv->pp_clk))
6894 			return PTR_ERR(priv->pp_clk);
6895 		err = clk_prepare_enable(priv->pp_clk);
6896 		if (err < 0)
6897 			return err;
6898 
6899 		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
6900 		if (IS_ERR(priv->gop_clk)) {
6901 			err = PTR_ERR(priv->gop_clk);
6902 			goto err_pp_clk;
6903 		}
6904 		err = clk_prepare_enable(priv->gop_clk);
6905 		if (err < 0)
6906 			goto err_pp_clk;
6907 
6908 		if (priv->hw_version == MVPP22) {
6909 			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
6910 			if (IS_ERR(priv->mg_clk)) {
6911 				err = PTR_ERR(priv->mg_clk);
6912 				goto err_gop_clk;
6913 			}
6914 
6915 			err = clk_prepare_enable(priv->mg_clk);
6916 			if (err < 0)
6917 				goto err_gop_clk;
6918 
6919 			priv->mg_core_clk = devm_clk_get(&pdev->dev, "mg_core_clk");
6920 			if (IS_ERR(priv->mg_core_clk)) {
6921 				priv->mg_core_clk = NULL;
6922 			} else {
6923 				err = clk_prepare_enable(priv->mg_core_clk);
6924 				if (err < 0)
6925 					goto err_mg_clk;
6926 			}
6927 		}
6928 
6929 		priv->axi_clk = devm_clk_get(&pdev->dev, "axi_clk");
6930 		if (IS_ERR(priv->axi_clk)) {
6931 			err = PTR_ERR(priv->axi_clk);
6932 			if (err == -EPROBE_DEFER)
6933 				goto err_mg_core_clk;
6934 			priv->axi_clk = NULL;
6935 		} else {
6936 			err = clk_prepare_enable(priv->axi_clk);
6937 			if (err < 0)
6938 				goto err_mg_core_clk;
6939 		}
6940 
6941 		/* Get system's tclk rate */
6942 		priv->tclk = clk_get_rate(priv->pp_clk);
6943 	} else if (device_property_read_u32(&pdev->dev, "clock-frequency",
6944 					    &priv->tclk)) {
6945 		dev_err(&pdev->dev, "missing clock-frequency value\n");
6946 		return -EINVAL;
6947 	}
6948 
6949 	if (priv->hw_version == MVPP22) {
6950 		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
6951 		if (err)
6952 			goto err_axi_clk;
6953 		/* Sadly, the BM pools all share the same register to
6954 		 * store the high 32 bits of their address. So they
6955 		 * must all have the same high 32 bits, which forces
6956 		 * us to restrict coherent memory to DMA_BIT_MASK(32).
6957 		 */
6958 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
6959 		if (err)
6960 			goto err_axi_clk;
6961 	}
6962 
6963 	/* Initialize network controller */
6964 	err = mvpp2_init(pdev, priv);
6965 	if (err < 0) {
6966 		dev_err(&pdev->dev, "failed to initialize controller\n");
6967 		goto err_axi_clk;
6968 	}
6969 
6970 	err = mvpp22_tai_probe(&pdev->dev, priv);
6971 	if (err < 0)
6972 		goto err_axi_clk;
6973 
6974 	/* Initialize ports */
6975 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
6976 		err = mvpp2_port_probe(pdev, port_fwnode, priv);
6977 		if (err < 0)
6978 			goto err_port_probe;
6979 	}
6980 
6981 	if (priv->port_count == 0) {
6982 		dev_err(&pdev->dev, "no ports enabled\n");
6983 		err = -ENODEV;
6984 		goto err_axi_clk;
6985 	}
6986 
6987 	/* Statistics must be gathered regularly because some of them (like
6988 	 * packets counters) are 32-bit registers and could overflow quite
6989 	 * quickly. For instance, a 10Gb link used at full bandwidth with the
6990 	 * smallest packets (64B) will overflow a 32-bit counter in less than
6991 	 * 30 seconds. Then, use a workqueue to fill 64-bit counters.
6992 	 */
6993 	snprintf(priv->queue_name, sizeof(priv->queue_name),
6994 		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
6995 		 priv->port_count > 1 ? "+" : "");
6996 	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
6997 	if (!priv->stats_queue) {
6998 		err = -ENOMEM;
6999 		goto err_port_probe;
7000 	}
7001 
7002 	mvpp2_dbgfs_init(priv, pdev->name);
7003 
7004 	platform_set_drvdata(pdev, priv);
7005 	return 0;
7006 
7007 err_port_probe:
7008 	i = 0;
7009 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7010 		if (priv->port_list[i])
7011 			mvpp2_port_remove(priv->port_list[i]);
7012 		i++;
7013 	}
7014 err_axi_clk:
7015 	clk_disable_unprepare(priv->axi_clk);
7016 
7017 err_mg_core_clk:
7018 	if (priv->hw_version == MVPP22)
7019 		clk_disable_unprepare(priv->mg_core_clk);
7020 err_mg_clk:
7021 	if (priv->hw_version == MVPP22)
7022 		clk_disable_unprepare(priv->mg_clk);
7023 err_gop_clk:
7024 	clk_disable_unprepare(priv->gop_clk);
7025 err_pp_clk:
7026 	clk_disable_unprepare(priv->pp_clk);
7027 	return err;
7028 }
7029 
7030 static int mvpp2_remove(struct platform_device *pdev)
7031 {
7032 	struct mvpp2 *priv = platform_get_drvdata(pdev);
7033 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
7034 	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
7035 	struct fwnode_handle *port_fwnode;
7036 
7037 	mvpp2_dbgfs_cleanup(priv);
7038 
7039 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7040 		if (priv->port_list[i]) {
7041 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
7042 			mvpp2_port_remove(priv->port_list[i]);
7043 		}
7044 		i++;
7045 	}
7046 
7047 	destroy_workqueue(priv->stats_queue);
7048 
7049 	if (priv->percpu_pools)
7050 		poolnum = mvpp2_get_nrxqs(priv) * 2;
7051 
7052 	for (i = 0; i < poolnum; i++) {
7053 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7054 
7055 		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
7056 	}
7057 
7058 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7059 		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7060 
7061 		dma_free_coherent(&pdev->dev,
7062 				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7063 				  aggr_txq->descs,
7064 				  aggr_txq->descs_dma);
7065 	}
7066 
7067 	if (is_acpi_node(port_fwnode))
7068 		return 0;
7069 
7070 	clk_disable_unprepare(priv->axi_clk);
7071 	clk_disable_unprepare(priv->mg_core_clk);
7072 	clk_disable_unprepare(priv->mg_clk);
7073 	clk_disable_unprepare(priv->pp_clk);
7074 	clk_disable_unprepare(priv->gop_clk);
7075 
7076 	return 0;
7077 }
7078 
7079 static const struct of_device_id mvpp2_match[] = {
7080 	{
7081 		.compatible = "marvell,armada-375-pp2",
7082 		.data = (void *)MVPP21,
7083 	},
7084 	{
7085 		.compatible = "marvell,armada-7k-pp22",
7086 		.data = (void *)MVPP22,
7087 	},
7088 	{ }
7089 };
7090 MODULE_DEVICE_TABLE(of, mvpp2_match);
7091 
7092 #ifdef CONFIG_ACPI
7093 static const struct acpi_device_id mvpp2_acpi_match[] = {
7094 	{ "MRVL0110", MVPP22 },
7095 	{ },
7096 };
7097 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
7098 #endif
7099 
7100 static struct platform_driver mvpp2_driver = {
7101 	.probe = mvpp2_probe,
7102 	.remove = mvpp2_remove,
7103 	.driver = {
7104 		.name = MVPP2_DRIVER_NAME,
7105 		.of_match_table = mvpp2_match,
7106 		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
7107 	},
7108 };
7109 
7110 module_platform_driver(mvpp2_driver);
7111 
7112 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7113 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
7114 MODULE_LICENSE("GPL v2");
7115