// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for Marvell PPv2 network controller for Armada 375 SoC.
 *
 * Copyright (C) 2014 Marvell
 *
 * Marcin Wojtas <mw@semihalf.com>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/mbus.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/phy.h>
#include <linux/phylink.h>
#include <linux/phy/phy.h>
#include <linux/ptp_classify.h>
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/regmap.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tso.h>
#include <linux/bpf_trace.h>

#include "mvpp2.h"
#include "mvpp2_prs.h"
#include "mvpp2_cls.h"

enum mvpp2_bm_pool_log_num {
	MVPP2_BM_SHORT,
	MVPP2_BM_LONG,
	MVPP2_BM_JUMBO,
	MVPP2_BM_POOLS_NUM
};

static struct {
	int pkt_size;
	int buf_num;
} mvpp2_pools[MVPP2_BM_POOLS_NUM];

/* The prototype is added here to be used in start_dev when using ACPI. This
 * will be removed once phylink is used for all modes (dt+ACPI).
 */
static void mvpp2_acpi_start(struct mvpp2_port *port);

/* Queue modes */
#define MVPP2_QDIST_SINGLE_MODE	0
#define MVPP2_QDIST_MULTI_MODE	1

static int queue_mode = MVPP2_QDIST_MULTI_MODE;

module_param(queue_mode, int, 0444);
MODULE_PARM_DESC(queue_mode, "Set queue_mode (single=0, multi=1)");

/* Utility/helper methods */

void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->swth_base[0] + offset);
}

u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->swth_base[0] + offset);
}

static u32 mvpp2_read_relaxed(struct mvpp2 *priv, u32 offset)
{
	return readl_relaxed(priv->swth_base[0] + offset);
}

static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
	return cpu % priv->nthreads;
}

static void mvpp2_cm3_write(struct mvpp2 *priv, u32 offset, u32 data)
{
	writel(data, priv->cm3_base + offset);
}

static u32 mvpp2_cm3_read(struct mvpp2 *priv, u32 offset)
{
	return readl(priv->cm3_base + offset);
}

static struct page_pool *
mvpp2_create_page_pool(struct device *dev, int num, int len,
		       enum dma_data_direction dma_dir)
{
	struct page_pool_params pp_params = {
		/* internal DMA mapping in page_pool */
		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size = num,
		.nid = NUMA_NO_NODE,
		.dev = dev,
		.dma_dir = dma_dir,
		.offset = MVPP2_SKB_HEADROOM,
		.max_len = len,
	};

	return page_pool_create(&pp_params);
}

/* These accessors should be used to access:
 *
 * - per-thread registers, where each thread has its own copy of the
 *   register.
 *
 *   MVPP2_BM_VIRT_ALLOC_REG
 *   MVPP2_BM_ADDR_HIGH_ALLOC
 *   MVPP22_BM_ADDR_HIGH_RLS_REG
 *   MVPP2_BM_VIRT_RLS_REG
 *   MVPP2_ISR_RX_TX_CAUSE_REG
 *   MVPP2_ISR_RX_TX_MASK_REG
 *   MVPP2_TXQ_NUM_REG
 *   MVPP2_AGGR_TXQ_UPDATE_REG
 *   MVPP2_TXQ_RSVD_REQ_REG
 *   MVPP2_TXQ_RSVD_RSLT_REG
 *   MVPP2_TXQ_SENT_REG
 *   MVPP2_RXQ_NUM_REG
 *
 * - global registers that must be accessed through a specific thread
 *   window, because they are related to an access to a per-thread
 *   register
 *
 *   MVPP2_BM_PHY_ALLOC_REG    (related to MVPP2_BM_VIRT_ALLOC_REG)
 *   MVPP2_BM_PHY_RLS_REG      (related to MVPP2_BM_VIRT_RLS_REG)
 *   MVPP2_RXQ_THRESH_REG      (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_ADDR_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_DESC_SIZE_REG   (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_RXQ_INDEX_REG       (related to MVPP2_RXQ_NUM_REG)
 *   MVPP2_TXQ_PENDING_REG     (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_ADDR_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_DESC_SIZE_REG   (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_INDEX_REG       (related to MVPP2_TXQ_NUM_REG)
 *   MVPP2_TXQ_PREF_BUF_REG    (related to MVPP2_TXQ_NUM_REG)
 */
static void mvpp2_thread_write(struct mvpp2 *priv, unsigned int thread,
			       u32 offset, u32 data)
{
	writel(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read(struct mvpp2 *priv, unsigned int thread,
			     u32 offset)
{
	return readl(priv->swth_base[thread] + offset);
}

static void mvpp2_thread_write_relaxed(struct mvpp2 *priv, unsigned int thread,
				       u32 offset, u32 data)
{
	writel_relaxed(data, priv->swth_base[thread] + offset);
}

static u32 mvpp2_thread_read_relaxed(struct mvpp2 *priv, unsigned int thread,
				     u32 offset)
{
	return readl_relaxed(priv->swth_base[thread] + offset);
}
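
/* Illustrative sketch (not called by the driver) of the indirect access
 * pattern described above: a queue-indexed global register such as
 * MVPP2_TXQ_PENDING_REG only returns meaningful data once the queue ID has
 * been latched through MVPP2_TXQ_NUM_REG in the same thread window.
 * MVPP2_TXQ_PENDING_MASK is assumed to be the pending-count field mask from
 * mvpp2.h.
 */
static inline u32 mvpp2_example_txq_pending(struct mvpp2 *priv,
					    unsigned int thread, int txq_id)
{
	/* Latch the target queue, then read its pending descriptor count */
	mvpp2_thread_write(priv, thread, MVPP2_TXQ_NUM_REG, txq_id);
	return mvpp2_thread_read(priv, thread, MVPP2_TXQ_PENDING_REG) &
	       MVPP2_TXQ_PENDING_MASK;
}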

static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(tx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(tx_desc->pp22.buf_dma_addr_ptp) &
		       MVPP2_DESC_DMA_MASK;
}

static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port,
				      struct mvpp2_tx_desc *tx_desc,
				      dma_addr_t dma_addr)
{
	dma_addr_t addr, offset;

	addr = dma_addr & ~MVPP2_TX_DESC_ALIGN;
	offset = dma_addr & MVPP2_TX_DESC_ALIGN;

	if (port->priv->hw_version == MVPP21) {
		tx_desc->pp21.buf_dma_addr = cpu_to_le32(addr);
		tx_desc->pp21.packet_offset = offset;
	} else {
		__le64 val = cpu_to_le64(addr);

		tx_desc->pp22.buf_dma_addr_ptp &= ~cpu_to_le64(MVPP2_DESC_DMA_MASK);
		tx_desc->pp22.buf_dma_addr_ptp |= val;
		tx_desc->pp22.packet_offset = offset;
	}
}

static size_t mvpp2_txdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(tx_desc->pp21.data_size);
	else
		return le16_to_cpu(tx_desc->pp22.data_size);
}

static void mvpp2_txdesc_size_set(struct mvpp2_port *port,
				  struct mvpp2_tx_desc *tx_desc,
				  size_t size)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.data_size = cpu_to_le16(size);
	else
		tx_desc->pp22.data_size = cpu_to_le16(size);
}

static void mvpp2_txdesc_txq_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int txq)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.phys_txq = txq;
	else
		tx_desc->pp22.phys_txq = txq;
}

static void mvpp2_txdesc_cmd_set(struct mvpp2_port *port,
				 struct mvpp2_tx_desc *tx_desc,
				 unsigned int command)
{
	if (port->priv->hw_version == MVPP21)
		tx_desc->pp21.command = cpu_to_le32(command);
	else
		tx_desc->pp22.command = cpu_to_le32(command);
}

static unsigned int mvpp2_txdesc_offset_get(struct mvpp2_port *port,
					    struct mvpp2_tx_desc *tx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return tx_desc->pp21.packet_offset;
	else
		return tx_desc->pp22.packet_offset;
}

static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port,
					    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_dma_addr);
	else
		return le64_to_cpu(rx_desc->pp22.buf_dma_addr_key_hash) &
		       MVPP2_DESC_DMA_MASK;
}

static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port,
					     struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.buf_cookie);
	else
		return le64_to_cpu(rx_desc->pp22.buf_cookie_misc) &
		       MVPP2_DESC_DMA_MASK;
}

static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port,
				    struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le16_to_cpu(rx_desc->pp21.data_size);
	else
		return le16_to_cpu(rx_desc->pp22.data_size);
}

static u32 mvpp2_rxdesc_status_get(struct mvpp2_port *port,
				   struct mvpp2_rx_desc *rx_desc)
{
	if (port->priv->hw_version == MVPP21)
		return le32_to_cpu(rx_desc->pp21.status);
	else
		return le32_to_cpu(rx_desc->pp22.status);
}

static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
{
	txq_pcpu->txq_get_index++;
	if (txq_pcpu->txq_get_index == txq_pcpu->size)
		txq_pcpu->txq_get_index = 0;
}

static void mvpp2_txq_inc_put(struct mvpp2_port *port,
			      struct mvpp2_txq_pcpu *txq_pcpu,
			      void *data,
			      struct mvpp2_tx_desc *tx_desc,
			      enum mvpp2_tx_buf_type buf_type)
{
	struct mvpp2_txq_pcpu_buf *tx_buf =
		txq_pcpu->buffs + txq_pcpu->txq_put_index;
	tx_buf->type = buf_type;
	if (buf_type == MVPP2_TYPE_SKB)
		tx_buf->skb = data;
	else
		tx_buf->xdpf = data;
	tx_buf->size = mvpp2_txdesc_size_get(port, tx_desc);
	tx_buf->dma = mvpp2_txdesc_dma_addr_get(port, tx_desc) +
		mvpp2_txdesc_offset_get(port, tx_desc);
	txq_pcpu->txq_put_index++;
	if (txq_pcpu->txq_put_index == txq_pcpu->size)
		txq_pcpu->txq_put_index = 0;
}

/* Get the maximum number of RXQs */
static int mvpp2_get_nrxqs(struct mvpp2 *priv)
{
	unsigned int nrxqs;

	if (priv->hw_version >= MVPP22 && queue_mode == MVPP2_QDIST_SINGLE_MODE)
		return 1;

	/* According to the PPv2.2 datasheet and our experiments on
	 * PPv2.1, RX queues have an allocation granularity of 4 (when
	 * more than a single one on PPv2.2).
	 * Round up to the nearest multiple of 4.
	 */
	nrxqs = (num_possible_cpus() + 3) & ~0x3;
	if (nrxqs > MVPP2_PORT_MAX_RXQ)
		nrxqs = MVPP2_PORT_MAX_RXQ;

	return nrxqs;
}
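
/* Worked example of the rounding above: with 6 possible CPUs,
 * (6 + 3) & ~0x3 == 8, and with 8 CPUs, (8 + 3) & ~0x3 == 8; the result is
 * then clamped to MVPP2_PORT_MAX_RXQ.
 */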

/* Get the physical egress port number */
static inline int mvpp2_egress_port(struct mvpp2_port *port)
{
	return MVPP2_MAX_TCONT + port->id;
}

/* Get the physical TXQ number */
static inline int mvpp2_txq_phys(int port, int txq)
{
	return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
}

/* Returns a struct page if page_pool is set, otherwise a buffer */
static void *mvpp2_frag_alloc(const struct mvpp2_bm_pool *pool,
			      struct page_pool *page_pool)
{
	if (page_pool)
		return page_pool_dev_alloc_pages(page_pool);

	if (likely(pool->frag_size <= PAGE_SIZE))
		return netdev_alloc_frag(pool->frag_size);

	return kmalloc(pool->frag_size, GFP_ATOMIC);
}

static void mvpp2_frag_free(const struct mvpp2_bm_pool *pool,
			    struct page_pool *page_pool, void *data)
{
	if (page_pool)
		page_pool_put_full_page(page_pool, virt_to_head_page(data), false);
	else if (likely(pool->frag_size <= PAGE_SIZE))
		skb_free_frag(data);
	else
		kfree(data);
}

/* Buffer Manager configuration routines */

/* Create pool */
static int mvpp2_bm_pool_create(struct device *dev, struct mvpp2 *priv,
				struct mvpp2_bm_pool *bm_pool, int size)
{
	u32 val;

	/* Number of buffer pointers must be a multiple of 16, as per
	 * hardware constraints
	 */
	if (!IS_ALIGNED(size, 16))
		return -EINVAL;

	/* PPv2.1 needs 8 bytes per buffer pointer, PPv2.2 and PPv2.3 need 16
	 * bytes per buffer pointer
	 */
	if (priv->hw_version == MVPP21)
		bm_pool->size_bytes = 2 * sizeof(u32) * size;
	else
		bm_pool->size_bytes = 2 * sizeof(u64) * size;

	bm_pool->virt_addr = dma_alloc_coherent(dev, bm_pool->size_bytes,
						&bm_pool->dma_addr,
						GFP_KERNEL);
	if (!bm_pool->virt_addr)
		return -ENOMEM;

	if (!IS_ALIGNED((unsigned long)bm_pool->virt_addr,
			MVPP2_BM_POOL_PTR_ALIGN)) {
		dma_free_coherent(dev, bm_pool->size_bytes,
				  bm_pool->virt_addr, bm_pool->dma_addr);
		dev_err(dev, "BM pool %d is not %d bytes aligned\n",
			bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
		return -ENOMEM;
	}

	mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
		    lower_32_bits(bm_pool->dma_addr));
	mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_START_MASK;

	val &= ~MVPP2_BM_LOW_THRESH_MASK;
	val &= ~MVPP2_BM_HIGH_THRESH_MASK;

	/* Set 8 Pools BPPI threshold for MVPP23 */
	if (priv->hw_version == MVPP23) {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP23_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP23_BM_BPPI_HIGH_THRESH);
	} else {
		val |= MVPP2_BM_LOW_THRESH_VALUE(MVPP2_BM_BPPI_LOW_THRESH);
		val |= MVPP2_BM_HIGH_THRESH_VALUE(MVPP2_BM_BPPI_HIGH_THRESH);
	}

	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	bm_pool->size = size;
	bm_pool->pkt_size = 0;
	bm_pool->buf_num = 0;

	return 0;
}
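
/* Sizing example for mvpp2_bm_pool_create(): on PPv2.2/2.3 each entry takes
 * 2 * sizeof(u64) == 16 bytes, so an (illustrative) pool of 2048 pointers
 * needs a 32 KiB coherent region; the same pool on PPv2.1 needs 16 KiB.
 */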

/* Set pool buffer size */
static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
				      struct mvpp2_bm_pool *bm_pool,
				      int buf_size)
{
	u32 val;

	bm_pool->buf_size = buf_size;

	val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
	mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
}
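
/* The hardware tracks buffer sizes at a granularity of
 * 1 << MVPP2_POOL_BUF_SIZE_OFFSET bytes, hence the ALIGN() above. Worked
 * example, assuming a 32-byte granularity: a 1518-byte buffer is programmed
 * as ALIGN(1518, 32) == 1536.
 */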

static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
				    struct mvpp2_bm_pool *bm_pool,
				    dma_addr_t *dma_addr,
				    phys_addr_t *phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());

	*dma_addr = mvpp2_thread_read(priv, thread,
				      MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
	*phys_addr = mvpp2_thread_read(priv, thread, MVPP2_BM_VIRT_ALLOC_REG);

	if (priv->hw_version >= MVPP22) {
		u32 val;
		u32 dma_addr_highbits, phys_addr_highbits;

		val = mvpp2_thread_read(priv, thread, MVPP22_BM_ADDR_HIGH_ALLOC);
		dma_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_PHYS_MASK);
		phys_addr_highbits = (val & MVPP22_BM_ADDR_HIGH_VIRT_MASK) >>
			MVPP22_BM_ADDR_HIGH_VIRT_SHIFT;

		if (sizeof(dma_addr_t) == 8)
			*dma_addr |= (u64)dma_addr_highbits << 32;

		if (sizeof(phys_addr_t) == 8)
			*phys_addr |= (u64)phys_addr_highbits << 32;
	}

	put_cpu();
}

/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
			       struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	struct page_pool *pp = NULL;
	int i;

	if (buf_num > bm_pool->buf_num) {
		WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
		     bm_pool->id, buf_num);
		buf_num = bm_pool->buf_num;
	}

	if (priv->percpu_pools)
		pp = priv->page_pool[bm_pool->id];

	for (i = 0; i < buf_num; i++) {
		dma_addr_t buf_dma_addr;
		phys_addr_t buf_phys_addr;
		void *data;

		mvpp2_bm_bufs_get_addrs(dev, priv, bm_pool,
					&buf_dma_addr, &buf_phys_addr);

		if (!pp)
			dma_unmap_single(dev, buf_dma_addr,
					 bm_pool->buf_size, DMA_FROM_DEVICE);

		data = (void *)phys_to_virt(buf_phys_addr);
		if (!data)
			break;

		mvpp2_frag_free(bm_pool, pp, data);
	}

	/* Update BM driver with number of buffers removed from pool */
	bm_pool->buf_num -= i;
}

/* Check number of buffers in BM pool */
static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
{
	int buf_num = 0;

	buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP22_BM_POOL_PTRS_NUM_MASK;
	buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
				    MVPP2_BM_BPPI_PTR_NUM_MASK;

	/* HW has one buffer ready which is not reflected in the counters */
	if (buf_num)
		buf_num += 1;

	return buf_num;
}

/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct device *dev, struct mvpp2 *priv,
				 struct mvpp2_bm_pool *bm_pool)
{
	int buf_num;
	u32 val;

	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	mvpp2_bm_bufs_free(dev, priv, bm_pool, buf_num);

	/* Check buffer counters after free */
	buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
	if (buf_num) {
		WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
		     bm_pool->id, bm_pool->buf_num);
		return 0;
	}

	val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
	val |= MVPP2_BM_STOP_MASK;
	mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);

	if (priv->percpu_pools) {
		page_pool_destroy(priv->page_pool[bm_pool->id]);
		priv->page_pool[bm_pool->id] = NULL;
	}

	dma_free_coherent(dev, bm_pool->size_bytes,
			  bm_pool->virt_addr,
			  bm_pool->dma_addr);
	return 0;
}

static int mvpp2_bm_pools_init(struct device *dev, struct mvpp2 *priv)
{
	int i, err, size, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_bm_pool *bm_pool;

	if (priv->percpu_pools)
		poolnum = mvpp2_get_nrxqs(priv) * 2;

	/* Create all pools with maximum size */
	size = MVPP2_BM_POOL_SIZE_MAX;
	for (i = 0; i < poolnum; i++) {
		bm_pool = &priv->bm_pools[i];
		bm_pool->id = i;
		err = mvpp2_bm_pool_create(dev, priv, bm_pool, size);
		if (err)
			goto err_unroll_pools;
		mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
	}
	return 0;

err_unroll_pools:
	dev_err(dev, "failed to create BM pool %d, size %d\n", i, size);
	for (i = i - 1; i >= 0; i--)
		mvpp2_bm_pool_destroy(dev, priv, &priv->bm_pools[i]);
	return err;
}

/* Enable the PPv2.3 8-pool mode */
static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
{
	int val;

	val = mvpp2_read(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG);
	val |= MVPP23_BM_8POOL_MODE;
	mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
}

static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
{
	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
	int i, err, poolnum = MVPP2_BM_POOLS_NUM;
	struct mvpp2_port *port;

	if (priv->percpu_pools) {
		for (i = 0; i < priv->port_count; i++) {
			port = priv->port_list[i];
			if (port->xdp_prog) {
				dma_dir = DMA_BIDIRECTIONAL;
				break;
			}
		}

		poolnum = mvpp2_get_nrxqs(priv) * 2;
		for (i = 0; i < poolnum; i++) {
			/* the pool in use */
			int pn = i / (poolnum / 2);

			priv->page_pool[i] =
				mvpp2_create_page_pool(dev,
						       mvpp2_pools[pn].buf_num,
						       mvpp2_pools[pn].pkt_size,
						       dma_dir);
			if (IS_ERR(priv->page_pool[i])) {
				int j;

				for (j = 0; j < i; j++) {
					page_pool_destroy(priv->page_pool[j]);
					priv->page_pool[j] = NULL;
				}
				return PTR_ERR(priv->page_pool[i]);
			}
		}
	}

	dev_info(dev, "using %d %s buffers\n", poolnum,
		 priv->percpu_pools ? "per-cpu" : "shared");

	for (i = 0; i < poolnum; i++) {
		/* Mask BM all interrupts */
		mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
		/* Clear BM cause register */
		mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
	}

	/* Allocate and initialize BM pools */
	priv->bm_pools = devm_kcalloc(dev, poolnum,
				      sizeof(*priv->bm_pools), GFP_KERNEL);
	if (!priv->bm_pools)
		return -ENOMEM;

	if (priv->hw_version == MVPP23)
		mvpp23_bm_set_8pool_mode(priv);

	err = mvpp2_bm_pools_init(dev, priv);
	if (err < 0)
		return err;
	return 0;
}

static void mvpp2_setup_bm_pool(void)
{
	/* Short pool */
	mvpp2_pools[MVPP2_BM_SHORT].buf_num  = MVPP2_BM_SHORT_BUF_NUM;
	mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;

	/* Long pool */
	mvpp2_pools[MVPP2_BM_LONG].buf_num  = MVPP2_BM_LONG_BUF_NUM;
	mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;

	/* Jumbo pool */
	mvpp2_pools[MVPP2_BM_JUMBO].buf_num  = MVPP2_BM_JUMBO_BUF_NUM;
	mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
}

/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
				    int lrxq, int long_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_LONG_MASK;
	else
		mask = MVPP22_RXQ_POOL_LONG_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

/* Attach short pool to rxq */
static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
				     int lrxq, int short_pool)
{
	u32 val, mask;
	int prxq;

	/* Get queue physical ID */
	prxq = port->rxqs[lrxq]->id;

	if (port->priv->hw_version == MVPP21)
		mask = MVPP21_RXQ_POOL_SHORT_MASK;
	else
		mask = MVPP22_RXQ_POOL_SHORT_MASK;

	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
	val &= ~mask;
	val |= (short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & mask;
	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
}

static void *mvpp2_buf_alloc(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool,
			     struct page_pool *page_pool,
			     dma_addr_t *buf_dma_addr,
			     phys_addr_t *buf_phys_addr,
			     gfp_t gfp_mask)
{
	dma_addr_t dma_addr;
	struct page *page;
	void *data;

	data = mvpp2_frag_alloc(bm_pool, page_pool);
	if (!data)
		return NULL;

	if (page_pool) {
		page = (struct page *)data;
		dma_addr = page_pool_get_dma_addr(page);
		data = page_to_virt(page);
	} else {
		dma_addr = dma_map_single(port->dev->dev.parent, data,
					  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
					  DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
			mvpp2_frag_free(bm_pool, NULL, data);
			return NULL;
		}
	}
	*buf_dma_addr = dma_addr;
	*buf_phys_addr = virt_to_phys(data);

	return data;
}

/* Enable flow control for the port's RXQs */
static void mvpp2_rxq_enable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, host_id, q;
	int fq = port->first_rxq;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Set the same Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set stop and start Flow control RXQ thresholds */
		val = MSS_THRESHOLD_START;
		val |= (MSS_THRESHOLD_STOP << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));
		/* Set RXQ port ID */
		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));
		val |= (port->id << MSS_RXQ_ASS_Q_BASE(q, fq));
		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		/* Calculate the RXQ host ID:
		 * In Single queue mode: Host ID equals the Host ID used for
		 *			 the shared RX interrupt
		 * In Multi queue mode: Host ID equals the RXQ ID / number of
		 *			CoS queues
		 * In Single resource mode: Host ID is always 0
		 */
		if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
			host_id = port->nqvecs;
		else if (queue_mode == MVPP2_QDIST_MULTI_MODE)
			host_id = q;
		else
			host_id = 0;

		/* Set RXQ host ID */
		val |= (host_id << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that the Flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable flow control for the port's RXQs */
static void mvpp2_rxq_disable_fc(struct mvpp2_port *port)
{
	int val, cm3_state, q;
	unsigned long flags;
	int fq = port->first_rxq;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Disable Flow control for all RXQs */
	for (q = 0; q < port->nrxqs; q++) {
		/* Set threshold 0 to disable Flow control */
		val = 0;
		val |= (0 << MSS_RXQ_TRESH_STOP_OFFS);
		mvpp2_cm3_write(port->priv, MSS_RXQ_TRESH_REG(q, fq), val);

		val = mvpp2_cm3_read(port->priv, MSS_RXQ_ASS_REG(q, fq));

		val &= ~(MSS_RXQ_ASS_PORTID_MASK << MSS_RXQ_ASS_Q_BASE(q, fq));

		val &= ~(MSS_RXQ_ASS_HOSTID_MASK << (MSS_RXQ_ASS_Q_BASE(q, fq)
			+ MSS_RXQ_ASS_HOSTID_OFFS));

		mvpp2_cm3_write(port->priv, MSS_RXQ_ASS_REG(q, fq), val);
	}

	/* Notify Firmware that the Flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Enable/disable flow control for a BM pool */
static void mvpp2_bm_pool_update_fc(struct mvpp2_port *port,
				    struct mvpp2_bm_pool *pool,
				    bool en)
{
	int val, cm3_state;
	unsigned long flags;

	spin_lock_irqsave(&port->priv->mss_spinlock, flags);

	/* Remove the Flow control enable bit to prevent a race between FW and
	 * the kernel. If Flow control was enabled, it will be re-enabled.
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	cm3_state = (val & FLOW_CONTROL_ENABLE_BIT);
	val &= ~FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	/* Check if the BM pool should be enabled or disabled */
	if (en) {
		/* Set BM pool start and stop thresholds per port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val |= MSS_BUF_POOL_PORT_OFFS(port->id);
		val &= ~MSS_BUF_POOL_START_MASK;
		val |= (MSS_THRESHOLD_START << MSS_BUF_POOL_START_OFFS);
		val &= ~MSS_BUF_POOL_STOP_MASK;
		val |= MSS_THRESHOLD_STOP;
		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	} else {
		/* Remove BM pool from the port */
		val = mvpp2_cm3_read(port->priv, MSS_BUF_POOL_REG(pool->id));
		val &= ~MSS_BUF_POOL_PORT_OFFS(port->id);

		/* Zero BM pool start and stop thresholds to disable pool
		 * flow control if pool empty (not used by any port)
		 */
		if (!pool->buf_num) {
			val &= ~MSS_BUF_POOL_START_MASK;
			val &= ~MSS_BUF_POOL_STOP_MASK;
		}

		mvpp2_cm3_write(port->priv, MSS_BUF_POOL_REG(pool->id), val);
	}

	/* Notify Firmware that the Flow control config space is ready for
	 * update
	 */
	val = mvpp2_cm3_read(port->priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	val |= cm3_state;
	mvpp2_cm3_write(port->priv, MSS_FC_COM_REG, val);

	spin_unlock_irqrestore(&port->priv->mss_spinlock, flags);
}

/* Disable/enable flow control for BM pool on all ports */
static void mvpp2_bm_pool_update_priv_fc(struct mvpp2 *priv, bool en)
{
	struct mvpp2_port *port;
	int i, j;

	for (i = 0; i < priv->port_count; i++) {
		port = priv->port_list[i];
		if (port->priv->percpu_pools) {
			for (j = 0; j < port->nrxqs; j++)
				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[j],
							port->tx_fc & en);
		} else {
			mvpp2_bm_pool_update_fc(port, port->pool_long, port->tx_fc & en);
			mvpp2_bm_pool_update_fc(port, port->pool_short, port->tx_fc & en);
		}
	}
}

static int mvpp2_enable_global_fc(struct mvpp2 *priv)
{
	int val, timeout = 0;

	/* Enable global flow control. At this stage global flow control is
	 * enabled, but still disabled per port.
	 */
	val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);
	val |= FLOW_CONTROL_ENABLE_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	/* Check if Firmware is running and disable FC if not */
	val |= FLOW_CONTROL_UPDATE_COMMAND_BIT;
	mvpp2_cm3_write(priv, MSS_FC_COM_REG, val);

	while (timeout < MSS_FC_MAX_TIMEOUT) {
		val = mvpp2_cm3_read(priv, MSS_FC_COM_REG);

		if (!(val & FLOW_CONTROL_UPDATE_COMMAND_BIT))
			return 0;
		usleep_range(10, 20);
		timeout++;
	}

	priv->global_tx_fc = false;
	return -EOPNOTSUPP;
}

/* Release buffer to BM */
static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
				     dma_addr_t buf_dma_addr,
				     phys_addr_t buf_phys_addr)
{
	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
	unsigned long flags = 0;

	if (test_bit(thread, &port->priv->lock_map))
		spin_lock_irqsave(&port->bm_lock[thread], flags);

	if (port->priv->hw_version >= MVPP22) {
		u32 val = 0;

		if (sizeof(dma_addr_t) == 8)
			val |= upper_32_bits(buf_dma_addr) &
				MVPP22_BM_ADDR_HIGH_PHYS_RLS_MASK;

		if (sizeof(phys_addr_t) == 8)
			val |= (upper_32_bits(buf_phys_addr)
				<< MVPP22_BM_ADDR_HIGH_VIRT_RLS_SHIFT) &
				MVPP22_BM_ADDR_HIGH_VIRT_RLS_MASK;

		mvpp2_thread_write_relaxed(port->priv, thread,
					   MVPP22_BM_ADDR_HIGH_RLS_REG, val);
	}

	/* MVPP2_BM_VIRT_RLS_REG is not interpreted by HW, and simply
	 * returned in the "cookie" field of the RX
	 * descriptor. Instead of storing the virtual address, we
	 * store the physical address
	 */
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_VIRT_RLS_REG, buf_phys_addr);
	mvpp2_thread_write_relaxed(port->priv, thread,
				   MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);

	if (test_bit(thread, &port->priv->lock_map))
		spin_unlock_irqrestore(&port->bm_lock[thread], flags);

	put_cpu();
}
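
/* Address-split example for mvpp2_bm_pool_put(): on 64-bit systems a 40-bit
 * DMA address such as 0x01_2345_6780 is released in two pieces; the high
 * bits (0x01) go through MVPP22_BM_ADDR_HIGH_RLS_REG, while the write to
 * MVPP2_BM_PHY_RLS_REG keeps only the low 32 bits (0x2345_6780), since the
 * thread-write helper takes a u32.
 */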

/* Allocate buffers for the pool */
static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
			     struct mvpp2_bm_pool *bm_pool, int buf_num)
{
	int i, buf_size, total_size;
	dma_addr_t dma_addr;
	phys_addr_t phys_addr;
	struct page_pool *pp = NULL;
	void *buf;

	if (port->priv->percpu_pools &&
	    bm_pool->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		netdev_err(port->dev,
			   "attempted to use jumbo frames with per-cpu pools\n");
		return 0;
	}

	buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
	total_size = MVPP2_RX_TOTAL_SIZE(buf_size);

	if (buf_num < 0 ||
	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
		netdev_err(port->dev,
			   "cannot allocate %d buffers for pool %d\n",
			   buf_num, bm_pool->id);
		return 0;
	}

	if (port->priv->percpu_pools)
		pp = port->priv->page_pool[bm_pool->id];
	for (i = 0; i < buf_num; i++) {
		buf = mvpp2_buf_alloc(port, bm_pool, pp, &dma_addr,
				      &phys_addr, GFP_KERNEL);
		if (!buf)
			break;

		mvpp2_bm_pool_put(port, bm_pool->id, dma_addr,
				  phys_addr);
	}

	/* Update BM driver with number of buffers added to pool */
	bm_pool->buf_num += i;

	netdev_dbg(port->dev,
		   "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
		   bm_pool->id, bm_pool->pkt_size, buf_size, total_size);

	netdev_dbg(port->dev,
		   "pool %d: %d of %d buffers added\n",
		   bm_pool->id, i, buf_num);
	return i;
}

/* Notify the driver that the BM pool is being used as a specific type and
 * return the pool pointer on success
 */
static struct mvpp2_bm_pool *
mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if ((port->priv->percpu_pools && pool > mvpp2_get_nrxqs(port->priv) * 2) ||
	    (!port->priv->percpu_pools && pool >= MVPP2_BM_POOLS_NUM)) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or the BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0) {
			if (port->priv->percpu_pools) {
				if (pool < port->nrxqs)
					pkts_num = mvpp2_pools[MVPP2_BM_SHORT].buf_num;
				else
					pkts_num = mvpp2_pools[MVPP2_BM_LONG].buf_num;
			} else {
				pkts_num = mvpp2_pools[pool].buf_num;
			}
		} else {
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);
		}

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

static struct mvpp2_bm_pool *
mvpp2_bm_pool_use_percpu(struct mvpp2_port *port, int type,
			 unsigned int pool, int pkt_size)
{
	struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
	int num;

	if (pool > port->nrxqs * 2) {
		netdev_err(port->dev, "Invalid pool %d\n", pool);
		return NULL;
	}

	/* Allocate buffers in case BM pool is used as long pool, but packet
	 * size doesn't match MTU or the BM pool hasn't been used yet
	 */
	if (new_pool->pkt_size == 0) {
		int pkts_num;

		/* Set default buffer number or free all the buffers in case
		 * the pool is not empty
		 */
		pkts_num = new_pool->buf_num;
		if (pkts_num == 0)
			pkts_num = mvpp2_pools[type].buf_num;
		else
			mvpp2_bm_bufs_free(port->dev->dev.parent,
					   port->priv, new_pool, pkts_num);

		new_pool->pkt_size = pkt_size;
		new_pool->frag_size =
			SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
			MVPP2_SKB_SHINFO_SIZE;

		/* Allocate buffers for this pool */
		num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
		if (num != pkts_num) {
			WARN(1, "pool %d: %d of %d allocated\n",
			     new_pool->id, num, pkts_num);
			return NULL;
		}
	}

	mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
				  MVPP2_RX_BUF_SIZE(new_pool->pkt_size));

	return new_pool;
}

/* Initialize pools for swf, shared buffers variant */
static int mvpp2_swf_bm_pool_init_shared(struct mvpp2_port *port)
{
	enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
	int rxq;

	/* If port pkt_size is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
		long_log_pool = MVPP2_BM_JUMBO;
		short_log_pool = MVPP2_BM_LONG;
	} else {
		long_log_pool = MVPP2_BM_LONG;
		short_log_pool = MVPP2_BM_SHORT;
	}

	if (!port->pool_long) {
		port->pool_long =
			mvpp2_bm_pool_use(port, long_log_pool,
					  mvpp2_pools[long_log_pool].pkt_size);
		if (!port->pool_long)
			return -ENOMEM;

		port->pool_long->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
	}

	if (!port->pool_short) {
		port->pool_short =
			mvpp2_bm_pool_use(port, short_log_pool,
					  mvpp2_pools[short_log_pool].pkt_size);
		if (!port->pool_short)
			return -ENOMEM;

		port->pool_short->port_map |= BIT(port->id);

		for (rxq = 0; rxq < port->nrxqs; rxq++)
			mvpp2_rxq_short_pool_set(port, rxq,
						 port->pool_short->id);
	}

	return 0;
}

/* Initialize pools for swf, percpu buffers variant */
static int mvpp2_swf_bm_pool_init_percpu(struct mvpp2_port *port)
{
	struct mvpp2_bm_pool *bm_pool;
	int i;

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_SHORT, i,
						   mvpp2_pools[MVPP2_BM_SHORT].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_short_pool_set(port, i, bm_pool->id);
	}

	for (i = 0; i < port->nrxqs; i++) {
		bm_pool = mvpp2_bm_pool_use_percpu(port, MVPP2_BM_LONG, i + port->nrxqs,
						   mvpp2_pools[MVPP2_BM_LONG].pkt_size);
		if (!bm_pool)
			return -ENOMEM;

		bm_pool->port_map |= BIT(port->id);
		mvpp2_rxq_long_pool_set(port, i, bm_pool->id);
	}

	port->pool_long = NULL;
	port->pool_short = NULL;

	return 0;
}

static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
	if (port->priv->percpu_pools)
		return mvpp2_swf_bm_pool_init_percpu(port);
	else
		return mvpp2_swf_bm_pool_init_shared(port);
}

static void mvpp2_set_hw_csum(struct mvpp2_port *port,
			      enum mvpp2_bm_pool_log_num new_long_pool)
{
	const netdev_features_t csums = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	/* Update L4 checksum offload when jumbo frames are enabled or
	 * disabled on the port.
	 * Only port 0 supports hardware checksum offload due to
	 * the Tx FIFO size limitation.
	 * Also, don't set NETIF_F_HW_CSUM because the L3_offset field in the
	 * TX descriptor is only 7 bits wide, so the maximum L3 offset is 127.
	 */
	if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
		port->dev->features &= ~csums;
		port->dev->hw_features &= ~csums;
	} else {
		port->dev->features |= csums;
		port->dev->hw_features |= csums;
	}
}

static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
	struct mvpp2_port *port = netdev_priv(dev);
	enum mvpp2_bm_pool_log_num new_long_pool;
	int pkt_size = MVPP2_RX_PKT_SIZE(mtu);

	if (port->priv->percpu_pools)
		goto out_set;

	/* If port MTU is higher than 1518B:
	 * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
	 * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
	 */
	if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
		new_long_pool = MVPP2_BM_JUMBO;
	else
		new_long_pool = MVPP2_BM_LONG;

	if (new_long_pool != port->pool_long->id) {
		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port,
							port->pool_short,
							false);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							false);
		}

		/* Remove port from old short & long pool */
		port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
						    port->pool_long->pkt_size);
		port->pool_long->port_map &= ~BIT(port->id);
		port->pool_long = NULL;

		port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
						     port->pool_short->pkt_size);
		port->pool_short->port_map &= ~BIT(port->id);
		port->pool_short = NULL;

		port->pkt_size = pkt_size;

		/* Add port to new short & long pool */
		mvpp2_swf_bm_pool_init(port);

		mvpp2_set_hw_csum(port, new_long_pool);

		if (port->tx_fc) {
			if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
				mvpp2_bm_pool_update_fc(port, port->pool_long,
							true);
			else
				mvpp2_bm_pool_update_fc(port, port->pool_short,
							true);
		}
	}

out_set:
	dev->mtu = mtu;
	dev->wanted_features = dev->features;

	netdev_update_features(dev);
	return 0;
}

static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
{
	int i, sw_thread_mask = 0;

	for (i = 0; i < port->nqvecs; i++)
		sw_thread_mask |= port->qvecs[i].sw_thread_mask;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_enable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_ENABLE_INTERRUPT(qvec->sw_thread_mask));
}

static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
{
	struct mvpp2_port *port = qvec->port;

	mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
		    MVPP2_ISR_DISABLE_INTERRUPT(qvec->sw_thread_mask));
}

/* Mask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_mask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
}

/* Unmask the current thread's Rx/Tx interrupts.
 * Called by on_each_cpu(), guaranteed to run with migration disabled,
 * using smp_processor_id() is OK.
 */
static void mvpp2_interrupts_unmask(void *arg)
{
	struct mvpp2_port *port = arg;
	int cpu = smp_processor_id();
	u32 val, thread;

	/* If the thread isn't used, don't do anything */
	if (cpu >= port->priv->nthreads)
		return;

	thread = mvpp2_cpu_to_thread(port->priv, cpu);

	val = MVPP2_CAUSE_MISC_SUM_MASK |
		MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
	if (port->has_tx_irqs)
		val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;

	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
	mvpp2_thread_write(port->priv, thread,
			   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
			   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
}

static void
mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
{
	u32 val;
	int i;

	if (port->priv->hw_version == MVPP21)
		return;

	if (mask)
		val = 0;
	else
		val = MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(MVPP22);

	for (i = 0; i < port->nqvecs; i++) {
		struct mvpp2_queue_vector *v = port->qvecs + i;

		if (v->type != MVPP2_QUEUE_VECTOR_SHARED)
			continue;

		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
		mvpp2_thread_write(port->priv, v->sw_thread_id,
				   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
				   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
	}
}

/* Only GOP port 0 has an XLG MAC */
static bool mvpp2_port_supports_xlg(struct mvpp2_port *port)
{
	return port->gop_id == 0;
}

static bool mvpp2_port_supports_rgmii(struct mvpp2_port *port)
{
	return !(port->priv->hw_version >= MVPP22 && port->gop_id == 0);
}

/* Port configuration routines */
static bool mvpp2_is_xlg(phy_interface_t interface)
{
	return interface == PHY_INTERFACE_MODE_10GBASER ||
	       interface == PHY_INTERFACE_MODE_XAUI;
}

static void mvpp2_modify(void __iomem *ptr, u32 mask, u32 set)
{
	u32 old, val;

	old = val = readl(ptr);
	val &= ~mask;
	val |= set;
	if (old != val)
		writel(val, ptr);
}
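
/* Usage sketch for mvpp2_modify(): mvpp2_modify(reg, MASK, MASK) sets the
 * bits in MASK, mvpp2_modify(reg, MASK, 0) clears them, and the register is
 * only written back when the value actually changes (see the PTP interrupt
 * mask updates in mvpp22_gop_setup_irq() below).
 */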

static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
	if (port->gop_id == 2)
		val |= GENCONF_CTRL0_PORT2_RGMII;
	else if (port->gop_id == 3)
		val |= GENCONF_CTRL0_PORT3_RGMII_MII;
	regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
}

static void mvpp22_gop_init_sgmii(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_BUS_WIDTH_SELECT |
	       GENCONF_PORT_CTRL0_RX_DATA_SAMPLE;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	if (port->gop_id > 1) {
		regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
		if (port->gop_id == 2)
			val &= ~GENCONF_CTRL0_PORT2_RGMII;
		else if (port->gop_id == 3)
			val &= ~GENCONF_CTRL0_PORT3_RGMII_MII;
		regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
	}
}

static void mvpp22_gop_init_10gkr(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
	void __iomem *xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
	u32 val;

	val = readl(xpcs + MVPP22_XPCS_CFG0);
	val &= ~(MVPP22_XPCS_CFG0_PCS_MODE(0x3) |
		 MVPP22_XPCS_CFG0_ACTIVE_LANE(0x3));
	val |= MVPP22_XPCS_CFG0_ACTIVE_LANE(2);
	writel(val, xpcs + MVPP22_XPCS_CFG0);

	val = readl(mpcs + MVPP22_MPCS_CTRL);
	val &= ~MVPP22_MPCS_CTRL_FWD_ERR_CONN;
	writel(val, mpcs + MVPP22_MPCS_CTRL);

	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
	val &= ~MVPP22_MPCS_CLK_RESET_DIV_RATIO(0x7);
	val |= MVPP22_MPCS_CLK_RESET_DIV_RATIO(1);
	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
}

static void mvpp22_gop_fca_enable_periodic(struct mvpp2_port *port, bool en)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 val;

	val = readl(fca + MVPP22_FCA_CONTROL_REG);
	val &= ~MVPP22_FCA_ENABLE_PERIODIC;
	if (en)
		val |= MVPP22_FCA_ENABLE_PERIODIC;
	writel(val, fca + MVPP22_FCA_CONTROL_REG);
}

static void mvpp22_gop_fca_set_timer(struct mvpp2_port *port, u32 timer)
{
	struct mvpp2 *priv = port->priv;
	void __iomem *fca = priv->iface_base + MVPP22_FCA_BASE(port->gop_id);
	u32 lsb, msb;

	lsb = timer & MVPP22_FCA_REG_MASK;
	msb = timer >> MVPP22_FCA_REG_SIZE;

	writel(lsb, fca + MVPP22_PERIODIC_COUNTER_LSB_REG);
	writel(msb, fca + MVPP22_PERIODIC_COUNTER_MSB_REG);
}

/* Set the Flow Control timer x100 faster than the pause quanta to ensure that
 * the link partner won't send traffic while the port is in XOFF mode.
 */
static void mvpp22_gop_fca_set_periodic_timer(struct mvpp2_port *port)
{
	u32 timer;

	timer = (port->priv->tclk / (USEC_PER_SEC * FC_CLK_DIVIDER))
		* FC_QUANTA;

	mvpp22_gop_fca_enable_periodic(port, false);

	mvpp22_gop_fca_set_timer(port, timer);

	mvpp22_gop_fca_enable_periodic(port, true);
}
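
/* Worked example for the timer above, assuming FC_CLK_DIVIDER == 100 and
 * FC_QUANTA == 0xffff as defined in mvpp2.h: with a 333 MHz tclk,
 * timer = (333000000 / (1000000 * 100)) * 0xffff == 196605 clock cycles.
 * The divider makes the refresh ~100x faster than the pause quanta,
 * matching the comment above.
 */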

static int mvpp22_gop_init(struct mvpp2_port *port)
{
	struct mvpp2 *priv = port->priv;
	u32 val;

	if (!priv->sysctrl_base)
		return 0;

	switch (port->phy_interface) {
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		if (!mvpp2_port_supports_rgmii(port))
			goto invalid_conf;
		mvpp22_gop_init_rgmii(port);
		break;
	case PHY_INTERFACE_MODE_SGMII:
	case PHY_INTERFACE_MODE_1000BASEX:
	case PHY_INTERFACE_MODE_2500BASEX:
		mvpp22_gop_init_sgmii(port);
		break;
	case PHY_INTERFACE_MODE_10GBASER:
		if (!mvpp2_port_supports_xlg(port))
			goto invalid_conf;
		mvpp22_gop_init_10gkr(port);
		break;
	default:
		goto unsupported_conf;
	}

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL1, &val);
	val |= GENCONF_PORT_CTRL1_RESET(port->gop_id) |
	       GENCONF_PORT_CTRL1_EN(port->gop_id);
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL1, val);

	regmap_read(priv->sysctrl_base, GENCONF_PORT_CTRL0, &val);
	val |= GENCONF_PORT_CTRL0_CLK_DIV_PHASE_CLR;
	regmap_write(priv->sysctrl_base, GENCONF_PORT_CTRL0, val);

	regmap_read(priv->sysctrl_base, GENCONF_SOFT_RESET1, &val);
	val |= GENCONF_SOFT_RESET1_GOP;
	regmap_write(priv->sysctrl_base, GENCONF_SOFT_RESET1, val);

	mvpp22_gop_fca_set_periodic_timer(port);

unsupported_conf:
	return 0;

invalid_conf:
	netdev_err(port->dev, "Invalid port configuration\n");
	return -EINVAL;
}

static void mvpp22_gop_unmask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Enable the GMAC link status irq for this port */
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val |= MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		/* Enable the XLG/GIG irqs for this port */
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		if (mvpp2_is_xlg(port->phy_interface))
			val |= MVPP22_XLG_EXT_INT_MASK_XLG;
		else
			val |= MVPP22_XLG_EXT_INT_MASK_GIG;
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}
}

static void mvpp22_gop_mask_irq(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_EXT_INT_MASK);
		val &= ~(MVPP22_XLG_EXT_INT_MASK_XLG |
			 MVPP22_XLG_EXT_INT_MASK_GIG);
		writel(val, port->base + MVPP22_XLG_EXT_INT_MASK);
	}

	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_SUM_MASK);
		val &= ~MVPP22_GMAC_INT_SUM_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_SUM_MASK);
	}
}

static void mvpp22_gop_setup_irq(struct mvpp2_port *port)
{
	u32 val;

	mvpp2_modify(port->base + MVPP22_GMAC_INT_SUM_MASK,
		     MVPP22_GMAC_INT_SUM_MASK_PTP,
		     MVPP22_GMAC_INT_SUM_MASK_PTP);

	if (port->phylink ||
	    phy_interface_mode_is_rgmii(port->phy_interface) ||
	    phy_interface_mode_is_8023z(port->phy_interface) ||
	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		val = readl(port->base + MVPP22_GMAC_INT_MASK);
		val |= MVPP22_GMAC_INT_MASK_LINK_STAT;
		writel(val, port->base + MVPP22_GMAC_INT_MASK);
	}

	if (mvpp2_port_supports_xlg(port)) {
		val = readl(port->base + MVPP22_XLG_INT_MASK);
		val |= MVPP22_XLG_INT_MASK_LINK;
		writel(val, port->base + MVPP22_XLG_INT_MASK);

		mvpp2_modify(port->base + MVPP22_XLG_EXT_INT_MASK,
			     MVPP22_XLG_EXT_INT_MASK_PTP,
			     MVPP22_XLG_EXT_INT_MASK_PTP);
	}

	mvpp22_gop_unmask_irq(port);
}

/* Sets the PHY mode of the COMPHY (which configures the serdes lanes).
 *
 * The PHY mode used by the PPv2 driver comes from the network subsystem, while
 * the one given to the COMPHY comes from the generic PHY subsystem. Hence they
 * differ.
 *
 * The COMPHY configures the serdes lanes regardless of the actual use of the
 * lanes by the physical layer. This is why configurations like
 * "PPv2 (2500BaseX) - COMPHY (2500SGMII)" are valid.
 */
static int mvpp22_comphy_init(struct mvpp2_port *port)
{
	int ret;

	if (!port->comphy)
		return 0;

	ret = phy_set_mode_ext(port->comphy, PHY_MODE_ETHERNET,
			       port->phy_interface);
	if (ret)
		return ret;

	return phy_power_on(port->comphy);
}

static void mvpp2_port_enable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val |= MVPP22_XLG_CTRL0_PORT_EN;
		val &= ~MVPP22_XLG_CTRL0_MIB_CNT_DIS;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	} else {
		val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
		val |= MVPP2_GMAC_PORT_EN_MASK;
		val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
		writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
	}
}

static void mvpp2_port_disable(struct mvpp2_port *port)
{
	u32 val;

	if (mvpp2_port_supports_xlg(port) &&
	    mvpp2_is_xlg(port->phy_interface)) {
		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
		val &= ~MVPP22_XLG_CTRL0_PORT_EN;
		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
	}

	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
	val &= ~(MVPP2_GMAC_PORT_EN_MASK);
	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
}

/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
		    ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

/* Configure loopback port */
static void mvpp2_port_loopback_set(struct mvpp2_port *port,
				    const struct phylink_link_state *state)
{
	u32 val;

	val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);

	if (state->speed == 1000)
		val |= MVPP2_GMAC_GMII_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;

	if (phy_interface_mode_is_8023z(state->interface) ||
	    state->interface == PHY_INTERFACE_MODE_SGMII)
		val |= MVPP2_GMAC_PCS_LB_EN_MASK;
	else
		val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;

	writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
}

enum {
	ETHTOOL_XDP_REDIRECT,
	ETHTOOL_XDP_PASS,
	ETHTOOL_XDP_DROP,
	ETHTOOL_XDP_TX,
	ETHTOOL_XDP_TX_ERR,
	ETHTOOL_XDP_XMIT,
	ETHTOOL_XDP_XMIT_ERR,
};

struct mvpp2_ethtool_counter {
	unsigned int offset;
	const char string[ETH_GSTRING_LEN];
	bool reg_is_64b;
};

static u64 mvpp2_read_count(struct mvpp2_port *port,
			    const struct mvpp2_ethtool_counter *counter)
{
	u64 val;

	val = readl(port->stats_base + counter->offset);
	if (counter->reg_is_64b)
		val += (u64)readl(port->stats_base + counter->offset + 4) << 32;

	return val;
}
1854 
/* Some counters are accessed indirectly by first writing an index to
 * MVPP2_CTRS_IDX. Depending on the register being accessed, the index can
 * represent various resources: a hit counter for some classification tables,
 * or a counter specific to a rxq, a txq or a buffer pool.
 */
1860 static u32 mvpp2_read_index(struct mvpp2 *priv, u32 index, u32 reg)
1861 {
1862 	mvpp2_write(priv, MVPP2_CTRS_IDX, index);
1863 	return mvpp2_read(priv, reg);
1864 }
1865 
/* Software statistics and hardware statistics are, by design, incremented at
 * different points in the packet processing chain. It is therefore likely
 * that incoming packets were dropped after being counted by hardware but
 * before reaching the software statistics (most probably multicast packets),
 * while in the opposite direction, on transmission, FCS bytes are added in
 * between, and TSO skbs are split with header bytes added. Hence, statistics
 * gathered from userspace with ifconfig (software) and ethtool (hardware)
 * cannot be compared.
 */
1875 static const struct mvpp2_ethtool_counter mvpp2_ethtool_mib_regs[] = {
1876 	{ MVPP2_MIB_GOOD_OCTETS_RCVD, "good_octets_received", true },
1877 	{ MVPP2_MIB_BAD_OCTETS_RCVD, "bad_octets_received" },
1878 	{ MVPP2_MIB_CRC_ERRORS_SENT, "crc_errors_sent" },
1879 	{ MVPP2_MIB_UNICAST_FRAMES_RCVD, "unicast_frames_received" },
1880 	{ MVPP2_MIB_BROADCAST_FRAMES_RCVD, "broadcast_frames_received" },
1881 	{ MVPP2_MIB_MULTICAST_FRAMES_RCVD, "multicast_frames_received" },
1882 	{ MVPP2_MIB_FRAMES_64_OCTETS, "frames_64_octets" },
1883 	{ MVPP2_MIB_FRAMES_65_TO_127_OCTETS, "frames_65_to_127_octet" },
1884 	{ MVPP2_MIB_FRAMES_128_TO_255_OCTETS, "frames_128_to_255_octet" },
1885 	{ MVPP2_MIB_FRAMES_256_TO_511_OCTETS, "frames_256_to_511_octet" },
1886 	{ MVPP2_MIB_FRAMES_512_TO_1023_OCTETS, "frames_512_to_1023_octet" },
1887 	{ MVPP2_MIB_FRAMES_1024_TO_MAX_OCTETS, "frames_1024_to_max_octet" },
1888 	{ MVPP2_MIB_GOOD_OCTETS_SENT, "good_octets_sent", true },
1889 	{ MVPP2_MIB_UNICAST_FRAMES_SENT, "unicast_frames_sent" },
1890 	{ MVPP2_MIB_MULTICAST_FRAMES_SENT, "multicast_frames_sent" },
1891 	{ MVPP2_MIB_BROADCAST_FRAMES_SENT, "broadcast_frames_sent" },
1892 	{ MVPP2_MIB_FC_SENT, "fc_sent" },
1893 	{ MVPP2_MIB_FC_RCVD, "fc_received" },
1894 	{ MVPP2_MIB_RX_FIFO_OVERRUN, "rx_fifo_overrun" },
1895 	{ MVPP2_MIB_UNDERSIZE_RCVD, "undersize_received" },
1896 	{ MVPP2_MIB_FRAGMENTS_RCVD, "fragments_received" },
1897 	{ MVPP2_MIB_OVERSIZE_RCVD, "oversize_received" },
1898 	{ MVPP2_MIB_JABBER_RCVD, "jabber_received" },
1899 	{ MVPP2_MIB_MAC_RCV_ERROR, "mac_receive_error" },
1900 	{ MVPP2_MIB_BAD_CRC_EVENT, "bad_crc_event" },
1901 	{ MVPP2_MIB_COLLISION, "collision" },
1902 	{ MVPP2_MIB_LATE_COLLISION, "late_collision" },
1903 };
1904 
1905 static const struct mvpp2_ethtool_counter mvpp2_ethtool_port_regs[] = {
1906 	{ MVPP2_OVERRUN_ETH_DROP, "rx_fifo_or_parser_overrun_drops" },
1907 	{ MVPP2_CLS_ETH_DROP, "rx_classifier_drops" },
1908 };
1909 
1910 static const struct mvpp2_ethtool_counter mvpp2_ethtool_txq_regs[] = {
1911 	{ MVPP2_TX_DESC_ENQ_CTR, "txq_%d_desc_enqueue" },
1912 	{ MVPP2_TX_DESC_ENQ_TO_DDR_CTR, "txq_%d_desc_enqueue_to_ddr" },
	{ MVPP2_TX_BUFF_ENQ_TO_DDR_CTR, "txq_%d_buff_enqueue_to_ddr" },
1914 	{ MVPP2_TX_DESC_ENQ_HW_FWD_CTR, "txq_%d_desc_hardware_forwarded" },
1915 	{ MVPP2_TX_PKTS_DEQ_CTR, "txq_%d_packets_dequeued" },
1916 	{ MVPP2_TX_PKTS_FULL_QUEUE_DROP_CTR, "txq_%d_queue_full_drops" },
1917 	{ MVPP2_TX_PKTS_EARLY_DROP_CTR, "txq_%d_packets_early_drops" },
1918 	{ MVPP2_TX_PKTS_BM_DROP_CTR, "txq_%d_packets_bm_drops" },
1919 	{ MVPP2_TX_PKTS_BM_MC_DROP_CTR, "txq_%d_packets_rep_bm_drops" },
1920 };
1921 
1922 static const struct mvpp2_ethtool_counter mvpp2_ethtool_rxq_regs[] = {
1923 	{ MVPP2_RX_DESC_ENQ_CTR, "rxq_%d_desc_enqueue" },
1924 	{ MVPP2_RX_PKTS_FULL_QUEUE_DROP_CTR, "rxq_%d_queue_full_drops" },
1925 	{ MVPP2_RX_PKTS_EARLY_DROP_CTR, "rxq_%d_packets_early_drops" },
1926 	{ MVPP2_RX_PKTS_BM_DROP_CTR, "rxq_%d_packets_bm_drops" },
1927 };
1928 
1929 static const struct mvpp2_ethtool_counter mvpp2_ethtool_xdp[] = {
1930 	{ ETHTOOL_XDP_REDIRECT, "rx_xdp_redirect", },
1931 	{ ETHTOOL_XDP_PASS, "rx_xdp_pass", },
1932 	{ ETHTOOL_XDP_DROP, "rx_xdp_drop", },
1933 	{ ETHTOOL_XDP_TX, "rx_xdp_tx", },
1934 	{ ETHTOOL_XDP_TX_ERR, "rx_xdp_tx_errors", },
1935 	{ ETHTOOL_XDP_XMIT, "tx_xdp_xmit", },
1936 	{ ETHTOOL_XDP_XMIT_ERR, "tx_xdp_xmit_errors", },
1937 };
1938 
1939 #define MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs)	(ARRAY_SIZE(mvpp2_ethtool_mib_regs) + \
1940 						 ARRAY_SIZE(mvpp2_ethtool_port_regs) + \
1941 						 (ARRAY_SIZE(mvpp2_ethtool_txq_regs) * (ntxqs)) + \
1942 						 (ARRAY_SIZE(mvpp2_ethtool_rxq_regs) * (nrxqs)) + \
1943 						 ARRAY_SIZE(mvpp2_ethtool_xdp))
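
/* Example: with the arrays above (27 MIB, 2 port, 9 per-txq, 4 per-rxq and
 * 7 XDP counters), a port with 8 txqs and 4 rxqs exposes
 * 27 + 2 + (9 * 8) + (4 * 4) + 7 = 124 ethtool statistics.
 */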
1944 
1945 static void mvpp2_ethtool_get_strings(struct net_device *netdev, u32 sset,
1946 				      u8 *data)
1947 {
1948 	struct mvpp2_port *port = netdev_priv(netdev);
1949 	int i, q;
1950 
1951 	if (sset != ETH_SS_STATS)
1952 		return;
1953 
1954 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++) {
1955 		strscpy(data, mvpp2_ethtool_mib_regs[i].string,
1956 			ETH_GSTRING_LEN);
1957 		data += ETH_GSTRING_LEN;
1958 	}
1959 
1960 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++) {
1961 		strscpy(data, mvpp2_ethtool_port_regs[i].string,
1962 			ETH_GSTRING_LEN);
1963 		data += ETH_GSTRING_LEN;
1964 	}
1965 
1966 	for (q = 0; q < port->ntxqs; q++) {
1967 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++) {
1968 			snprintf(data, ETH_GSTRING_LEN,
1969 				 mvpp2_ethtool_txq_regs[i].string, q);
1970 			data += ETH_GSTRING_LEN;
1971 		}
1972 	}
1973 
1974 	for (q = 0; q < port->nrxqs; q++) {
1975 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++) {
1976 			snprintf(data, ETH_GSTRING_LEN,
1977 				 mvpp2_ethtool_rxq_regs[i].string,
1978 				 q);
1979 			data += ETH_GSTRING_LEN;
1980 		}
1981 	}
1982 
1983 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_xdp); i++) {
1984 		strscpy(data, mvpp2_ethtool_xdp[i].string,
1985 			ETH_GSTRING_LEN);
1986 		data += ETH_GSTRING_LEN;
1987 	}
1988 }
1989 
1990 static void
1991 mvpp2_get_xdp_stats(struct mvpp2_port *port, struct mvpp2_pcpu_stats *xdp_stats)
1992 {
1993 	unsigned int start;
1994 	unsigned int cpu;
1995 
1996 	/* Gather XDP Statistics */
1997 	for_each_possible_cpu(cpu) {
1998 		struct mvpp2_pcpu_stats *cpu_stats;
1999 		u64	xdp_redirect;
2000 		u64	xdp_pass;
2001 		u64	xdp_drop;
2002 		u64	xdp_xmit;
2003 		u64	xdp_xmit_err;
2004 		u64	xdp_tx;
2005 		u64	xdp_tx_err;
2006 
2007 		cpu_stats = per_cpu_ptr(port->stats, cpu);
2008 		do {
2009 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			xdp_redirect = cpu_stats->xdp_redirect;
			xdp_pass = cpu_stats->xdp_pass;
			xdp_drop = cpu_stats->xdp_drop;
			xdp_xmit = cpu_stats->xdp_xmit;
			xdp_xmit_err = cpu_stats->xdp_xmit_err;
			xdp_tx = cpu_stats->xdp_tx;
			xdp_tx_err = cpu_stats->xdp_tx_err;
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		xdp_stats->xdp_redirect += xdp_redirect;
		xdp_stats->xdp_pass += xdp_pass;
		xdp_stats->xdp_drop += xdp_drop;
		xdp_stats->xdp_xmit += xdp_xmit;
		xdp_stats->xdp_xmit_err += xdp_xmit_err;
		xdp_stats->xdp_tx += xdp_tx;
		xdp_stats->xdp_tx_err += xdp_tx_err;
2026 	}
2027 }
2028 
2029 static void mvpp2_read_stats(struct mvpp2_port *port)
2030 {
2031 	struct mvpp2_pcpu_stats xdp_stats = {};
2032 	const struct mvpp2_ethtool_counter *s;
2033 	u64 *pstats;
2034 	int i, q;
2035 
2036 	pstats = port->ethtool_stats;
2037 
2038 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_mib_regs); i++)
2039 		*pstats++ += mvpp2_read_count(port, &mvpp2_ethtool_mib_regs[i]);
2040 
2041 	for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_port_regs); i++)
2042 		*pstats++ += mvpp2_read(port->priv,
2043 					mvpp2_ethtool_port_regs[i].offset +
2044 					4 * port->id);
2045 
2046 	for (q = 0; q < port->ntxqs; q++)
2047 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_txq_regs); i++)
2048 			*pstats++ += mvpp2_read_index(port->priv,
2049 						      MVPP22_CTRS_TX_CTR(port->id, q),
2050 						      mvpp2_ethtool_txq_regs[i].offset);
2051 
	/* Rxqs are numbered from 0 from the user's standpoint, but not from
	 * the driver's: the port->first_rxq offset must be added.
	 */
2055 	for (q = 0; q < port->nrxqs; q++)
2056 		for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_rxq_regs); i++)
2057 			*pstats++ += mvpp2_read_index(port->priv,
2058 						      port->first_rxq + q,
2059 						      mvpp2_ethtool_rxq_regs[i].offset);
2060 
2061 	/* Gather XDP Statistics */
2062 	mvpp2_get_xdp_stats(port, &xdp_stats);
2063 
	for (i = 0, s = mvpp2_ethtool_xdp;
	     s < mvpp2_ethtool_xdp + ARRAY_SIZE(mvpp2_ethtool_xdp);
	     s++, i++) {
2067 		switch (s->offset) {
2068 		case ETHTOOL_XDP_REDIRECT:
2069 			*pstats++ = xdp_stats.xdp_redirect;
2070 			break;
2071 		case ETHTOOL_XDP_PASS:
2072 			*pstats++ = xdp_stats.xdp_pass;
2073 			break;
2074 		case ETHTOOL_XDP_DROP:
2075 			*pstats++ = xdp_stats.xdp_drop;
2076 			break;
2077 		case ETHTOOL_XDP_TX:
2078 			*pstats++ = xdp_stats.xdp_tx;
2079 			break;
2080 		case ETHTOOL_XDP_TX_ERR:
2081 			*pstats++ = xdp_stats.xdp_tx_err;
2082 			break;
2083 		case ETHTOOL_XDP_XMIT:
2084 			*pstats++ = xdp_stats.xdp_xmit;
2085 			break;
2086 		case ETHTOOL_XDP_XMIT_ERR:
2087 			*pstats++ = xdp_stats.xdp_xmit_err;
2088 			break;
2089 		}
2090 	}
2091 }
2092 
2093 static void mvpp2_gather_hw_statistics(struct work_struct *work)
2094 {
2095 	struct delayed_work *del_work = to_delayed_work(work);
2096 	struct mvpp2_port *port = container_of(del_work, struct mvpp2_port,
2097 					       stats_work);
2098 
2099 	mutex_lock(&port->gather_stats_lock);
2100 
2101 	mvpp2_read_stats(port);
2102 
	/* No need to read the counters again right after this function if it
	 * was called asynchronously by the user (i.e. through ethtool).
	 */
2106 	cancel_delayed_work(&port->stats_work);
2107 	queue_delayed_work(port->priv->stats_queue, &port->stats_work,
2108 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
2109 
2110 	mutex_unlock(&port->gather_stats_lock);
2111 }
2112 
2113 static void mvpp2_ethtool_get_stats(struct net_device *dev,
2114 				    struct ethtool_stats *stats, u64 *data)
2115 {
2116 	struct mvpp2_port *port = netdev_priv(dev);
2117 
2118 	/* Update statistics for the given port, then take the lock to avoid
2119 	 * concurrent accesses on the ethtool_stats structure during its copy.
2120 	 */
2121 	mvpp2_gather_hw_statistics(&port->stats_work.work);
2122 
2123 	mutex_lock(&port->gather_stats_lock);
2124 	memcpy(data, port->ethtool_stats,
2125 	       sizeof(u64) * MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs));
2126 	mutex_unlock(&port->gather_stats_lock);
2127 }
2128 
2129 static int mvpp2_ethtool_get_sset_count(struct net_device *dev, int sset)
2130 {
2131 	struct mvpp2_port *port = netdev_priv(dev);
2132 
2133 	if (sset == ETH_SS_STATS)
2134 		return MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs);
2135 
2136 	return -EOPNOTSUPP;
2137 }
2138 
2139 static void mvpp2_mac_reset_assert(struct mvpp2_port *port)
2140 {
2141 	u32 val;
2142 
2143 	val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2144 	      MVPP2_GMAC_PORT_RESET_MASK;
2145 	writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2146 
2147 	if (port->priv->hw_version >= MVPP22 && port->gop_id == 0) {
2148 		val = readl(port->base + MVPP22_XLG_CTRL0_REG) &
2149 		      ~MVPP22_XLG_CTRL0_MAC_RESET_DIS;
2150 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
2151 	}
2152 }
2153 
2154 static void mvpp22_pcs_reset_assert(struct mvpp2_port *port)
2155 {
2156 	struct mvpp2 *priv = port->priv;
2157 	void __iomem *mpcs, *xpcs;
2158 	u32 val;
2159 
2160 	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2161 		return;
2162 
2163 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2164 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2165 
2166 	val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2167 	val &= ~(MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX | MAC_CLK_RESET_SD_TX);
2168 	val |= MVPP22_MPCS_CLK_RESET_DIV_SET;
2169 	writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2170 
2171 	val = readl(xpcs + MVPP22_XPCS_CFG0);
2172 	writel(val & ~MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2173 }
2174 
2175 static void mvpp22_pcs_reset_deassert(struct mvpp2_port *port)
2176 {
2177 	struct mvpp2 *priv = port->priv;
2178 	void __iomem *mpcs, *xpcs;
2179 	u32 val;
2180 
2181 	if (port->priv->hw_version == MVPP21 || port->gop_id != 0)
2182 		return;
2183 
2184 	mpcs = priv->iface_base + MVPP22_MPCS_BASE(port->gop_id);
2185 	xpcs = priv->iface_base + MVPP22_XPCS_BASE(port->gop_id);
2186 
2187 	switch (port->phy_interface) {
2188 	case PHY_INTERFACE_MODE_10GBASER:
2189 		val = readl(mpcs + MVPP22_MPCS_CLK_RESET);
2190 		val |= MAC_CLK_RESET_MAC | MAC_CLK_RESET_SD_RX |
2191 		       MAC_CLK_RESET_SD_TX;
2192 		val &= ~MVPP22_MPCS_CLK_RESET_DIV_SET;
2193 		writel(val, mpcs + MVPP22_MPCS_CLK_RESET);
2194 		break;
2195 	case PHY_INTERFACE_MODE_XAUI:
2196 	case PHY_INTERFACE_MODE_RXAUI:
2197 		val = readl(xpcs + MVPP22_XPCS_CFG0);
2198 		writel(val | MVPP22_XPCS_CFG0_RESET_DIS, xpcs + MVPP22_XPCS_CFG0);
2199 		break;
2200 	default:
2201 		break;
2202 	}
2203 }
2204 
/* Change maximum receive size of the GMAC port */
2206 static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
2207 {
2208 	u32 val;
2209 
2210 	val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2211 	val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
2212 	val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2213 		    MVPP2_GMAC_MAX_RX_SIZE_OFFS);
2214 	writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
2215 }
2216 
/* Change maximum receive size of the XLG port */
2218 static inline void mvpp2_xlg_max_rx_size_set(struct mvpp2_port *port)
2219 {
2220 	u32 val;
2221 
	val = readl(port->base + MVPP22_XLG_CTRL1_REG);
2223 	val &= ~MVPP22_XLG_CTRL1_FRAMESIZELIMIT_MASK;
2224 	val |= ((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
2225 	       MVPP22_XLG_CTRL1_FRAMESIZELIMIT_OFFS;
2226 	writel(val, port->base + MVPP22_XLG_CTRL1_REG);
2227 }
2228 
2229 /* Set defaults to the MVPP2 port */
2230 static void mvpp2_defaults_set(struct mvpp2_port *port)
2231 {
2232 	int tx_port_num, val, queue, lrxq;
2233 
2234 	if (port->priv->hw_version == MVPP21) {
2235 		/* Update TX FIFO MIN Threshold */
2236 		val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2237 		val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
2238 		/* Min. TX threshold must be less than minimal packet length */
2239 		val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
2240 		writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
2241 	}
2242 
2243 	/* Disable Legacy WRR, Disable EJP, Release from reset */
2244 	tx_port_num = mvpp2_egress_port(port);
2245 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
2246 		    tx_port_num);
2247 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
2248 
2249 	/* Set TXQ scheduling to Round-Robin */
2250 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_FIXED_PRIO_REG, 0);
2251 
2252 	/* Close bandwidth for all queues */
2253 	for (queue = 0; queue < MVPP2_MAX_TXQ; queue++)
2254 		mvpp2_write(port->priv,
2255 			    MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(queue), 0);
2256 
2257 	/* Set refill period to 1 usec, refill tokens
2258 	 * and bucket size to maximum
2259 	 */
2260 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
2261 		    port->priv->tclk / USEC_PER_SEC);
2262 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
2263 	val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
2264 	val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
2265 	val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
2266 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
2267 	val = MVPP2_TXP_TOKEN_SIZE_MAX;
2268 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2269 
2270 	/* Set MaximumLowLatencyPacketSize value to 256 */
2271 	mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
2272 		    MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
2273 		    MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
2274 
2275 	/* Enable Rx cache snoop */
2276 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2277 		queue = port->rxqs[lrxq]->id;
2278 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2279 		val |= MVPP2_SNOOP_PKT_SIZE_MASK |
2280 			   MVPP2_SNOOP_BUF_HDR_MASK;
2281 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2282 	}
2283 
	/* By default, mask all interrupts for all present CPUs */
2285 	mvpp2_interrupts_disable(port);
2286 }
2287 
2288 /* Enable/disable receiving packets */
2289 static void mvpp2_ingress_enable(struct mvpp2_port *port)
2290 {
2291 	u32 val;
2292 	int lrxq, queue;
2293 
2294 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2295 		queue = port->rxqs[lrxq]->id;
2296 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2297 		val &= ~MVPP2_RXQ_DISABLE_MASK;
2298 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2299 	}
2300 }
2301 
2302 static void mvpp2_ingress_disable(struct mvpp2_port *port)
2303 {
2304 	u32 val;
2305 	int lrxq, queue;
2306 
2307 	for (lrxq = 0; lrxq < port->nrxqs; lrxq++) {
2308 		queue = port->rxqs[lrxq]->id;
2309 		val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
2310 		val |= MVPP2_RXQ_DISABLE_MASK;
2311 		mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
2312 	}
2313 }
2314 
/* Enable transmit via physical egress queue
 * - HW starts taking descriptors from DRAM
 */
2318 static void mvpp2_egress_enable(struct mvpp2_port *port)
2319 {
2320 	u32 qmap;
2321 	int queue;
2322 	int tx_port_num = mvpp2_egress_port(port);
2323 
	/* Enable all initialized TXQs. */
2325 	qmap = 0;
2326 	for (queue = 0; queue < port->ntxqs; queue++) {
2327 		struct mvpp2_tx_queue *txq = port->txqs[queue];
2328 
2329 		if (txq->descs)
2330 			qmap |= (1 << queue);
2331 	}
2332 
2333 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2334 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
2335 }
2336 
/* Disable transmit via physical egress queue
 * - HW stops taking descriptors from DRAM
 */
2340 static void mvpp2_egress_disable(struct mvpp2_port *port)
2341 {
2342 	u32 reg_data;
2343 	int delay;
2344 	int tx_port_num = mvpp2_egress_port(port);
2345 
2346 	/* Issue stop command for active channels only */
2347 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2348 	reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
2349 		    MVPP2_TXP_SCHED_ENQ_MASK;
2350 	if (reg_data != 0)
2351 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
2352 			    (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
2353 
2354 	/* Wait for all Tx activity to terminate. */
2355 	delay = 0;
2356 	do {
2357 		if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
2358 			netdev_warn(port->dev,
2359 				    "Tx stop timed out, status=0x%08x\n",
2360 				    reg_data);
2361 			break;
2362 		}
2363 		mdelay(1);
2364 		delay++;
2365 
		/* Check the port TX Command register to verify that all
		 * Tx queues have stopped
		 */
2369 		reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
2370 	} while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
2371 }
2372 
2373 /* Rx descriptors helper methods */
2374 
2375 /* Get number of Rx descriptors occupied by received packets */
2376 static inline int
2377 mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
2378 {
2379 	u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
2380 
2381 	return val & MVPP2_RXQ_OCCUPIED_MASK;
2382 }
2383 
2384 /* Update Rx queue status with the number of occupied and available
2385  * Rx descriptor slots.
2386  */
2387 static inline void
2388 mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
2389 			int used_count, int free_count)
2390 {
	/* Decrement the number of used descriptors and increment the
	 * number of free descriptors.
	 */
2394 	u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
2395 
2396 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
2397 }
2398 
2399 /* Get pointer to next RX descriptor to be processed by SW */
2400 static inline struct mvpp2_rx_desc *
2401 mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
2402 {
2403 	int rx_desc = rxq->next_desc_to_proc;
2404 
2405 	rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
2406 	prefetch(rxq->descs + rxq->next_desc_to_proc);
2407 	return rxq->descs + rx_desc;
2408 }
2409 
2410 /* Set rx queue offset */
2411 static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
2412 				 int prxq, int offset)
2413 {
2414 	u32 val;
2415 
2416 	/* Convert offset from bytes to units of 32 bytes */
2417 	offset = offset >> 5;
2418 
2419 	val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
2420 	val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
2421 
	/* Offset is in units of 32 bytes */
2423 	val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
2424 		    MVPP2_RXQ_PACKET_OFFSET_MASK);
2425 
2426 	mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
2427 }
2428 
2429 /* Tx descriptors helper methods */
2430 
2431 /* Get pointer to next Tx descriptor to be processed (send) by HW */
2432 static struct mvpp2_tx_desc *
2433 mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
2434 {
2435 	int tx_desc = txq->next_desc_to_proc;
2436 
2437 	txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
2438 	return txq->descs + tx_desc;
2439 }
2440 
2441 /* Update HW with number of aggregated Tx descriptors to be sent
2442  *
2443  * Called only from mvpp2_tx(), so migration is disabled, using
2444  * smp_processor_id() is OK.
2445  */
2446 static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
2447 {
2448 	/* aggregated access - relevant TXQ number is written in TX desc */
2449 	mvpp2_thread_write(port->priv,
2450 			   mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2451 			   MVPP2_AGGR_TXQ_UPDATE_REG, pending);
2452 }
2453 
2454 /* Check if there are enough free descriptors in aggregated txq.
2455  * If not, update the number of occupied descriptors and repeat the check.
2456  *
2457  * Called only from mvpp2_tx(), so migration is disabled, using
2458  * smp_processor_id() is OK.
2459  */
2460 static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
2461 				     struct mvpp2_tx_queue *aggr_txq, int num)
2462 {
2463 	if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
2464 		/* Update number of occupied aggregated Tx descriptors */
2465 		unsigned int thread =
2466 			mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2467 		u32 val = mvpp2_read_relaxed(port->priv,
2468 					     MVPP2_AGGR_TXQ_STATUS_REG(thread));
2469 
2470 		aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
2471 
2472 		if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE)
2473 			return -ENOMEM;
2474 	}
2475 	return 0;
2476 }
2477 
2478 /* Reserved Tx descriptors allocation request
2479  *
2480  * Called only from mvpp2_txq_reserved_desc_num_proc(), itself called
2481  * only by mvpp2_tx(), so migration is disabled, using
2482  * smp_processor_id() is OK.
2483  */
2484 static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
2485 					 struct mvpp2_tx_queue *txq, int num)
2486 {
2487 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
2488 	struct mvpp2 *priv = port->priv;
2489 	u32 val;
2490 
2491 	val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
2492 	mvpp2_thread_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
2493 
2494 	val = mvpp2_thread_read_relaxed(priv, thread, MVPP2_TXQ_RSVD_RSLT_REG);
2495 
2496 	return val & MVPP2_TXQ_RSVD_RSLT_MASK;
2497 }
2498 
2499 /* Check if there are enough reserved descriptors for transmission.
2500  * If not, request chunk of reserved descriptors and check again.
2501  */
2502 static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2_port *port,
2503 					    struct mvpp2_tx_queue *txq,
2504 					    struct mvpp2_txq_pcpu *txq_pcpu,
2505 					    int num)
2506 {
2507 	int req, desc_count;
2508 	unsigned int thread;
2509 
2510 	if (txq_pcpu->reserved_num >= num)
2511 		return 0;
2512 
2513 	/* Not enough descriptors reserved! Update the reserved descriptor
2514 	 * count and check again.
2515 	 */
2516 
2517 	desc_count = 0;
2518 	/* Compute total of used descriptors */
2519 	for (thread = 0; thread < port->priv->nthreads; thread++) {
2520 		struct mvpp2_txq_pcpu *txq_pcpu_aux;
2521 
2522 		txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
2523 		desc_count += txq_pcpu_aux->count;
2524 		desc_count += txq_pcpu_aux->reserved_num;
2525 	}
2526 
2527 	req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
2528 	desc_count += req;
2529 
2530 	if (desc_count >
2531 	   (txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
2532 		return -ENOMEM;
2533 
2534 	txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
2535 
	/* OK, the reserved descriptor count may have been updated: check again. */
2537 	if (txq_pcpu->reserved_num < num)
2538 		return -ENOMEM;
2539 	return 0;
2540 }
2541 
2542 /* Release the last allocated Tx descriptor. Useful to handle DMA
2543  * mapping failures in the Tx path.
2544  */
2545 static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
2546 {
2547 	if (txq->next_desc_to_proc == 0)
2548 		txq->next_desc_to_proc = txq->last_desc - 1;
2549 	else
2550 		txq->next_desc_to_proc--;
2551 }
2552 
2553 /* Set Tx descriptors fields relevant for CSUM calculation */
2554 static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
2555 			       int ip_hdr_len, int l4_proto)
2556 {
2557 	u32 command;
2558 
	/* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
	 * G_L4_chk and L4_type are required only for checksum
	 * calculation
	 */
2562 	command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
2563 	command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
2564 	command |= MVPP2_TXD_IP_CSUM_DISABLE;
2565 
2566 	if (l3_proto == htons(ETH_P_IP)) {
2567 		command &= ~MVPP2_TXD_IP_CSUM_DISABLE;	/* enable IPv4 csum */
2568 		command &= ~MVPP2_TXD_L3_IP6;		/* enable IPv4 */
2569 	} else {
2570 		command |= MVPP2_TXD_L3_IP6;		/* enable IPv6 */
2571 	}
2572 
2573 	if (l4_proto == IPPROTO_TCP) {
2574 		command &= ~MVPP2_TXD_L4_UDP;		/* enable TCP */
2575 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2576 	} else if (l4_proto == IPPROTO_UDP) {
2577 		command |= MVPP2_TXD_L4_UDP;		/* enable UDP */
2578 		command &= ~MVPP2_TXD_L4_CSUM_FRAG;	/* generate L4 csum */
2579 	} else {
2580 		command |= MVPP2_TXD_L4_CSUM_NOT;
2581 	}
2582 
2583 	return command;
2584 }
2585 
2586 /* Get number of sent descriptors and decrement counter.
2587  * The number of sent descriptors is returned.
2588  * Per-thread access
2589  *
2590  * Called only from mvpp2_txq_done(), called from mvpp2_tx()
2591  * (migration disabled) and from the TX completion tasklet (migration
2592  * disabled) so using smp_processor_id() is OK.
2593  */
2594 static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
2595 					   struct mvpp2_tx_queue *txq)
2596 {
2597 	u32 val;
2598 
2599 	/* Reading status reg resets transmitted descriptor counter */
2600 	val = mvpp2_thread_read_relaxed(port->priv,
2601 					mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2602 					MVPP2_TXQ_SENT_REG(txq->id));
2603 
2604 	return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
2605 		MVPP2_TRANSMITTED_COUNT_OFFSET;
2606 }
2607 
2608 /* Called through on_each_cpu(), so runs on all CPUs, with migration
2609  * disabled, therefore using smp_processor_id() is OK.
2610  */
2611 static void mvpp2_txq_sent_counter_clear(void *arg)
2612 {
2613 	struct mvpp2_port *port = arg;
2614 	int queue;
2615 
2616 	/* If the thread isn't used, don't do anything */
2617 	if (smp_processor_id() >= port->priv->nthreads)
2618 		return;
2619 
2620 	for (queue = 0; queue < port->ntxqs; queue++) {
2621 		int id = port->txqs[queue]->id;
2622 
2623 		mvpp2_thread_read(port->priv,
2624 				  mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
2625 				  MVPP2_TXQ_SENT_REG(id));
2626 	}
2627 }
2628 
2629 /* Set max sizes for Tx queues */
2630 static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
2631 {
2632 	u32	val, size, mtu;
2633 	int	txq, tx_port_num;
2634 
2635 	mtu = port->pkt_size * 8;
2636 	if (mtu > MVPP2_TXP_MTU_MAX)
2637 		mtu = MVPP2_TXP_MTU_MAX;
2638 
	/* Workaround for wrong token bucket update: set MTU value to 3 * the real MTU value */
2640 	mtu = 3 * mtu;
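
	/* E.g. pkt_size = 1518 gives 1518 * 8 = 12144, tripled to 36432 by
	 * the workaround; note the MVPP2_TXP_MTU_MAX clamp above is applied
	 * before tripling.
	 */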
2641 
2642 	/* Indirect access to registers */
2643 	tx_port_num = mvpp2_egress_port(port);
2644 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
2645 
2646 	/* Set MTU */
2647 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
2648 	val &= ~MVPP2_TXP_MTU_MAX;
2649 	val |= mtu;
2650 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
2651 
	/* TXP token size and all TXQ token sizes must be larger than the MTU */
2653 	val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
2654 	size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
2655 	if (size < mtu) {
2656 		size = mtu;
2657 		val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
2658 		val |= size;
2659 		mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
2660 	}
2661 
2662 	for (txq = 0; txq < port->ntxqs; txq++) {
2663 		val = mvpp2_read(port->priv,
2664 				 MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
2665 		size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
2666 
2667 		if (size < mtu) {
2668 			size = mtu;
2669 			val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
2670 			val |= size;
2671 			mvpp2_write(port->priv,
2672 				    MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
2673 				    val);
2674 		}
2675 	}
2676 }
2677 
2678 /* Set the number of non-occupied descriptors threshold */
2679 static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
2680 				     struct mvpp2_rx_queue *rxq)
2681 {
2682 	u32 val;
2683 
2684 	mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
2685 
2686 	val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
2687 	val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
2688 	val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
2689 	mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
2690 }
2691 
2692 /* Set the number of packets that will be received before Rx interrupt
2693  * will be generated by HW.
2694  */
2695 static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
2696 				   struct mvpp2_rx_queue *rxq)
2697 {
2698 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2699 
2700 	if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
2701 		rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
2702 
2703 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2704 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_THRESH_REG,
2705 			   rxq->pkts_coal);
2706 
2707 	put_cpu();
2708 }
2709 
/* For some reason the LSP does this on each CPU. Why? */
2711 static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
2712 				   struct mvpp2_tx_queue *txq)
2713 {
2714 	unsigned int thread;
2715 	u32 val;
2716 
2717 	if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
2718 		txq->done_pkts_coal = MVPP2_TXQ_THRESH_MASK;
2719 
2720 	val = (txq->done_pkts_coal << MVPP2_TXQ_THRESH_OFFSET);
2721 	/* PKT-coalescing registers are per-queue + per-thread */
2722 	for (thread = 0; thread < MVPP2_MAX_THREADS; thread++) {
2723 		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
2724 		mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_THRESH_REG, val);
2725 	}
2726 }
2727 
2728 static u32 mvpp2_usec_to_cycles(u32 usec, unsigned long clk_hz)
2729 {
2730 	u64 tmp = (u64)clk_hz * usec;
2731 
2732 	do_div(tmp, USEC_PER_SEC);
2733 
2734 	return tmp > U32_MAX ? U32_MAX : tmp;
2735 }
2736 
2737 static u32 mvpp2_cycles_to_usec(u32 cycles, unsigned long clk_hz)
2738 {
2739 	u64 tmp = (u64)cycles * USEC_PER_SEC;
2740 
2741 	do_div(tmp, clk_hz);
2742 
2743 	return tmp > U32_MAX ? U32_MAX : tmp;
2744 }
2745 
2746 /* Set the time delay in usec before Rx interrupt */
2747 static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
2748 				   struct mvpp2_rx_queue *rxq)
2749 {
2750 	unsigned long freq = port->priv->tclk;
2751 	u32 val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2752 
2753 	if (val > MVPP2_MAX_ISR_RX_THRESHOLD) {
2754 		rxq->time_coal =
2755 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_RX_THRESHOLD, freq);
2756 
2757 		/* re-evaluate to get actual register value */
2758 		val = mvpp2_usec_to_cycles(rxq->time_coal, freq);
2759 	}
2760 
2761 	mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
2762 }
2763 
2764 static void mvpp2_tx_time_coal_set(struct mvpp2_port *port)
2765 {
2766 	unsigned long freq = port->priv->tclk;
2767 	u32 val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2768 
2769 	if (val > MVPP2_MAX_ISR_TX_THRESHOLD) {
2770 		port->tx_time_coal =
2771 			mvpp2_cycles_to_usec(MVPP2_MAX_ISR_TX_THRESHOLD, freq);
2772 
2773 		/* re-evaluate to get actual register value */
2774 		val = mvpp2_usec_to_cycles(port->tx_time_coal, freq);
2775 	}
2776 
2777 	mvpp2_write(port->priv, MVPP2_ISR_TX_THRESHOLD_REG(port->id), val);
2778 }
2779 
2780 /* Free Tx queue skbuffs */
2781 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
2782 				struct mvpp2_tx_queue *txq,
2783 				struct mvpp2_txq_pcpu *txq_pcpu, int num)
2784 {
2785 	struct xdp_frame_bulk bq;
2786 	int i;
2787 
2788 	xdp_frame_bulk_init(&bq);
2789 
2790 	rcu_read_lock(); /* need for xdp_return_frame_bulk */
2791 
2792 	for (i = 0; i < num; i++) {
2793 		struct mvpp2_txq_pcpu_buf *tx_buf =
2794 			txq_pcpu->buffs + txq_pcpu->txq_get_index;
2795 
2796 		if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma) &&
2797 		    tx_buf->type != MVPP2_TYPE_XDP_TX)
2798 			dma_unmap_single(port->dev->dev.parent, tx_buf->dma,
2799 					 tx_buf->size, DMA_TO_DEVICE);
2800 		if (tx_buf->type == MVPP2_TYPE_SKB && tx_buf->skb)
2801 			dev_kfree_skb_any(tx_buf->skb);
2802 		else if (tx_buf->type == MVPP2_TYPE_XDP_TX ||
2803 			 tx_buf->type == MVPP2_TYPE_XDP_NDO)
2804 			xdp_return_frame_bulk(tx_buf->xdpf, &bq);
2805 
2806 		mvpp2_txq_inc_get(txq_pcpu);
2807 	}
2808 	xdp_flush_frame_bulk(&bq);
2809 
2810 	rcu_read_unlock();
2811 }
2812 
2813 static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
2814 							u32 cause)
2815 {
2816 	int queue = fls(cause) - 1;
2817 
2818 	return port->rxqs[queue];
2819 }
2820 
2821 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
2822 							u32 cause)
2823 {
2824 	int queue = fls(cause) - 1;
2825 
2826 	return port->txqs[queue];
2827 }
2828 
2829 /* Handle end of transmission */
2830 static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
2831 			   struct mvpp2_txq_pcpu *txq_pcpu)
2832 {
2833 	struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
2834 	int tx_done;
2835 
2836 	if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
		netdev_err(port->dev, "wrong cpu at the end of Tx processing\n");
2838 
2839 	tx_done = mvpp2_txq_sent_desc_proc(port, txq);
2840 	if (!tx_done)
2841 		return;
2842 	mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
2843 
2844 	txq_pcpu->count -= tx_done;
2845 
2846 	if (netif_tx_queue_stopped(nq))
2847 		if (txq_pcpu->count <= txq_pcpu->wake_threshold)
2848 			netif_tx_wake_queue(nq);
2849 }
2850 
2851 static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause,
2852 				  unsigned int thread)
2853 {
2854 	struct mvpp2_tx_queue *txq;
2855 	struct mvpp2_txq_pcpu *txq_pcpu;
2856 	unsigned int tx_todo = 0;
2857 
2858 	while (cause) {
2859 		txq = mvpp2_get_tx_queue(port, cause);
2860 		if (!txq)
2861 			break;
2862 
2863 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
2864 
2865 		if (txq_pcpu->count) {
2866 			mvpp2_txq_done(port, txq, txq_pcpu);
2867 			tx_todo += txq_pcpu->count;
2868 		}
2869 
2870 		cause &= ~(1 << txq->log_id);
2871 	}
2872 	return tx_todo;
2873 }
2874 
2875 /* Rx/Tx queue initialization/cleanup methods */
2876 
2877 /* Allocate and initialize descriptors for aggr TXQ */
2878 static int mvpp2_aggr_txq_init(struct platform_device *pdev,
2879 			       struct mvpp2_tx_queue *aggr_txq,
2880 			       unsigned int thread, struct mvpp2 *priv)
2881 {
2882 	u32 txq_dma;
2883 
2884 	/* Allocate memory for TX descriptors */
2885 	aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
2886 					     MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
2887 					     &aggr_txq->descs_dma, GFP_KERNEL);
2888 	if (!aggr_txq->descs)
2889 		return -ENOMEM;
2890 
2891 	aggr_txq->last_desc = MVPP2_AGGR_TXQ_SIZE - 1;
2892 
	/* Workaround: aggregated TXQs are not reset, so resume from the HW index */
2894 	aggr_txq->next_desc_to_proc = mvpp2_read(priv,
2895 						 MVPP2_AGGR_TXQ_INDEX_REG(thread));
2896 
	/* Set Tx descriptors queue starting address - indirect
	 * access
	 */
2900 	if (priv->hw_version == MVPP21)
2901 		txq_dma = aggr_txq->descs_dma;
2902 	else
2903 		txq_dma = aggr_txq->descs_dma >>
2904 			MVPP22_AGGR_TXQ_DESC_ADDR_OFFS;
2905 
2906 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(thread), txq_dma);
2907 	mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(thread),
2908 		    MVPP2_AGGR_TXQ_SIZE);
2909 
2910 	return 0;
2911 }
2912 
2913 /* Create a specified Rx queue */
2914 static int mvpp2_rxq_init(struct mvpp2_port *port,
2915 			  struct mvpp2_rx_queue *rxq)
2916 {
2917 	struct mvpp2 *priv = port->priv;
2918 	unsigned int thread;
2919 	u32 rxq_dma;
2920 	int err;
2921 
2922 	rxq->size = port->rx_ring_size;
2923 
2924 	/* Allocate memory for RX descriptors */
2925 	rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
2926 					rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2927 					&rxq->descs_dma, GFP_KERNEL);
2928 	if (!rxq->descs)
2929 		return -ENOMEM;
2930 
2931 	rxq->last_desc = rxq->size - 1;
2932 
2933 	/* Zero occupied and non-occupied counters - direct access */
2934 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
2935 
2936 	/* Set Rx descriptors queue starting address - indirect access */
2937 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
2938 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
2939 	if (port->priv->hw_version == MVPP21)
2940 		rxq_dma = rxq->descs_dma;
2941 	else
2942 		rxq_dma = rxq->descs_dma >> MVPP22_DESC_ADDR_OFFS;
2943 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, rxq_dma);
2944 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
2945 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_INDEX_REG, 0);
2946 	put_cpu();
2947 
2948 	/* Set Offset */
2949 	mvpp2_rxq_offset_set(port, rxq->id, MVPP2_SKB_HEADROOM);
2950 
2951 	/* Set coalescing pkts and time */
2952 	mvpp2_rx_pkts_coal_set(port, rxq);
2953 	mvpp2_rx_time_coal_set(port, rxq);
2954 
	/* Set the number of non-occupied descriptors threshold */
2956 	mvpp2_set_rxq_free_tresh(port, rxq);
2957 
2958 	/* Add number of descriptors ready for receiving packets */
2959 	mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
2960 
2961 	if (priv->percpu_pools) {
2962 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_short, port->dev, rxq->id, 0);
2963 		if (err < 0)
2964 			goto err_free_dma;
2965 
2966 		err = xdp_rxq_info_reg(&rxq->xdp_rxq_long, port->dev, rxq->id, 0);
2967 		if (err < 0)
2968 			goto err_unregister_rxq_short;
2969 
2970 		/* Every RXQ has a pool for short and another for long packets */
2971 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_short,
2972 						 MEM_TYPE_PAGE_POOL,
2973 						 priv->page_pool[rxq->logic_rxq]);
2974 		if (err < 0)
2975 			goto err_unregister_rxq_long;
2976 
2977 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq_long,
2978 						 MEM_TYPE_PAGE_POOL,
2979 						 priv->page_pool[rxq->logic_rxq +
2980 								 port->nrxqs]);
2981 		if (err < 0)
2982 			goto err_unregister_mem_rxq_short;
2983 	}
2984 
2985 	return 0;
2986 
2987 err_unregister_mem_rxq_short:
2988 	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq_short);
2989 err_unregister_rxq_long:
2990 	xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
2991 err_unregister_rxq_short:
2992 	xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
2993 err_free_dma:
2994 	dma_free_coherent(port->dev->dev.parent,
2995 			  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
2996 			  rxq->descs, rxq->descs_dma);
2997 	return err;
2998 }
2999 
3000 /* Push packets received by the RXQ to BM pool */
3001 static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
3002 				struct mvpp2_rx_queue *rxq)
3003 {
3004 	int rx_received, i;
3005 
3006 	rx_received = mvpp2_rxq_received(port, rxq->id);
3007 	if (!rx_received)
3008 		return;
3009 
3010 	for (i = 0; i < rx_received; i++) {
3011 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3012 		u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3013 		int pool;
3014 
3015 		pool = (status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3016 			MVPP2_RXD_BM_POOL_ID_OFFS;
3017 
3018 		mvpp2_bm_pool_put(port, pool,
3019 				  mvpp2_rxdesc_dma_addr_get(port, rx_desc),
3020 				  mvpp2_rxdesc_cookie_get(port, rx_desc));
3021 	}
3022 	mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
3023 }
3024 
3025 /* Cleanup Rx queue */
3026 static void mvpp2_rxq_deinit(struct mvpp2_port *port,
3027 			     struct mvpp2_rx_queue *rxq)
3028 {
3029 	unsigned int thread;
3030 
3031 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_short))
3032 		xdp_rxq_info_unreg(&rxq->xdp_rxq_short);
3033 
3034 	if (xdp_rxq_info_is_reg(&rxq->xdp_rxq_long))
3035 		xdp_rxq_info_unreg(&rxq->xdp_rxq_long);
3036 
3037 	mvpp2_rxq_drop_pkts(port, rxq);
3038 
3039 	if (rxq->descs)
3040 		dma_free_coherent(port->dev->dev.parent,
3041 				  rxq->size * MVPP2_DESC_ALIGNED_SIZE,
3042 				  rxq->descs,
3043 				  rxq->descs_dma);
3044 
3045 	rxq->descs             = NULL;
3046 	rxq->last_desc         = 0;
3047 	rxq->next_desc_to_proc = 0;
3048 	rxq->descs_dma         = 0;
3049 
3050 	/* Clear Rx descriptors queue starting address and size;
3051 	 * free descriptor number
3052 	 */
3053 	mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
3054 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3055 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
3056 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
3057 	mvpp2_thread_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
3058 	put_cpu();
3059 }
3060 
3061 /* Create and initialize a Tx queue */
3062 static int mvpp2_txq_init(struct mvpp2_port *port,
3063 			  struct mvpp2_tx_queue *txq)
3064 {
3065 	u32 val;
3066 	unsigned int thread;
3067 	int desc, desc_per_txq, tx_port_num;
3068 	struct mvpp2_txq_pcpu *txq_pcpu;
3069 
3070 	txq->size = port->tx_ring_size;
3071 
3072 	/* Allocate memory for Tx descriptors */
3073 	txq->descs = dma_alloc_coherent(port->dev->dev.parent,
3074 				txq->size * MVPP2_DESC_ALIGNED_SIZE,
3075 				&txq->descs_dma, GFP_KERNEL);
3076 	if (!txq->descs)
3077 		return -ENOMEM;
3078 
3079 	txq->last_desc = txq->size - 1;
3080 
3081 	/* Set Tx descriptors queue starting address - indirect access */
3082 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3083 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3084 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
3085 			   txq->descs_dma);
3086 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG,
3087 			   txq->size & MVPP2_TXQ_DESC_SIZE_MASK);
3088 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_INDEX_REG, 0);
3089 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_RSVD_CLR_REG,
3090 			   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
3091 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PENDING_REG);
3092 	val &= ~MVPP2_TXQ_PENDING_MASK;
3093 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PENDING_REG, val);
3094 
	/* Calculate base address in prefetch buffer. We reserve 16 descriptors
	 * for each existing TXQ.
	 * TCONTs for the PON port must be contiguous from 0 to MVPP2_MAX_TCONT;
	 * GBE ports are assumed to be contiguous from 0 to MVPP2_MAX_PORTS.
	 */
3100 	desc_per_txq = 16;
3101 	desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
3102 	       (txq->log_id * desc_per_txq);
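
	/* For example, assuming MVPP2_MAX_TXQ is 8: port 1, txq log_id 2 gets
	 * desc = (1 * 8 * 16) + (2 * 16) = 160 as its prefetch buffer base.
	 */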
3103 
3104 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG,
3105 			   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
3106 			   MVPP2_PREF_BUF_THRESH(desc_per_txq / 2));
3107 	put_cpu();
3108 
3109 	/* WRR / EJP configuration - indirect access */
3110 	tx_port_num = mvpp2_egress_port(port);
3111 	mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
3112 
3113 	val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
3114 	val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
3115 	val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
3116 	val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
3117 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
3118 
3119 	val = MVPP2_TXQ_TOKEN_SIZE_MAX;
3120 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
3121 		    val);
3122 
3123 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3124 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3125 		txq_pcpu->size = txq->size;
3126 		txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
3127 						sizeof(*txq_pcpu->buffs),
3128 						GFP_KERNEL);
3129 		if (!txq_pcpu->buffs)
3130 			return -ENOMEM;
3131 
3132 		txq_pcpu->count = 0;
3133 		txq_pcpu->reserved_num = 0;
3134 		txq_pcpu->txq_put_index = 0;
3135 		txq_pcpu->txq_get_index = 0;
3136 		txq_pcpu->tso_headers = NULL;
3137 
3138 		txq_pcpu->stop_threshold = txq->size - MVPP2_MAX_SKB_DESCS;
3139 		txq_pcpu->wake_threshold = txq_pcpu->stop_threshold / 2;
3140 
3141 		txq_pcpu->tso_headers =
3142 			dma_alloc_coherent(port->dev->dev.parent,
3143 					   txq_pcpu->size * TSO_HEADER_SIZE,
3144 					   &txq_pcpu->tso_headers_dma,
3145 					   GFP_KERNEL);
3146 		if (!txq_pcpu->tso_headers)
3147 			return -ENOMEM;
3148 	}
3149 
3150 	return 0;
3151 }
3152 
3153 /* Free allocated TXQ resources */
3154 static void mvpp2_txq_deinit(struct mvpp2_port *port,
3155 			     struct mvpp2_tx_queue *txq)
3156 {
3157 	struct mvpp2_txq_pcpu *txq_pcpu;
3158 	unsigned int thread;
3159 
3160 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3161 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3162 		kfree(txq_pcpu->buffs);
3163 
3164 		if (txq_pcpu->tso_headers)
3165 			dma_free_coherent(port->dev->dev.parent,
3166 					  txq_pcpu->size * TSO_HEADER_SIZE,
3167 					  txq_pcpu->tso_headers,
3168 					  txq_pcpu->tso_headers_dma);
3169 
3170 		txq_pcpu->tso_headers = NULL;
3171 	}
3172 
3173 	if (txq->descs)
3174 		dma_free_coherent(port->dev->dev.parent,
3175 				  txq->size * MVPP2_DESC_ALIGNED_SIZE,
3176 				  txq->descs, txq->descs_dma);
3177 
3178 	txq->descs             = NULL;
3179 	txq->last_desc         = 0;
3180 	txq->next_desc_to_proc = 0;
3181 	txq->descs_dma         = 0;
3182 
3183 	/* Set minimum bandwidth for disabled TXQs */
3184 	mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->log_id), 0);
3185 
3186 	/* Set Tx descriptors queue starting address and size */
3187 	thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3188 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3189 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
3190 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
3191 	put_cpu();
3192 }
3193 
/* Clean up a Tx queue */
3195 static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
3196 {
3197 	struct mvpp2_txq_pcpu *txq_pcpu;
3198 	int delay, pending;
3199 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
3200 	u32 val;
3201 
3202 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
3203 	val = mvpp2_thread_read(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG);
3204 	val |= MVPP2_TXQ_DRAIN_EN_MASK;
3205 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3206 
	/* The NAPI queue has been stopped, so wait for all packets
	 * to be transmitted.
	 */
3210 	delay = 0;
3211 	do {
3212 		if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
3213 			netdev_warn(port->dev,
3214 				    "port %d: cleaning queue %d timed out\n",
3215 				    port->id, txq->log_id);
3216 			break;
3217 		}
3218 		mdelay(1);
3219 		delay++;
3220 
3221 		pending = mvpp2_thread_read(port->priv, thread,
3222 					    MVPP2_TXQ_PENDING_REG);
3223 		pending &= MVPP2_TXQ_PENDING_MASK;
3224 	} while (pending);
3225 
3226 	val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
3227 	mvpp2_thread_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
3228 	put_cpu();
3229 
3230 	for (thread = 0; thread < port->priv->nthreads; thread++) {
3231 		txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3232 
3233 		/* Release all packets */
3234 		mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
3235 
3236 		/* Reset queue */
3237 		txq_pcpu->count = 0;
3238 		txq_pcpu->txq_put_index = 0;
3239 		txq_pcpu->txq_get_index = 0;
3240 	}
3241 }
3242 
3243 /* Cleanup all Tx queues */
3244 static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
3245 {
3246 	struct mvpp2_tx_queue *txq;
3247 	int queue;
3248 	u32 val;
3249 
3250 	val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
3251 
3252 	/* Reset Tx ports and delete Tx queues */
3253 	val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
3254 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3255 
3256 	for (queue = 0; queue < port->ntxqs; queue++) {
3257 		txq = port->txqs[queue];
3258 		mvpp2_txq_clean(port, txq);
3259 		mvpp2_txq_deinit(port, txq);
3260 	}
3261 
3262 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3263 
3264 	val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
3265 	mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
3266 }
3267 
3268 /* Cleanup all Rx queues */
3269 static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
3270 {
3271 	int queue;
3272 
3273 	for (queue = 0; queue < port->nrxqs; queue++)
3274 		mvpp2_rxq_deinit(port, port->rxqs[queue]);
3275 
3276 	if (port->tx_fc)
3277 		mvpp2_rxq_disable_fc(port);
3278 }
3279 
3280 /* Init all Rx queues for port */
3281 static int mvpp2_setup_rxqs(struct mvpp2_port *port)
3282 {
3283 	int queue, err;
3284 
3285 	for (queue = 0; queue < port->nrxqs; queue++) {
3286 		err = mvpp2_rxq_init(port, port->rxqs[queue]);
3287 		if (err)
3288 			goto err_cleanup;
3289 	}
3290 
3291 	if (port->tx_fc)
3292 		mvpp2_rxq_enable_fc(port);
3293 
3294 	return 0;
3295 
3296 err_cleanup:
3297 	mvpp2_cleanup_rxqs(port);
3298 	return err;
3299 }
3300 
3301 /* Init all tx queues for port */
3302 static int mvpp2_setup_txqs(struct mvpp2_port *port)
3303 {
3304 	struct mvpp2_tx_queue *txq;
3305 	int queue, err;
3306 
3307 	for (queue = 0; queue < port->ntxqs; queue++) {
3308 		txq = port->txqs[queue];
3309 		err = mvpp2_txq_init(port, txq);
3310 		if (err)
3311 			goto err_cleanup;
3312 
3313 		/* Assign this queue to a CPU */
3314 		if (queue < num_possible_cpus())
3315 			netif_set_xps_queue(port->dev, cpumask_of(queue), queue);
3316 	}
3317 
3318 	if (port->has_tx_irqs) {
3319 		mvpp2_tx_time_coal_set(port);
3320 		for (queue = 0; queue < port->ntxqs; queue++) {
3321 			txq = port->txqs[queue];
3322 			mvpp2_tx_pkts_coal_set(port, txq);
3323 		}
3324 	}
3325 
3326 	on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
3327 	return 0;
3328 
3329 err_cleanup:
3330 	mvpp2_cleanup_txqs(port);
3331 	return err;
3332 }
3333 
3334 /* The callback for per-port interrupt */
3335 static irqreturn_t mvpp2_isr(int irq, void *dev_id)
3336 {
3337 	struct mvpp2_queue_vector *qv = dev_id;
3338 
3339 	mvpp2_qvec_interrupt_disable(qv);
3340 
3341 	napi_schedule(&qv->napi);
3342 
3343 	return IRQ_HANDLED;
3344 }
3345 
3346 static void mvpp2_isr_handle_ptp_queue(struct mvpp2_port *port, int nq)
3347 {
3348 	struct skb_shared_hwtstamps shhwtstamps;
3349 	struct mvpp2_hwtstamp_queue *queue;
3350 	struct sk_buff *skb;
3351 	void __iomem *ptp_q;
3352 	unsigned int id;
3353 	u32 r0, r1, r2;
3354 
3355 	ptp_q = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3356 	if (nq)
3357 		ptp_q += MVPP22_PTP_TX_Q1_R0 - MVPP22_PTP_TX_Q0_R0;
3358 
3359 	queue = &port->tx_hwtstamp_queue[nq];
3360 
3361 	while (1) {
3362 		r0 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R0) & 0xffff;
3363 		if (!r0)
3364 			break;
3365 
3366 		r1 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R1) & 0xffff;
3367 		r2 = readl_relaxed(ptp_q + MVPP22_PTP_TX_Q0_R2) & 0xffff;
3368 
3369 		id = (r0 >> 1) & 31;
3370 
3371 		skb = queue->skb[id];
3372 		queue->skb[id] = NULL;
3373 		if (skb) {
3374 			u32 ts = r2 << 19 | r1 << 3 | r0 >> 13;
3375 
3376 			mvpp22_tai_tstamp(port->priv->tai, ts, &shhwtstamps);
3377 			skb_tstamp_tx(skb, &shhwtstamps);
3378 			dev_kfree_skb_any(skb);
3379 		}
3380 	}
3381 }
3382 
3383 static void mvpp2_isr_handle_ptp(struct mvpp2_port *port)
3384 {
3385 	void __iomem *ptp;
3386 	u32 val;
3387 
3388 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
3389 	val = readl(ptp + MVPP22_PTP_INT_CAUSE);
3390 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE0)
3391 		mvpp2_isr_handle_ptp_queue(port, 0);
3392 	if (val & MVPP22_PTP_INT_CAUSE_QUEUE1)
3393 		mvpp2_isr_handle_ptp_queue(port, 1);
3394 }
3395 
3396 static void mvpp2_isr_handle_link(struct mvpp2_port *port, bool link)
3397 {
3398 	struct net_device *dev = port->dev;
3399 
3400 	if (port->phylink) {
3401 		phylink_mac_change(port->phylink, link);
3402 		return;
3403 	}
3404 
3405 	if (!netif_running(dev))
3406 		return;
3407 
3408 	if (link) {
3409 		mvpp2_interrupts_enable(port);
3410 
3411 		mvpp2_egress_enable(port);
3412 		mvpp2_ingress_enable(port);
3413 		netif_carrier_on(dev);
3414 		netif_tx_wake_all_queues(dev);
3415 	} else {
3416 		netif_tx_stop_all_queues(dev);
3417 		netif_carrier_off(dev);
3418 		mvpp2_ingress_disable(port);
3419 		mvpp2_egress_disable(port);
3420 
3421 		mvpp2_interrupts_disable(port);
3422 	}
3423 }
3424 
3425 static void mvpp2_isr_handle_xlg(struct mvpp2_port *port)
3426 {
3427 	bool link;
3428 	u32 val;
3429 
3430 	val = readl(port->base + MVPP22_XLG_INT_STAT);
3431 	if (val & MVPP22_XLG_INT_STAT_LINK) {
3432 		val = readl(port->base + MVPP22_XLG_STATUS);
3433 		link = (val & MVPP22_XLG_STATUS_LINK_UP);
3434 		mvpp2_isr_handle_link(port, link);
3435 	}
3436 }
3437 
3438 static void mvpp2_isr_handle_gmac_internal(struct mvpp2_port *port)
3439 {
3440 	bool link;
3441 	u32 val;
3442 
3443 	if (phy_interface_mode_is_rgmii(port->phy_interface) ||
3444 	    phy_interface_mode_is_8023z(port->phy_interface) ||
3445 	    port->phy_interface == PHY_INTERFACE_MODE_SGMII) {
3446 		val = readl(port->base + MVPP22_GMAC_INT_STAT);
3447 		if (val & MVPP22_GMAC_INT_STAT_LINK) {
3448 			val = readl(port->base + MVPP2_GMAC_STATUS0);
3449 			link = (val & MVPP2_GMAC_STATUS0_LINK_UP);
3450 			mvpp2_isr_handle_link(port, link);
3451 		}
3452 	}
3453 }
3454 
3455 /* Per-port interrupt for link status changes */
3456 static irqreturn_t mvpp2_port_isr(int irq, void *dev_id)
3457 {
3458 	struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
3459 	u32 val;
3460 
3461 	mvpp22_gop_mask_irq(port);
3462 
3463 	if (mvpp2_port_supports_xlg(port) &&
3464 	    mvpp2_is_xlg(port->phy_interface)) {
3465 		/* Check the external status register */
3466 		val = readl(port->base + MVPP22_XLG_EXT_INT_STAT);
3467 		if (val & MVPP22_XLG_EXT_INT_STAT_XLG)
3468 			mvpp2_isr_handle_xlg(port);
3469 		if (val & MVPP22_XLG_EXT_INT_STAT_PTP)
3470 			mvpp2_isr_handle_ptp(port);
3471 	} else {
3472 		/* If it's not the XLG, we must be using the GMAC.
3473 		 * Check the summary status.
3474 		 */
3475 		val = readl(port->base + MVPP22_GMAC_INT_SUM_STAT);
3476 		if (val & MVPP22_GMAC_INT_SUM_STAT_INTERNAL)
3477 			mvpp2_isr_handle_gmac_internal(port);
3478 		if (val & MVPP22_GMAC_INT_SUM_STAT_PTP)
3479 			mvpp2_isr_handle_ptp(port);
3480 	}
3481 
3482 	mvpp22_gop_unmask_irq(port);
3483 	return IRQ_HANDLED;
3484 }
3485 
3486 static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
3487 {
3488 	struct net_device *dev;
3489 	struct mvpp2_port *port;
3490 	struct mvpp2_port_pcpu *port_pcpu;
3491 	unsigned int tx_todo, cause;
3492 
3493 	port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
3494 	dev = port_pcpu->dev;
3495 
3496 	if (!netif_running(dev))
3497 		return HRTIMER_NORESTART;
3498 
3499 	port_pcpu->timer_scheduled = false;
3500 	port = netdev_priv(dev);
3501 
3502 	/* Process all the Tx queues */
3503 	cause = (1 << port->ntxqs) - 1;
3504 	tx_todo = mvpp2_tx_done(port, cause,
3505 				mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
3506 
3507 	/* Set the timer in case not all the packets were processed */
3508 	if (tx_todo && !port_pcpu->timer_scheduled) {
3509 		port_pcpu->timer_scheduled = true;
3510 		hrtimer_forward_now(&port_pcpu->tx_done_timer,
3511 				    MVPP2_TXDONE_HRTIMER_PERIOD_NS);
3512 
3513 		return HRTIMER_RESTART;
3514 	}
3515 	return HRTIMER_NORESTART;
3516 }
3517 
3518 /* Main RX/TX processing routines */
3519 
3520 /* Display more error info */
3521 static void mvpp2_rx_error(struct mvpp2_port *port,
3522 			   struct mvpp2_rx_desc *rx_desc)
3523 {
3524 	u32 status = mvpp2_rxdesc_status_get(port, rx_desc);
3525 	size_t sz = mvpp2_rxdesc_size_get(port, rx_desc);
3526 	char *err_str = NULL;
3527 
3528 	switch (status & MVPP2_RXD_ERR_CODE_MASK) {
3529 	case MVPP2_RXD_ERR_CRC:
3530 		err_str = "crc";
3531 		break;
3532 	case MVPP2_RXD_ERR_OVERRUN:
3533 		err_str = "overrun";
3534 		break;
3535 	case MVPP2_RXD_ERR_RESOURCE:
3536 		err_str = "resource";
3537 		break;
3538 	}
3539 	if (err_str && net_ratelimit())
3540 		netdev_err(port->dev,
3541 			   "bad rx status %08x (%s error), size=%zu\n",
3542 			   status, err_str, sz);
3543 }
3544 
3545 /* Handle RX checksum offload */
3546 static int mvpp2_rx_csum(struct mvpp2_port *port, u32 status)
3547 {
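	/* Report CHECKSUM_UNNECESSARY only for a valid L3 header (IPv4
	 * without header errors, or IPv6) carrying a TCP or UDP payload
	 * whose checksum the hardware verified.
	 */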
3548 	if (((status & MVPP2_RXD_L3_IP4) &&
3549 	     !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
3550 	    (status & MVPP2_RXD_L3_IP6))
3551 		if (((status & MVPP2_RXD_L4_UDP) ||
3552 		     (status & MVPP2_RXD_L4_TCP)) &&
3553 		     (status & MVPP2_RXD_L4_CSUM_OK))
3554 			return CHECKSUM_UNNECESSARY;
3555 
3556 	return CHECKSUM_NONE;
3557 }
3558 
/* Allocate a new buffer and add it to the BM pool */
3560 static int mvpp2_rx_refill(struct mvpp2_port *port,
3561 			   struct mvpp2_bm_pool *bm_pool,
3562 			   struct page_pool *page_pool, int pool)
3563 {
3564 	dma_addr_t dma_addr;
3565 	phys_addr_t phys_addr;
3566 	void *buf;
3567 
3568 	buf = mvpp2_buf_alloc(port, bm_pool, page_pool,
3569 			      &dma_addr, &phys_addr, GFP_ATOMIC);
3570 	if (!buf)
3571 		return -ENOMEM;
3572 
3573 	mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3574 
3575 	return 0;
3576 }
3577 
3578 /* Handle tx checksum */
3579 static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
3580 {
3581 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3582 		int ip_hdr_len = 0;
3583 		u8 l4_proto;
3584 		__be16 l3_proto = vlan_get_protocol(skb);
3585 
3586 		if (l3_proto == htons(ETH_P_IP)) {
3587 			struct iphdr *ip4h = ip_hdr(skb);
3588 
3589 			/* Calculate IPv4 checksum and L4 checksum */
3590 			ip_hdr_len = ip4h->ihl;
3591 			l4_proto = ip4h->protocol;
3592 		} else if (l3_proto == htons(ETH_P_IPV6)) {
3593 			struct ipv6hdr *ip6h = ipv6_hdr(skb);
3594 
3595 			/* Read l4_protocol from one of IPv6 extra headers */
3596 			if (skb_network_header_len(skb) > 0)
3597 				ip_hdr_len = (skb_network_header_len(skb) >> 2);
3598 			l4_proto = ip6h->nexthdr;
3599 		} else {
3600 			return MVPP2_TXD_L4_CSUM_NOT;
3601 		}
3602 
3603 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
3604 					   l3_proto, ip_hdr_len, l4_proto);
3605 	}
3606 
3607 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
3608 }
3609 
3610 static void mvpp2_xdp_finish_tx(struct mvpp2_port *port, u16 txq_id, int nxmit, int nxmit_byte)
3611 {
3612 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3613 	struct mvpp2_tx_queue *aggr_txq;
3614 	struct mvpp2_txq_pcpu *txq_pcpu;
3615 	struct mvpp2_tx_queue *txq;
3616 	struct netdev_queue *nq;
3617 
3618 	txq = port->txqs[txq_id];
3619 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3620 	nq = netdev_get_tx_queue(port->dev, txq_id);
3621 	aggr_txq = &port->priv->aggr_txqs[thread];
3622 
3623 	txq_pcpu->reserved_num -= nxmit;
3624 	txq_pcpu->count += nxmit;
3625 	aggr_txq->count += nxmit;
3626 
3627 	/* Enable transmit */
3628 	wmb();
3629 	mvpp2_aggr_txq_pend_desc_add(port, nxmit);
3630 
3631 	if (txq_pcpu->count >= txq_pcpu->stop_threshold)
3632 		netif_tx_stop_queue(nq);
3633 
3634 	/* Finalize TX processing */
3635 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
3636 		mvpp2_txq_done(port, txq, txq_pcpu);
3637 }
3638 
3639 static int
3640 mvpp2_xdp_submit_frame(struct mvpp2_port *port, u16 txq_id,
3641 		       struct xdp_frame *xdpf, bool dma_map)
3642 {
3643 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
3644 	u32 tx_cmd = MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE |
3645 		     MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
3646 	enum mvpp2_tx_buf_type buf_type;
3647 	struct mvpp2_txq_pcpu *txq_pcpu;
3648 	struct mvpp2_tx_queue *aggr_txq;
3649 	struct mvpp2_tx_desc *tx_desc;
3650 	struct mvpp2_tx_queue *txq;
3651 	int ret = MVPP2_XDP_TX;
3652 	dma_addr_t dma_addr;
3653 
3654 	txq = port->txqs[txq_id];
3655 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
3656 	aggr_txq = &port->priv->aggr_txqs[thread];
3657 
3658 	/* Check number of available descriptors */
3659 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, 1) ||
3660 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, 1)) {
3661 		ret = MVPP2_XDP_DROPPED;
3662 		goto out;
3663 	}
3664 
3665 	/* Get a descriptor for the first part of the packet */
3666 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
3667 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
3668 	mvpp2_txdesc_size_set(port, tx_desc, xdpf->len);
3669 
3670 	if (dma_map) {
3671 		/* XDP_REDIRECT or AF_XDP */
3672 		dma_addr = dma_map_single(port->dev->dev.parent, xdpf->data,
3673 					  xdpf->len, DMA_TO_DEVICE);
3674 
3675 		if (unlikely(dma_mapping_error(port->dev->dev.parent, dma_addr))) {
3676 			mvpp2_txq_desc_put(txq);
3677 			ret = MVPP2_XDP_DROPPED;
3678 			goto out;
3679 		}
3680 
3681 		buf_type = MVPP2_TYPE_XDP_NDO;
3682 	} else {
3683 		/* XDP_TX */
3684 		struct page *page = virt_to_page(xdpf->data);
3685 
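		/* The page is already DMA-mapped by the page_pool; the
		 * payload starts after the xdp_frame struct and headroom.
		 */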
3686 		dma_addr = page_pool_get_dma_addr(page) +
3687 			   sizeof(*xdpf) + xdpf->headroom;
3688 		dma_sync_single_for_device(port->dev->dev.parent, dma_addr,
3689 					   xdpf->len, DMA_BIDIRECTIONAL);
3690 
3691 		buf_type = MVPP2_TYPE_XDP_TX;
3692 	}
3693 
3694 	mvpp2_txdesc_dma_addr_set(port, tx_desc, dma_addr);
3695 
3696 	mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
3697 	mvpp2_txq_inc_put(port, txq_pcpu, xdpf, tx_desc, buf_type);
3698 
3699 out:
3700 	return ret;
3701 }
3702 
3703 static int
3704 mvpp2_xdp_xmit_back(struct mvpp2_port *port, struct xdp_buff *xdp)
3705 {
3706 	struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
3707 	struct xdp_frame *xdpf;
3708 	u16 txq_id;
3709 	int ret;
3710 
3711 	xdpf = xdp_convert_buff_to_frame(xdp);
3712 	if (unlikely(!xdpf))
3713 		return MVPP2_XDP_DROPPED;
3714 
	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX.
	 */
3718 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3719 
3720 	ret = mvpp2_xdp_submit_frame(port, txq_id, xdpf, false);
3721 	if (ret == MVPP2_XDP_TX) {
3722 		u64_stats_update_begin(&stats->syncp);
3723 		stats->tx_bytes += xdpf->len;
3724 		stats->tx_packets++;
3725 		stats->xdp_tx++;
3726 		u64_stats_update_end(&stats->syncp);
3727 
3728 		mvpp2_xdp_finish_tx(port, txq_id, 1, xdpf->len);
3729 	} else {
3730 		u64_stats_update_begin(&stats->syncp);
3731 		stats->xdp_tx_err++;
3732 		u64_stats_update_end(&stats->syncp);
3733 	}
3734 
3735 	return ret;
3736 }
3737 
3738 static int
3739 mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
3740 	       struct xdp_frame **frames, u32 flags)
3741 {
3742 	struct mvpp2_port *port = netdev_priv(dev);
3743 	int i, nxmit_byte = 0, nxmit = 0;
3744 	struct mvpp2_pcpu_stats *stats;
3745 	u16 txq_id;
3746 	u32 ret;
3747 
3748 	if (unlikely(test_bit(0, &port->state)))
3749 		return -ENETDOWN;
3750 
3751 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
3752 		return -EINVAL;
3753 
	/* The first half of the TX queues is used for XPS,
	 * the second half for XDP_TX.
	 */
3757 	txq_id = mvpp2_cpu_to_thread(port->priv, smp_processor_id()) + (port->ntxqs / 2);
3758 
3759 	for (i = 0; i < num_frame; i++) {
3760 		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
3761 		if (ret != MVPP2_XDP_TX)
3762 			break;
3763 
3764 		nxmit_byte += frames[i]->len;
3765 		nxmit++;
3766 	}
3767 
3768 	if (likely(nxmit > 0))
3769 		mvpp2_xdp_finish_tx(port, txq_id, nxmit, nxmit_byte);
3770 
3771 	stats = this_cpu_ptr(port->stats);
3772 	u64_stats_update_begin(&stats->syncp);
3773 	stats->tx_bytes += nxmit_byte;
3774 	stats->tx_packets += nxmit;
3775 	stats->xdp_xmit += nxmit;
3776 	stats->xdp_xmit_err += num_frame - nxmit;
3777 	u64_stats_update_end(&stats->syncp);
3778 
3779 	return nxmit;
3780 }
3781 
3782 static int
3783 mvpp2_run_xdp(struct mvpp2_port *port, struct bpf_prog *prog,
3784 	      struct xdp_buff *xdp, struct page_pool *pp,
3785 	      struct mvpp2_pcpu_stats *stats)
3786 {
3787 	unsigned int len, sync, err;
3788 	struct page *page;
3789 	u32 ret, act;
3790 
3791 	len = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3792 	act = bpf_prog_run_xdp(prog, xdp);
3793 
	/* Due to xdp_adjust_tail: the DMA sync for the device must cover
	 * the maximum length the CPU may have touched.
	 */
3795 	sync = xdp->data_end - xdp->data_hard_start - MVPP2_SKB_HEADROOM;
3796 	sync = max(sync, len);
3797 
3798 	switch (act) {
3799 	case XDP_PASS:
3800 		stats->xdp_pass++;
3801 		ret = MVPP2_XDP_PASS;
3802 		break;
3803 	case XDP_REDIRECT:
3804 		err = xdp_do_redirect(port->dev, xdp, prog);
3805 		if (unlikely(err)) {
3806 			ret = MVPP2_XDP_DROPPED;
3807 			page = virt_to_head_page(xdp->data);
3808 			page_pool_put_page(pp, page, sync, true);
3809 		} else {
3810 			ret = MVPP2_XDP_REDIR;
3811 			stats->xdp_redirect++;
3812 		}
3813 		break;
3814 	case XDP_TX:
3815 		ret = mvpp2_xdp_xmit_back(port, xdp);
3816 		if (ret != MVPP2_XDP_TX) {
3817 			page = virt_to_head_page(xdp->data);
3818 			page_pool_put_page(pp, page, sync, true);
3819 		}
3820 		break;
3821 	default:
3822 		bpf_warn_invalid_xdp_action(act);
3823 		fallthrough;
3824 	case XDP_ABORTED:
3825 		trace_xdp_exception(port->dev, prog, act);
3826 		fallthrough;
3827 	case XDP_DROP:
3828 		page = virt_to_head_page(xdp->data);
3829 		page_pool_put_page(pp, page, sync, true);
3830 		ret = MVPP2_XDP_DROPPED;
3831 		stats->xdp_drop++;
3832 		break;
3833 	}
3834 
3835 	return ret;
3836 }
3837 
3838 static void mvpp2_buff_hdr_pool_put(struct mvpp2_port *port, struct mvpp2_rx_desc *rx_desc,
3839 				    int pool, u32 rx_status)
3840 {
3841 	phys_addr_t phys_addr, phys_addr_next;
3842 	dma_addr_t dma_addr, dma_addr_next;
3843 	struct mvpp2_buff_hdr *buff_hdr;
3844 
	phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
	dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3847 
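	/* Walk the chain of linked buffers, returning each one to the BM
	 * pool, until a buffer header marked "last" is reached.
	 */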
3848 	do {
3849 		buff_hdr = (struct mvpp2_buff_hdr *)phys_to_virt(phys_addr);
3850 
3851 		phys_addr_next = le32_to_cpu(buff_hdr->next_phys_addr);
3852 		dma_addr_next = le32_to_cpu(buff_hdr->next_dma_addr);
3853 
3854 		if (port->priv->hw_version >= MVPP22) {
3855 			phys_addr_next |= ((u64)buff_hdr->next_phys_addr_high << 32);
3856 			dma_addr_next |= ((u64)buff_hdr->next_dma_addr_high << 32);
3857 		}
3858 
3859 		mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
3860 
3861 		phys_addr = phys_addr_next;
3862 		dma_addr = dma_addr_next;
3863 
3864 	} while (!MVPP2_B_HDR_INFO_IS_LAST(le16_to_cpu(buff_hdr->info)));
3865 }
3866 
3867 /* Main rx processing */
3868 static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi,
3869 		    int rx_todo, struct mvpp2_rx_queue *rxq)
3870 {
3871 	struct net_device *dev = port->dev;
3872 	struct mvpp2_pcpu_stats ps = {};
3873 	enum dma_data_direction dma_dir;
3874 	struct bpf_prog *xdp_prog;
3875 	struct xdp_buff xdp;
3876 	int rx_received;
3877 	int rx_done = 0;
3878 	u32 xdp_ret = 0;
3879 
3880 	xdp_prog = READ_ONCE(port->xdp_prog);
3881 
3882 	/* Get number of received packets and clamp the to-do */
3883 	rx_received = mvpp2_rxq_received(port, rxq->id);
3884 	if (rx_todo > rx_received)
3885 		rx_todo = rx_received;
3886 
3887 	while (rx_done < rx_todo) {
3888 		struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
3889 		struct mvpp2_bm_pool *bm_pool;
3890 		struct page_pool *pp = NULL;
3891 		struct sk_buff *skb;
3892 		unsigned int frag_size;
3893 		dma_addr_t dma_addr;
3894 		phys_addr_t phys_addr;
3895 		u32 rx_status, timestamp;
3896 		int pool, rx_bytes, err, ret;
3897 		struct page *page;
3898 		void *data;
3899 
3900 		phys_addr = mvpp2_rxdesc_cookie_get(port, rx_desc);
3901 		data = (void *)phys_to_virt(phys_addr);
3902 		page = virt_to_page(data);
3903 		prefetch(page);
3904 
3905 		rx_done++;
3906 		rx_status = mvpp2_rxdesc_status_get(port, rx_desc);
3907 		rx_bytes = mvpp2_rxdesc_size_get(port, rx_desc);
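		/* The hardware prepends a two-byte Marvell header, which is
		 * not part of the payload.
		 */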
3908 		rx_bytes -= MVPP2_MH_SIZE;
3909 		dma_addr = mvpp2_rxdesc_dma_addr_get(port, rx_desc);
3910 
3911 		pool = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
3912 			MVPP2_RXD_BM_POOL_ID_OFFS;
3913 		bm_pool = &port->priv->bm_pools[pool];
3914 
3915 		if (port->priv->percpu_pools) {
3916 			pp = port->priv->page_pool[pool];
3917 			dma_dir = page_pool_get_dma_dir(pp);
3918 		} else {
3919 			dma_dir = DMA_FROM_DEVICE;
3920 		}
3921 
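		/* Make the received bytes visible to the CPU before the
		 * packet is parsed or handed to XDP.
		 */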
3922 		dma_sync_single_for_cpu(dev->dev.parent, dma_addr,
3923 					rx_bytes + MVPP2_MH_SIZE,
3924 					dma_dir);
3925 
		/* Frames spanning multiple buffers (buffer header) are not
		 * supported here: drop the frame.
		 */
3927 		if (rx_status & MVPP2_RXD_BUF_HDR)
3928 			goto err_drop_frame;
3929 
		/* In case of an error, release the requested buffer pointer
		 * back to the Buffer Manager. This release is controlled by
		 * the hardware, and the information about the buffer is
		 * carried in the RX descriptor.
		 */
3935 		if (rx_status & MVPP2_RXD_ERR_SUMMARY)
3936 			goto err_drop_frame;
3937 
3938 		/* Prefetch header */
3939 		prefetch(data + MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
3940 
3941 		if (bm_pool->frag_size > PAGE_SIZE)
3942 			frag_size = 0;
3943 		else
3944 			frag_size = bm_pool->frag_size;
3945 
3946 		if (xdp_prog) {
3947 			struct xdp_rxq_info *xdp_rxq;
3948 
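			/* Pick the xdp_rxq registered for the BM pool this
			 * buffer came from (short or long packets).
			 */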
3949 			if (bm_pool->pkt_size == MVPP2_BM_SHORT_PKT_SIZE)
3950 				xdp_rxq = &rxq->xdp_rxq_short;
3951 			else
3952 				xdp_rxq = &rxq->xdp_rxq_long;
3953 
3954 			xdp_init_buff(&xdp, PAGE_SIZE, xdp_rxq);
3955 			xdp_prepare_buff(&xdp, data,
3956 					 MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM,
3957 					 rx_bytes, false);
3958 
3959 			ret = mvpp2_run_xdp(port, xdp_prog, &xdp, pp, &ps);
3960 
3961 			if (ret) {
3962 				xdp_ret |= ret;
3963 				err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3964 				if (err) {
3965 					netdev_err(port->dev, "failed to refill BM pools\n");
3966 					goto err_drop_frame;
3967 				}
3968 
3969 				ps.rx_packets++;
3970 				ps.rx_bytes += rx_bytes;
3971 				continue;
3972 			}
3973 		}
3974 
3975 		skb = build_skb(data, frag_size);
3976 		if (!skb) {
3977 			netdev_warn(port->dev, "skb build failed\n");
3978 			goto err_drop_frame;
3979 		}
3980 
3981 		/* If we have RX hardware timestamping enabled, grab the
3982 		 * timestamp from the queue and convert.
3983 		 */
3984 		if (mvpp22_rx_hwtstamping(port)) {
3985 			timestamp = le32_to_cpu(rx_desc->pp22.timestamp);
3986 			mvpp22_tai_tstamp(port->priv->tai, timestamp,
3987 					 skb_hwtstamps(skb));
3988 		}
3989 
3990 		err = mvpp2_rx_refill(port, bm_pool, pp, pool);
3991 		if (err) {
3992 			netdev_err(port->dev, "failed to refill BM pools\n");
3993 			dev_kfree_skb_any(skb);
3994 			goto err_drop_frame;
3995 		}
3996 
3997 		if (pp)
3998 			skb_mark_for_recycle(skb, page, pp);
3999 		else
4000 			dma_unmap_single_attrs(dev->dev.parent, dma_addr,
4001 					       bm_pool->buf_size, DMA_FROM_DEVICE,
4002 					       DMA_ATTR_SKIP_CPU_SYNC);
4003 
4004 		ps.rx_packets++;
4005 		ps.rx_bytes += rx_bytes;
4006 
4007 		skb_reserve(skb, MVPP2_MH_SIZE + MVPP2_SKB_HEADROOM);
4008 		skb_put(skb, rx_bytes);
4009 		skb->ip_summed = mvpp2_rx_csum(port, rx_status);
4010 		skb->protocol = eth_type_trans(skb, dev);
4011 
4012 		napi_gro_receive(napi, skb);
4013 		continue;
4014 
4015 err_drop_frame:
4016 		dev->stats.rx_errors++;
4017 		mvpp2_rx_error(port, rx_desc);
4018 		/* Return the buffer to the pool */
4019 		if (rx_status & MVPP2_RXD_BUF_HDR)
4020 			mvpp2_buff_hdr_pool_put(port, rx_desc, pool, rx_status);
4021 		else
4022 			mvpp2_bm_pool_put(port, pool, dma_addr, phys_addr);
4023 	}
4024 
4025 	if (xdp_ret & MVPP2_XDP_REDIR)
4026 		xdp_do_flush_map();
4027 
4028 	if (ps.rx_packets) {
4029 		struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
4030 
4031 		u64_stats_update_begin(&stats->syncp);
4032 		stats->rx_packets += ps.rx_packets;
4033 		stats->rx_bytes   += ps.rx_bytes;
4034 		/* xdp */
4035 		stats->xdp_redirect += ps.xdp_redirect;
4036 		stats->xdp_pass += ps.xdp_pass;
4037 		stats->xdp_drop += ps.xdp_drop;
4038 		u64_stats_update_end(&stats->syncp);
4039 	}
4040 
4041 	/* Update Rx queue management counters */
4042 	wmb();
4043 	mvpp2_rxq_status_update(port, rxq->id, rx_done, rx_done);
4044 
4045 	return rx_todo;
4046 }
4047 
4048 static inline void
4049 tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4050 		  struct mvpp2_tx_desc *desc)
4051 {
4052 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4053 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4054 
	dma_addr_t buf_dma_addr = mvpp2_txdesc_dma_addr_get(port, desc);
	size_t buf_sz = mvpp2_txdesc_size_get(port, desc);
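
	/* TSO headers live in the per-queue DMA-coherent buffer; they are
	 * not individually mapped, so they must not be unmapped here.
	 */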
4059 	if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr))
4060 		dma_unmap_single(port->dev->dev.parent, buf_dma_addr,
4061 				 buf_sz, DMA_TO_DEVICE);
4062 	mvpp2_txq_desc_put(txq);
4063 }
4064 
4065 static void mvpp2_txdesc_clear_ptp(struct mvpp2_port *port,
4066 				   struct mvpp2_tx_desc *desc)
4067 {
4068 	/* We only need to clear the low bits */
4069 	if (port->priv->hw_version >= MVPP22)
4070 		desc->pp22.ptp_descriptor &=
4071 			cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4072 }
4073 
4074 static bool mvpp2_tx_hw_tstamp(struct mvpp2_port *port,
4075 			       struct mvpp2_tx_desc *tx_desc,
4076 			       struct sk_buff *skb)
4077 {
4078 	struct mvpp2_hwtstamp_queue *queue;
4079 	unsigned int mtype, type, i;
4080 	struct ptp_header *hdr;
4081 	u64 ptpdesc;
4082 
4083 	if (port->priv->hw_version == MVPP21 ||
4084 	    port->tx_hwtstamp_type == HWTSTAMP_TX_OFF)
4085 		return false;
4086 
4087 	type = ptp_classify_raw(skb);
4088 	if (!type)
4089 		return false;
4090 
4091 	hdr = ptp_parse_header(skb, type);
4092 	if (!hdr)
4093 		return false;
4094 
4095 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4096 
4097 	ptpdesc = MVPP22_PTP_MACTIMESTAMPINGEN |
4098 		  MVPP22_PTP_ACTION_CAPTURE;
4099 	queue = &port->tx_hwtstamp_queue[0];
4100 
4101 	switch (type & PTP_CLASS_VMASK) {
4102 	case PTP_CLASS_V1:
4103 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV1);
4104 		break;
4105 
4106 	case PTP_CLASS_V2:
4107 		ptpdesc |= MVPP22_PTP_PACKETFORMAT(MVPP22_PTP_PKT_FMT_PTPV2);
4108 		mtype = hdr->tsmt & 15;
4109 		/* Direct PTP Sync messages to queue 1 */
4110 		if (mtype == 0) {
4111 			ptpdesc |= MVPP22_PTP_TIMESTAMPQUEUESELECT;
4112 			queue = &port->tx_hwtstamp_queue[1];
4113 		}
4114 		break;
4115 	}
4116 
4117 	/* Take a reference on the skb and insert into our queue */
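	/* The timestamp queue holds 32 entries, hence the "& 31" wrap */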
4118 	i = queue->next;
4119 	queue->next = (i + 1) & 31;
4120 	if (queue->skb[i])
4121 		dev_kfree_skb_any(queue->skb[i]);
4122 	queue->skb[i] = skb_get(skb);
4123 
4124 	ptpdesc |= MVPP22_PTP_TIMESTAMPENTRYID(i);
4125 
4126 	/*
4127 	 * 3:0		- PTPAction
4128 	 * 6:4		- PTPPacketFormat
4129 	 * 7		- PTP_CF_WraparoundCheckEn
4130 	 * 9:8		- IngressTimestampSeconds[1:0]
4131 	 * 10		- Reserved
4132 	 * 11		- MACTimestampingEn
4133 	 * 17:12	- PTP_TimestampQueueEntryID[5:0]
4134 	 * 18		- PTPTimestampQueueSelect
4135 	 * 19		- UDPChecksumUpdateEn
4136 	 * 27:20	- TimestampOffset
4137 	 *			PTP, NTPTransmit, OWAMP/TWAMP - L3 to PTP header
4138 	 *			NTPTs, Y.1731 - L3 to timestamp entry
4139 	 * 35:28	- UDP Checksum Offset
4140 	 *
4141 	 * stored in tx descriptor bits 75:64 (11:0) and 191:168 (35:12)
4142 	 */
4143 	tx_desc->pp22.ptp_descriptor &=
4144 		cpu_to_le32(~MVPP22_PTP_DESC_MASK_LOW);
4145 	tx_desc->pp22.ptp_descriptor |=
4146 		cpu_to_le32(ptpdesc & MVPP22_PTP_DESC_MASK_LOW);
4147 	tx_desc->pp22.buf_dma_addr_ptp &= cpu_to_le64(~0xffffff0000000000ULL);
4148 	tx_desc->pp22.buf_dma_addr_ptp |= cpu_to_le64((ptpdesc >> 12) << 40);
4149 
4150 	return true;
4151 }
4152 
/* Handle tx fragment processing */
4154 static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
4155 				 struct mvpp2_tx_queue *aggr_txq,
4156 				 struct mvpp2_tx_queue *txq)
4157 {
4158 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4159 	struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4160 	struct mvpp2_tx_desc *tx_desc;
4161 	int i;
4162 	dma_addr_t buf_dma_addr;
4163 
4164 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4165 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4166 		void *addr = skb_frag_address(frag);
4167 
4168 		tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4169 		mvpp2_txdesc_clear_ptp(port, tx_desc);
4170 		mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4171 		mvpp2_txdesc_size_set(port, tx_desc, skb_frag_size(frag));
4172 
4173 		buf_dma_addr = dma_map_single(port->dev->dev.parent, addr,
4174 					      skb_frag_size(frag),
4175 					      DMA_TO_DEVICE);
4176 		if (dma_mapping_error(port->dev->dev.parent, buf_dma_addr)) {
4177 			mvpp2_txq_desc_put(txq);
4178 			goto cleanup;
4179 		}
4180 
4181 		mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4182 
4183 		if (i == (skb_shinfo(skb)->nr_frags - 1)) {
4184 			/* Last descriptor */
4185 			mvpp2_txdesc_cmd_set(port, tx_desc,
4186 					     MVPP2_TXD_L_DESC);
4187 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4188 		} else {
4189 			/* Descriptor in the middle: Not First, Not Last */
4190 			mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4191 			mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4192 		}
4193 	}
4194 
4195 	return 0;
4196 cleanup:
4197 	/* Release all descriptors that were used to map fragments of
4198 	 * this packet, as well as the corresponding DMA mappings
4199 	 */
4200 	for (i = i - 1; i >= 0; i--) {
4201 		tx_desc = txq->descs + i;
4202 		tx_desc_unmap_put(port, txq, tx_desc);
4203 	}
4204 
4205 	return -ENOMEM;
4206 }
4207 
4208 static inline void mvpp2_tso_put_hdr(struct sk_buff *skb,
4209 				     struct net_device *dev,
4210 				     struct mvpp2_tx_queue *txq,
4211 				     struct mvpp2_tx_queue *aggr_txq,
4212 				     struct mvpp2_txq_pcpu *txq_pcpu,
4213 				     int hdr_sz)
4214 {
4215 	struct mvpp2_port *port = netdev_priv(dev);
4216 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4217 	dma_addr_t addr;
4218 
4219 	mvpp2_txdesc_clear_ptp(port, tx_desc);
4220 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4221 	mvpp2_txdesc_size_set(port, tx_desc, hdr_sz);
4222 
4223 	addr = txq_pcpu->tso_headers_dma +
4224 	       txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4225 	mvpp2_txdesc_dma_addr_set(port, tx_desc, addr);
4226 
4227 	mvpp2_txdesc_cmd_set(port, tx_desc, mvpp2_skb_tx_csum(port, skb) |
4228 					    MVPP2_TXD_F_DESC |
4229 					    MVPP2_TXD_PADDING_DISABLE);
4230 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4231 }
4232 
4233 static inline int mvpp2_tso_put_data(struct sk_buff *skb,
4234 				     struct net_device *dev, struct tso_t *tso,
4235 				     struct mvpp2_tx_queue *txq,
4236 				     struct mvpp2_tx_queue *aggr_txq,
4237 				     struct mvpp2_txq_pcpu *txq_pcpu,
4238 				     int sz, bool left, bool last)
4239 {
4240 	struct mvpp2_port *port = netdev_priv(dev);
4241 	struct mvpp2_tx_desc *tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4242 	dma_addr_t buf_dma_addr;
4243 
4244 	mvpp2_txdesc_clear_ptp(port, tx_desc);
4245 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4246 	mvpp2_txdesc_size_set(port, tx_desc, sz);
4247 
4248 	buf_dma_addr = dma_map_single(dev->dev.parent, tso->data, sz,
4249 				      DMA_TO_DEVICE);
4250 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4251 		mvpp2_txq_desc_put(txq);
4252 		return -ENOMEM;
4253 	}
4254 
4255 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4256 
4257 	if (!left) {
4258 		mvpp2_txdesc_cmd_set(port, tx_desc, MVPP2_TXD_L_DESC);
4259 		if (last) {
4260 			mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4261 			return 0;
4262 		}
4263 	} else {
4264 		mvpp2_txdesc_cmd_set(port, tx_desc, 0);
4265 	}
4266 
4267 	mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4268 	return 0;
4269 }
4270 
4271 static int mvpp2_tx_tso(struct sk_buff *skb, struct net_device *dev,
4272 			struct mvpp2_tx_queue *txq,
4273 			struct mvpp2_tx_queue *aggr_txq,
4274 			struct mvpp2_txq_pcpu *txq_pcpu)
4275 {
4276 	struct mvpp2_port *port = netdev_priv(dev);
4277 	int hdr_sz, i, len, descs = 0;
4278 	struct tso_t tso;
4279 
4280 	/* Check number of available descriptors */
4281 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
4282 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
4283 					     tso_count_descs(skb)))
4284 		return 0;
4285 
4286 	hdr_sz = tso_start(skb, &tso);
4287 
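	/* Segment the payload into gso_size chunks; each segment gets a
	 * freshly built header descriptor followed by data descriptors.
	 */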
4288 	len = skb->len - hdr_sz;
4289 	while (len > 0) {
4290 		int left = min_t(int, skb_shinfo(skb)->gso_size, len);
4291 		char *hdr = txq_pcpu->tso_headers +
4292 			    txq_pcpu->txq_put_index * TSO_HEADER_SIZE;
4293 
4294 		len -= left;
4295 		descs++;
4296 
4297 		tso_build_hdr(skb, hdr, &tso, left, len == 0);
4298 		mvpp2_tso_put_hdr(skb, dev, txq, aggr_txq, txq_pcpu, hdr_sz);
4299 
4300 		while (left > 0) {
			int sz = min_t(int, tso.size, left);

			left -= sz;
4303 			descs++;
4304 
4305 			if (mvpp2_tso_put_data(skb, dev, &tso, txq, aggr_txq,
4306 					       txq_pcpu, sz, left, len == 0))
4307 				goto release;
4308 			tso_build_data(skb, &tso, sz);
4309 		}
4310 	}
4311 
4312 	return descs;
4313 
4314 release:
4315 	for (i = descs - 1; i >= 0; i--) {
		struct mvpp2_tx_desc *tx_desc = txq->descs + i;

		tx_desc_unmap_put(port, txq, tx_desc);
4318 	}
4319 	return 0;
4320 }
4321 
4322 /* Main tx processing */
4323 static netdev_tx_t mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
4324 {
4325 	struct mvpp2_port *port = netdev_priv(dev);
4326 	struct mvpp2_tx_queue *txq, *aggr_txq;
4327 	struct mvpp2_txq_pcpu *txq_pcpu;
4328 	struct mvpp2_tx_desc *tx_desc;
4329 	dma_addr_t buf_dma_addr;
4330 	unsigned long flags = 0;
4331 	unsigned int thread;
4332 	int frags = 0;
4333 	u16 txq_id;
4334 	u32 tx_cmd;
4335 
4336 	thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4337 
4338 	txq_id = skb_get_queue_mapping(skb);
4339 	txq = port->txqs[txq_id];
4340 	txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
4341 	aggr_txq = &port->priv->aggr_txqs[thread];
4342 
4343 	if (test_bit(thread, &port->priv->lock_map))
4344 		spin_lock_irqsave(&port->tx_lock[thread], flags);
4345 
4346 	if (skb_is_gso(skb)) {
4347 		frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
4348 		goto out;
4349 	}
4350 	frags = skb_shinfo(skb)->nr_frags + 1;
4351 
4352 	/* Check number of available descriptors */
4353 	if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
4354 	    mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
4355 		frags = 0;
4356 		goto out;
4357 	}
4358 
4359 	/* Get a descriptor for the first part of the packet */
4360 	tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
4361 	if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ||
4362 	    !mvpp2_tx_hw_tstamp(port, tx_desc, skb))
4363 		mvpp2_txdesc_clear_ptp(port, tx_desc);
4364 	mvpp2_txdesc_txq_set(port, tx_desc, txq->id);
4365 	mvpp2_txdesc_size_set(port, tx_desc, skb_headlen(skb));
4366 
4367 	buf_dma_addr = dma_map_single(dev->dev.parent, skb->data,
4368 				      skb_headlen(skb), DMA_TO_DEVICE);
4369 	if (unlikely(dma_mapping_error(dev->dev.parent, buf_dma_addr))) {
4370 		mvpp2_txq_desc_put(txq);
4371 		frags = 0;
4372 		goto out;
4373 	}
4374 
4375 	mvpp2_txdesc_dma_addr_set(port, tx_desc, buf_dma_addr);
4376 
4377 	tx_cmd = mvpp2_skb_tx_csum(port, skb);
4378 
4379 	if (frags == 1) {
4380 		/* First and Last descriptor */
4381 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
4382 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4383 		mvpp2_txq_inc_put(port, txq_pcpu, skb, tx_desc, MVPP2_TYPE_SKB);
4384 	} else {
4385 		/* First but not Last */
4386 		tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
4387 		mvpp2_txdesc_cmd_set(port, tx_desc, tx_cmd);
4388 		mvpp2_txq_inc_put(port, txq_pcpu, NULL, tx_desc, MVPP2_TYPE_SKB);
4389 
4390 		/* Continue with other skb fragments */
4391 		if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
4392 			tx_desc_unmap_put(port, txq, tx_desc);
4393 			frags = 0;
4394 		}
4395 	}
4396 
4397 out:
4398 	if (frags > 0) {
4399 		struct mvpp2_pcpu_stats *stats = per_cpu_ptr(port->stats, thread);
4400 		struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
4401 
4402 		txq_pcpu->reserved_num -= frags;
4403 		txq_pcpu->count += frags;
4404 		aggr_txq->count += frags;
4405 
4406 		/* Enable transmit */
4407 		wmb();
4408 		mvpp2_aggr_txq_pend_desc_add(port, frags);
4409 
4410 		if (txq_pcpu->count >= txq_pcpu->stop_threshold)
4411 			netif_tx_stop_queue(nq);
4412 
4413 		u64_stats_update_begin(&stats->syncp);
4414 		stats->tx_packets++;
4415 		stats->tx_bytes += skb->len;
4416 		u64_stats_update_end(&stats->syncp);
4417 	} else {
4418 		dev->stats.tx_dropped++;
4419 		dev_kfree_skb_any(skb);
4420 	}
4421 
4422 	/* Finalize TX processing */
4423 	if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal)
4424 		mvpp2_txq_done(port, txq, txq_pcpu);
4425 
4426 	/* Set the timer in case not all frags were processed */
4427 	if (!port->has_tx_irqs && txq_pcpu->count <= frags &&
4428 	    txq_pcpu->count > 0) {
4429 		struct mvpp2_port_pcpu *port_pcpu = per_cpu_ptr(port->pcpu, thread);
4430 
4431 		if (!port_pcpu->timer_scheduled) {
4432 			port_pcpu->timer_scheduled = true;
4433 			hrtimer_start(&port_pcpu->tx_done_timer,
4434 				      MVPP2_TXDONE_HRTIMER_PERIOD_NS,
4435 				      HRTIMER_MODE_REL_PINNED_SOFT);
4436 		}
4437 	}
4438 
4439 	if (test_bit(thread, &port->priv->lock_map))
4440 		spin_unlock_irqrestore(&port->tx_lock[thread], flags);
4441 
4442 	return NETDEV_TX_OK;
4443 }
4444 
4445 static inline void mvpp2_cause_error(struct net_device *dev, int cause)
4446 {
4447 	if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
4448 		netdev_err(dev, "FCS error\n");
4449 	if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
4450 		netdev_err(dev, "rx fifo overrun error\n");
4451 	if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
4452 		netdev_err(dev, "tx fifo underrun error\n");
4453 }
4454 
4455 static int mvpp2_poll(struct napi_struct *napi, int budget)
4456 {
4457 	u32 cause_rx_tx, cause_rx, cause_tx, cause_misc;
4458 	int rx_done = 0;
4459 	struct mvpp2_port *port = netdev_priv(napi->dev);
4460 	struct mvpp2_queue_vector *qv;
4461 	unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
4462 
4463 	qv = container_of(napi, struct mvpp2_queue_vector, napi);
4464 
4465 	/* Rx/Tx cause register
4466 	 *
4467 	 * Bits 0-15: each bit indicates received packets on the Rx queue
4468 	 * (bit 0 is for Rx queue 0).
4469 	 *
4470 	 * Bits 16-23: each bit indicates transmitted packets on the Tx queue
4471 	 * (bit 16 is for Tx queue 0).
4472 	 *
4473 	 * Each CPU has its own Rx/Tx cause register
4474 	 */
4475 	cause_rx_tx = mvpp2_thread_read_relaxed(port->priv, qv->sw_thread_id,
4476 						MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
4477 
4478 	cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
4479 	if (cause_misc) {
4480 		mvpp2_cause_error(port->dev, cause_misc);
4481 
4482 		/* Clear the cause register */
4483 		mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
4484 		mvpp2_thread_write(port->priv, thread,
4485 				   MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
4486 				   cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
4487 	}
4488 
4489 	if (port->has_tx_irqs) {
4490 		cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
4491 		if (cause_tx) {
4492 			cause_tx >>= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_OFFSET;
4493 			mvpp2_tx_done(port, cause_tx, qv->sw_thread_id);
4494 		}
4495 	}
4496 
4497 	/* Process RX packets */
4498 	cause_rx = cause_rx_tx &
4499 		   MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
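	/* The register reports queues relative to this vector; shift so the
	 * bits index the port's global rxq numbering.
	 */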
4500 	cause_rx <<= qv->first_rxq;
4501 	cause_rx |= qv->pending_cause_rx;
4502 	while (cause_rx && budget > 0) {
4503 		int count;
4504 		struct mvpp2_rx_queue *rxq;
4505 
4506 		rxq = mvpp2_get_rx_queue(port, cause_rx);
4507 		if (!rxq)
4508 			break;
4509 
4510 		count = mvpp2_rx(port, napi, budget, rxq);
4511 		rx_done += count;
4512 		budget -= count;
4513 		if (budget > 0) {
4514 			/* Clear the bit associated to this Rx queue
4515 			 * so that next iteration will continue from
4516 			 * the next Rx queue.
4517 			 */
4518 			cause_rx &= ~(1 << rxq->logic_rxq);
4519 		}
4520 	}
4521 
4522 	if (budget > 0) {
4523 		cause_rx = 0;
4524 		napi_complete_done(napi, rx_done);
4525 
4526 		mvpp2_qvec_interrupt_enable(qv);
4527 	}
4528 	qv->pending_cause_rx = cause_rx;
4529 	return rx_done;
4530 }
4531 
4532 static void mvpp22_mode_reconfigure(struct mvpp2_port *port)
4533 {
4534 	u32 ctrl3;
4535 
4536 	/* Set the GMAC & XLG MAC in reset */
4537 	mvpp2_mac_reset_assert(port);
4538 
4539 	/* Set the MPCS and XPCS in reset */
4540 	mvpp22_pcs_reset_assert(port);
4541 
4542 	/* comphy reconfiguration */
4543 	mvpp22_comphy_init(port);
4544 
4545 	/* gop reconfiguration */
4546 	mvpp22_gop_init(port);
4547 
4548 	mvpp22_pcs_reset_deassert(port);
4549 
4550 	if (mvpp2_port_supports_xlg(port)) {
4551 		ctrl3 = readl(port->base + MVPP22_XLG_CTRL3_REG);
4552 		ctrl3 &= ~MVPP22_XLG_CTRL3_MACMODESELECT_MASK;
4553 
4554 		if (mvpp2_is_xlg(port->phy_interface))
4555 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_10G;
4556 		else
4557 			ctrl3 |= MVPP22_XLG_CTRL3_MACMODESELECT_GMAC;
4558 
4559 		writel(ctrl3, port->base + MVPP22_XLG_CTRL3_REG);
4560 	}
4561 
4562 	if (mvpp2_port_supports_xlg(port) && mvpp2_is_xlg(port->phy_interface))
4563 		mvpp2_xlg_max_rx_size_set(port);
4564 	else
4565 		mvpp2_gmac_max_rx_size_set(port);
4566 }
4567 
4568 /* Set hw internals when starting port */
4569 static void mvpp2_start_dev(struct mvpp2_port *port)
4570 {
4571 	int i;
4572 
4573 	mvpp2_txp_max_tx_size_set(port);
4574 
4575 	for (i = 0; i < port->nqvecs; i++)
4576 		napi_enable(&port->qvecs[i].napi);
4577 
4578 	/* Enable interrupts on all threads */
4579 	mvpp2_interrupts_enable(port);
4580 
4581 	if (port->priv->hw_version >= MVPP22)
4582 		mvpp22_mode_reconfigure(port);
4583 
4584 	if (port->phylink) {
4585 		phylink_start(port->phylink);
4586 	} else {
4587 		mvpp2_acpi_start(port);
4588 	}
4589 
4590 	netif_tx_start_all_queues(port->dev);
4591 
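	/* Bit 0 of port->state flags a stopped port; mvpp2_xdp_xmit() bails
	 * out with -ENETDOWN while it is set.
	 */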
4592 	clear_bit(0, &port->state);
4593 }
4594 
4595 /* Set hw internals when stopping port */
4596 static void mvpp2_stop_dev(struct mvpp2_port *port)
4597 {
4598 	int i;
4599 
4600 	set_bit(0, &port->state);
4601 
4602 	/* Disable interrupts on all threads */
4603 	mvpp2_interrupts_disable(port);
4604 
4605 	for (i = 0; i < port->nqvecs; i++)
4606 		napi_disable(&port->qvecs[i].napi);
4607 
4608 	if (port->phylink)
4609 		phylink_stop(port->phylink);
4610 	phy_power_off(port->comphy);
4611 }
4612 
4613 static int mvpp2_check_ringparam_valid(struct net_device *dev,
4614 				       struct ethtool_ringparam *ring)
4615 {
4616 	u16 new_rx_pending = ring->rx_pending;
4617 	u16 new_tx_pending = ring->tx_pending;
4618 
4619 	if (ring->rx_pending == 0 || ring->tx_pending == 0)
4620 		return -EINVAL;
4621 
4622 	if (ring->rx_pending > MVPP2_MAX_RXD_MAX)
4623 		new_rx_pending = MVPP2_MAX_RXD_MAX;
4624 	else if (ring->rx_pending < MSS_THRESHOLD_START)
4625 		new_rx_pending = MSS_THRESHOLD_START;
4626 	else if (!IS_ALIGNED(ring->rx_pending, 16))
4627 		new_rx_pending = ALIGN(ring->rx_pending, 16);
4628 
4629 	if (ring->tx_pending > MVPP2_MAX_TXD_MAX)
4630 		new_tx_pending = MVPP2_MAX_TXD_MAX;
4631 	else if (!IS_ALIGNED(ring->tx_pending, 32))
4632 		new_tx_pending = ALIGN(ring->tx_pending, 32);
4633 
4634 	/* The Tx ring size cannot be smaller than the minimum number of
4635 	 * descriptors needed for TSO.
4636 	 */
4637 	if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
4638 		new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
4639 
4640 	if (ring->rx_pending != new_rx_pending) {
		netdev_info(dev, "illegal Rx ring size value %d, rounding to %d\n",
4642 			    ring->rx_pending, new_rx_pending);
4643 		ring->rx_pending = new_rx_pending;
4644 	}
4645 
4646 	if (ring->tx_pending != new_tx_pending) {
		netdev_info(dev, "illegal Tx ring size value %d, rounding to %d\n",
4648 			    ring->tx_pending, new_tx_pending);
4649 		ring->tx_pending = new_tx_pending;
4650 	}
4651 
4652 	return 0;
4653 }
4654 
4655 static void mvpp21_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
4656 {
4657 	u32 mac_addr_l, mac_addr_m, mac_addr_h;
4658 
4659 	mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
4660 	mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
4661 	mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
4662 	addr[0] = (mac_addr_h >> 24) & 0xFF;
4663 	addr[1] = (mac_addr_h >> 16) & 0xFF;
4664 	addr[2] = (mac_addr_h >> 8) & 0xFF;
4665 	addr[3] = mac_addr_h & 0xFF;
4666 	addr[4] = mac_addr_m & 0xFF;
4667 	addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
4668 }
4669 
4670 static int mvpp2_irqs_init(struct mvpp2_port *port)
4671 {
4672 	int err, i;
4673 
4674 	for (i = 0; i < port->nqvecs; i++) {
4675 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4676 
4677 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4678 			qv->mask = kzalloc(cpumask_size(), GFP_KERNEL);
4679 			if (!qv->mask) {
4680 				err = -ENOMEM;
4681 				goto err;
4682 			}
4683 
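			/* Keep irqbalance away from this IRQ: its affinity is
			 * pinned to one software thread's CPUs below.
			 */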
4684 			irq_set_status_flags(qv->irq, IRQ_NO_BALANCING);
4685 		}
4686 
4687 		err = request_irq(qv->irq, mvpp2_isr, 0, port->dev->name, qv);
4688 		if (err)
4689 			goto err;
4690 
4691 		if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
4692 			unsigned int cpu;
4693 
4694 			for_each_present_cpu(cpu) {
4695 				if (mvpp2_cpu_to_thread(port->priv, cpu) ==
4696 				    qv->sw_thread_id)
4697 					cpumask_set_cpu(cpu, qv->mask);
4698 			}
4699 
4700 			irq_set_affinity_hint(qv->irq, qv->mask);
4701 		}
4702 	}
4703 
4704 	return 0;
4705 err:
4706 	for (i = 0; i < port->nqvecs; i++) {
4707 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4708 
4709 		irq_set_affinity_hint(qv->irq, NULL);
4710 		kfree(qv->mask);
4711 		qv->mask = NULL;
4712 		free_irq(qv->irq, qv);
4713 	}
4714 
4715 	return err;
4716 }
4717 
4718 static void mvpp2_irqs_deinit(struct mvpp2_port *port)
4719 {
4720 	int i;
4721 
4722 	for (i = 0; i < port->nqvecs; i++) {
4723 		struct mvpp2_queue_vector *qv = port->qvecs + i;
4724 
4725 		irq_set_affinity_hint(qv->irq, NULL);
4726 		kfree(qv->mask);
4727 		qv->mask = NULL;
4728 		irq_clear_status_flags(qv->irq, IRQ_NO_BALANCING);
4729 		free_irq(qv->irq, qv);
4730 	}
4731 }
4732 
4733 static bool mvpp22_rss_is_supported(struct mvpp2_port *port)
4734 {
4735 	return (queue_mode == MVPP2_QDIST_MULTI_MODE) &&
4736 		!(port->flags & MVPP2_F_LOOPBACK);
4737 }
4738 
4739 static int mvpp2_open(struct net_device *dev)
4740 {
4741 	struct mvpp2_port *port = netdev_priv(dev);
4742 	struct mvpp2 *priv = port->priv;
4743 	unsigned char mac_bcast[ETH_ALEN] = {
4744 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
4745 	bool valid = false;
4746 	int err;
4747 
4748 	err = mvpp2_prs_mac_da_accept(port, mac_bcast, true);
4749 	if (err) {
4750 		netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
4751 		return err;
4752 	}
4753 	err = mvpp2_prs_mac_da_accept(port, dev->dev_addr, true);
4754 	if (err) {
4755 		netdev_err(dev, "mvpp2_prs_mac_da_accept own addr failed\n");
4756 		return err;
4757 	}
4758 	err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
4759 	if (err) {
4760 		netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
4761 		return err;
4762 	}
4763 	err = mvpp2_prs_def_flow(port);
4764 	if (err) {
4765 		netdev_err(dev, "mvpp2_prs_def_flow failed\n");
4766 		return err;
4767 	}
4768 
4769 	/* Allocate the Rx/Tx queues */
4770 	err = mvpp2_setup_rxqs(port);
4771 	if (err) {
4772 		netdev_err(port->dev, "cannot allocate Rx queues\n");
4773 		return err;
4774 	}
4775 
4776 	err = mvpp2_setup_txqs(port);
4777 	if (err) {
4778 		netdev_err(port->dev, "cannot allocate Tx queues\n");
4779 		goto err_cleanup_rxqs;
4780 	}
4781 
4782 	err = mvpp2_irqs_init(port);
4783 	if (err) {
4784 		netdev_err(port->dev, "cannot init IRQs\n");
4785 		goto err_cleanup_txqs;
4786 	}
4787 
4788 	if (port->phylink) {
4789 		err = phylink_fwnode_phy_connect(port->phylink, port->fwnode, 0);
4790 		if (err) {
4791 			netdev_err(port->dev, "could not attach PHY (%d)\n",
4792 				   err);
4793 			goto err_free_irq;
4794 		}
4795 
4796 		valid = true;
4797 	}
4798 
4799 	if (priv->hw_version >= MVPP22 && port->port_irq) {
4800 		err = request_irq(port->port_irq, mvpp2_port_isr, 0,
4801 				  dev->name, port);
4802 		if (err) {
4803 			netdev_err(port->dev,
4804 				   "cannot request port link/ptp IRQ %d\n",
4805 				   port->port_irq);
4806 			goto err_free_irq;
4807 		}
4808 
4809 		mvpp22_gop_setup_irq(port);
4810 
		/* By default, the link is down */
4812 		netif_carrier_off(port->dev);
4813 
4814 		valid = true;
4815 	} else {
4816 		port->port_irq = 0;
4817 	}
4818 
4819 	if (!valid) {
		netdev_err(port->dev,
			   "invalid configuration: no dt or link IRQ\n");
4822 		err = -ENOENT;
4823 		goto err_free_irq;
4824 	}
4825 
4826 	/* Unmask interrupts on all CPUs */
4827 	on_each_cpu(mvpp2_interrupts_unmask, port, 1);
4828 	mvpp2_shared_interrupt_mask_unmask(port, false);
4829 
4830 	mvpp2_start_dev(port);
4831 
4832 	/* Start hardware statistics gathering */
4833 	queue_delayed_work(priv->stats_queue, &port->stats_work,
4834 			   MVPP2_MIB_COUNTERS_STATS_DELAY);
4835 
4836 	return 0;
4837 
4838 err_free_irq:
4839 	mvpp2_irqs_deinit(port);
4840 err_cleanup_txqs:
4841 	mvpp2_cleanup_txqs(port);
4842 err_cleanup_rxqs:
4843 	mvpp2_cleanup_rxqs(port);
4844 	return err;
4845 }
4846 
4847 static int mvpp2_stop(struct net_device *dev)
4848 {
4849 	struct mvpp2_port *port = netdev_priv(dev);
4850 	struct mvpp2_port_pcpu *port_pcpu;
4851 	unsigned int thread;
4852 
4853 	mvpp2_stop_dev(port);
4854 
4855 	/* Mask interrupts on all threads */
4856 	on_each_cpu(mvpp2_interrupts_mask, port, 1);
4857 	mvpp2_shared_interrupt_mask_unmask(port, true);
4858 
4859 	if (port->phylink)
4860 		phylink_disconnect_phy(port->phylink);
4861 	if (port->port_irq)
4862 		free_irq(port->port_irq, port);
4863 
4864 	mvpp2_irqs_deinit(port);
4865 	if (!port->has_tx_irqs) {
4866 		for (thread = 0; thread < port->priv->nthreads; thread++) {
4867 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
4868 
4869 			hrtimer_cancel(&port_pcpu->tx_done_timer);
4870 			port_pcpu->timer_scheduled = false;
4871 		}
4872 	}
4873 	mvpp2_cleanup_rxqs(port);
4874 	mvpp2_cleanup_txqs(port);
4875 
4876 	cancel_delayed_work_sync(&port->stats_work);
4877 
4878 	mvpp2_mac_reset_assert(port);
4879 	mvpp22_pcs_reset_assert(port);
4880 
4881 	return 0;
4882 }
4883 
4884 static int mvpp2_prs_mac_da_accept_list(struct mvpp2_port *port,
4885 					struct netdev_hw_addr_list *list)
4886 {
4887 	struct netdev_hw_addr *ha;
4888 	int ret;
4889 
4890 	netdev_hw_addr_list_for_each(ha, list) {
4891 		ret = mvpp2_prs_mac_da_accept(port, ha->addr, true);
4892 		if (ret)
4893 			return ret;
4894 	}
4895 
4896 	return 0;
4897 }
4898 
4899 static void mvpp2_set_rx_promisc(struct mvpp2_port *port, bool enable)
4900 {
4901 	if (!enable && (port->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
4902 		mvpp2_prs_vid_enable_filtering(port);
4903 	else
4904 		mvpp2_prs_vid_disable_filtering(port);
4905 
4906 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4907 				  MVPP2_PRS_L2_UNI_CAST, enable);
4908 
4909 	mvpp2_prs_mac_promisc_set(port->priv, port->id,
4910 				  MVPP2_PRS_L2_MULTI_CAST, enable);
4911 }
4912 
4913 static void mvpp2_set_rx_mode(struct net_device *dev)
4914 {
4915 	struct mvpp2_port *port = netdev_priv(dev);
4916 
4917 	/* Clear the whole UC and MC list */
4918 	mvpp2_prs_mac_del_all(port);
4919 
4920 	if (dev->flags & IFF_PROMISC) {
4921 		mvpp2_set_rx_promisc(port, true);
4922 		return;
4923 	}
4924 
4925 	mvpp2_set_rx_promisc(port, false);
4926 
4927 	if (netdev_uc_count(dev) > MVPP2_PRS_MAC_UC_FILT_MAX ||
4928 	    mvpp2_prs_mac_da_accept_list(port, &dev->uc))
4929 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4930 					  MVPP2_PRS_L2_UNI_CAST, true);
4931 
4932 	if (dev->flags & IFF_ALLMULTI) {
4933 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4934 					  MVPP2_PRS_L2_MULTI_CAST, true);
4935 		return;
4936 	}
4937 
4938 	if (netdev_mc_count(dev) > MVPP2_PRS_MAC_MC_FILT_MAX ||
4939 	    mvpp2_prs_mac_da_accept_list(port, &dev->mc))
4940 		mvpp2_prs_mac_promisc_set(port->priv, port->id,
4941 					  MVPP2_PRS_L2_MULTI_CAST, true);
4942 }
4943 
4944 static int mvpp2_set_mac_address(struct net_device *dev, void *p)
4945 {
4946 	const struct sockaddr *addr = p;
4947 	int err;
4948 
4949 	if (!is_valid_ether_addr(addr->sa_data))
4950 		return -EADDRNOTAVAIL;
4951 
4952 	err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
4953 	if (err) {
		/* Reconfigure the parser to accept the original MAC address */
4955 		mvpp2_prs_update_mac_da(dev, dev->dev_addr);
4956 		netdev_err(dev, "failed to change MAC address\n");
4957 	}
4958 	return err;
4959 }
4960 
4961 /* Shut down all the ports, reconfigure the pools as percpu or shared,
4962  * then bring up again all ports.
4963  */
4964 static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu)
4965 {
4966 	bool change_percpu = (percpu != priv->percpu_pools);
4967 	int numbufs = MVPP2_BM_POOLS_NUM, i;
4968 	struct mvpp2_port *port = NULL;
4969 	bool status[MVPP2_MAX_PORTS];
4970 
4971 	for (i = 0; i < priv->port_count; i++) {
4972 		port = priv->port_list[i];
4973 		status[i] = netif_running(port->dev);
4974 		if (status[i])
4975 			mvpp2_stop(port->dev);
4976 	}
4977 
4978 	/* nrxqs is the same for all ports */
4979 	if (priv->percpu_pools)
4980 		numbufs = port->nrxqs * 2;
4981 
4982 	if (change_percpu)
4983 		mvpp2_bm_pool_update_priv_fc(priv, false);
4984 
4985 	for (i = 0; i < numbufs; i++)
4986 		mvpp2_bm_pool_destroy(port->dev->dev.parent, priv, &priv->bm_pools[i]);
4987 
4988 	devm_kfree(port->dev->dev.parent, priv->bm_pools);
4989 	priv->percpu_pools = percpu;
4990 	mvpp2_bm_init(port->dev->dev.parent, priv);
4991 
4992 	for (i = 0; i < priv->port_count; i++) {
4993 		port = priv->port_list[i];
4994 		mvpp2_swf_bm_pool_init(port);
4995 		if (status[i])
4996 			mvpp2_open(port->dev);
4997 	}
4998 
4999 	if (change_percpu)
5000 		mvpp2_bm_pool_update_priv_fc(priv, true);
5001 
5002 	return 0;
5003 }
5004 
5005 static int mvpp2_change_mtu(struct net_device *dev, int mtu)
5006 {
5007 	struct mvpp2_port *port = netdev_priv(dev);
5008 	bool running = netif_running(dev);
5009 	struct mvpp2 *priv = port->priv;
5010 	int err;
5011 
5012 	if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
		netdev_info(dev, "illegal MTU value %d, rounding to %d\n", mtu,
5014 			    ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
5015 		mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
5016 	}
5017 
5018 	if (MVPP2_RX_PKT_SIZE(mtu) > MVPP2_BM_LONG_PKT_SIZE) {
5019 		if (port->xdp_prog) {
5020 			netdev_err(dev, "Jumbo frames are not supported with XDP\n");
5021 			return -EINVAL;
5022 		}
5023 		if (priv->percpu_pools) {
			netdev_warn(dev, "mtu %d too high, switching to shared buffers\n", mtu);
5025 			mvpp2_bm_switch_buffers(priv, false);
5026 		}
5027 	} else {
5028 		bool jumbo = false;
5029 		int i;
5030 
5031 		for (i = 0; i < priv->port_count; i++)
5032 			if (priv->port_list[i] != port &&
5033 			    MVPP2_RX_PKT_SIZE(priv->port_list[i]->dev->mtu) >
5034 			    MVPP2_BM_LONG_PKT_SIZE) {
5035 				jumbo = true;
5036 				break;
5037 			}
5038 
5039 		/* No port is using jumbo frames */
5040 		if (!jumbo) {
			dev_info(port->dev->dev.parent,
				 "all ports have a low MTU, switching to per-cpu buffers\n");
5043 			mvpp2_bm_switch_buffers(priv, true);
5044 		}
5045 	}
5046 
5047 	if (running)
5048 		mvpp2_stop_dev(port);
5049 
5050 	err = mvpp2_bm_update_mtu(dev, mtu);
5051 	if (err) {
5052 		netdev_err(dev, "failed to change MTU\n");
5053 		/* Reconfigure BM to the original MTU */
5054 		mvpp2_bm_update_mtu(dev, dev->mtu);
5055 	} else {
		port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
5057 	}
5058 
5059 	if (running) {
5060 		mvpp2_start_dev(port);
5061 		mvpp2_egress_enable(port);
5062 		mvpp2_ingress_enable(port);
5063 	}
5064 
5065 	return err;
5066 }
5067 
5068 static int mvpp2_check_pagepool_dma(struct mvpp2_port *port)
5069 {
5070 	enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
5071 	struct mvpp2 *priv = port->priv;
5072 	int err = -1, i;
5073 
5074 	if (!priv->percpu_pools)
5075 		return err;
5076 
5077 	if (!priv->page_pool[0])
5078 		return -ENOMEM;
5079 
5080 	for (i = 0; i < priv->port_count; i++) {
5081 		port = priv->port_list[i];
5082 		if (port->xdp_prog) {
5083 			dma_dir = DMA_BIDIRECTIONAL;
5084 			break;
5085 		}
5086 	}
5087 
5088 	/* All pools are equal in terms of DMA direction */
5089 	if (priv->page_pool[0]->p.dma_dir != dma_dir)
5090 		err = mvpp2_bm_switch_buffers(priv, true);
5091 
5092 	return err;
5093 }
5094 
5095 static void
5096 mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
5097 {
5098 	struct mvpp2_port *port = netdev_priv(dev);
5099 	unsigned int start;
5100 	unsigned int cpu;
5101 
5102 	for_each_possible_cpu(cpu) {
5103 		struct mvpp2_pcpu_stats *cpu_stats;
5104 		u64 rx_packets;
5105 		u64 rx_bytes;
5106 		u64 tx_packets;
5107 		u64 tx_bytes;
5108 
5109 		cpu_stats = per_cpu_ptr(port->stats, cpu);
5110 		do {
5111 			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
5112 			rx_packets = cpu_stats->rx_packets;
5113 			rx_bytes   = cpu_stats->rx_bytes;
5114 			tx_packets = cpu_stats->tx_packets;
5115 			tx_bytes   = cpu_stats->tx_bytes;
5116 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
5117 
5118 		stats->rx_packets += rx_packets;
5119 		stats->rx_bytes   += rx_bytes;
5120 		stats->tx_packets += tx_packets;
5121 		stats->tx_bytes   += tx_bytes;
5122 	}
5123 
5124 	stats->rx_errors	= dev->stats.rx_errors;
5125 	stats->rx_dropped	= dev->stats.rx_dropped;
5126 	stats->tx_dropped	= dev->stats.tx_dropped;
5127 }
5128 
5129 static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5130 {
5131 	struct hwtstamp_config config;
5132 	void __iomem *ptp;
5133 	u32 gcr, int_mask;
5134 
5135 	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5136 		return -EFAULT;
5137 
5138 	if (config.flags)
5139 		return -EINVAL;
5140 
5141 	if (config.tx_type != HWTSTAMP_TX_OFF &&
5142 	    config.tx_type != HWTSTAMP_TX_ON)
5143 		return -ERANGE;
5144 
5145 	ptp = port->priv->iface_base + MVPP22_PTP_BASE(port->gop_id);
5146 
5147 	int_mask = gcr = 0;
5148 	if (config.tx_type != HWTSTAMP_TX_OFF) {
5149 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_TX_RESET;
5150 		int_mask |= MVPP22_PTP_INT_MASK_QUEUE1 |
5151 			    MVPP22_PTP_INT_MASK_QUEUE0;
5152 	}
5153 
5154 	/* It seems we must also release the TX reset when enabling the TSU */
5155 	if (config.rx_filter != HWTSTAMP_FILTER_NONE)
5156 		gcr |= MVPP22_PTP_GCR_TSU_ENABLE | MVPP22_PTP_GCR_RX_RESET |
5157 		       MVPP22_PTP_GCR_TX_RESET;
5158 
5159 	if (gcr & MVPP22_PTP_GCR_TSU_ENABLE)
5160 		mvpp22_tai_start(port->priv->tai);
5161 
5162 	if (config.rx_filter != HWTSTAMP_FILTER_NONE) {
5163 		config.rx_filter = HWTSTAMP_FILTER_ALL;
5164 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
5165 			     MVPP22_PTP_GCR_RX_RESET |
5166 			     MVPP22_PTP_GCR_TX_RESET |
5167 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5168 		port->rx_hwtstamp = true;
5169 	} else {
5170 		port->rx_hwtstamp = false;
5171 		mvpp2_modify(ptp + MVPP22_PTP_GCR,
5172 			     MVPP22_PTP_GCR_RX_RESET |
5173 			     MVPP22_PTP_GCR_TX_RESET |
5174 			     MVPP22_PTP_GCR_TSU_ENABLE, gcr);
5175 	}
5176 
5177 	mvpp2_modify(ptp + MVPP22_PTP_INT_MASK,
5178 		     MVPP22_PTP_INT_MASK_QUEUE1 |
5179 		     MVPP22_PTP_INT_MASK_QUEUE0, int_mask);
5180 
5181 	if (!(gcr & MVPP22_PTP_GCR_TSU_ENABLE))
5182 		mvpp22_tai_stop(port->priv->tai);
5183 
5184 	port->tx_hwtstamp_type = config.tx_type;
5185 
5186 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5187 		return -EFAULT;
5188 
5189 	return 0;
5190 }
5191 
5192 static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr)
5193 {
5194 	struct hwtstamp_config config;
5195 
5196 	memset(&config, 0, sizeof(config));
5197 
5198 	config.tx_type = port->tx_hwtstamp_type;
5199 	config.rx_filter = port->rx_hwtstamp ?
5200 		HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
5201 
5202 	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
5203 		return -EFAULT;
5204 
5205 	return 0;
5206 }
5207 
5208 static int mvpp2_ethtool_get_ts_info(struct net_device *dev,
5209 				     struct ethtool_ts_info *info)
5210 {
5211 	struct mvpp2_port *port = netdev_priv(dev);
5212 
5213 	if (!port->hwtstamp)
5214 		return -EOPNOTSUPP;
5215 
5216 	info->phc_index = mvpp22_tai_ptp_clock_index(port->priv->tai);
5217 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
5218 				SOF_TIMESTAMPING_RX_SOFTWARE |
5219 				SOF_TIMESTAMPING_SOFTWARE |
5220 				SOF_TIMESTAMPING_TX_HARDWARE |
5221 				SOF_TIMESTAMPING_RX_HARDWARE |
5222 				SOF_TIMESTAMPING_RAW_HARDWARE;
5223 	info->tx_types = BIT(HWTSTAMP_TX_OFF) |
5224 			 BIT(HWTSTAMP_TX_ON);
5225 	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
5226 			   BIT(HWTSTAMP_FILTER_ALL);
5227 
5228 	return 0;
5229 }
5230 
5231 static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5232 {
5233 	struct mvpp2_port *port = netdev_priv(dev);
5234 
5235 	switch (cmd) {
5236 	case SIOCSHWTSTAMP:
5237 		if (port->hwtstamp)
5238 			return mvpp2_set_ts_config(port, ifr);
5239 		break;
5240 
5241 	case SIOCGHWTSTAMP:
5242 		if (port->hwtstamp)
5243 			return mvpp2_get_ts_config(port, ifr);
5244 		break;
5245 	}
5246 
5247 	if (!port->phylink)
5248 		return -ENOTSUPP;
5249 
5250 	return phylink_mii_ioctl(port->phylink, ifr, cmd);
5251 }
5252 
5253 static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
5254 {
5255 	struct mvpp2_port *port = netdev_priv(dev);
5256 	int ret;
5257 
5258 	ret = mvpp2_prs_vid_entry_add(port, vid);
5259 	if (ret)
5260 		netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
5261 			   MVPP2_PRS_VLAN_FILT_MAX - 1);
5262 	return ret;
5263 }
5264 
5265 static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
5266 {
5267 	struct mvpp2_port *port = netdev_priv(dev);
5268 
5269 	mvpp2_prs_vid_entry_remove(port, vid);
5270 	return 0;
5271 }
5272 
5273 static int mvpp2_set_features(struct net_device *dev,
5274 			      netdev_features_t features)
5275 {
5276 	netdev_features_t changed = dev->features ^ features;
5277 	struct mvpp2_port *port = netdev_priv(dev);
5278 
5279 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
5280 		if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
5281 			mvpp2_prs_vid_enable_filtering(port);
5282 		} else {
5283 			/* Invalidate all registered VID filters for this
5284 			 * port
5285 			 */
5286 			mvpp2_prs_vid_remove_all(port);
5287 
5288 			mvpp2_prs_vid_disable_filtering(port);
5289 		}
5290 	}
5291 
5292 	if (changed & NETIF_F_RXHASH) {
5293 		if (features & NETIF_F_RXHASH)
5294 			mvpp22_port_rss_enable(port);
5295 		else
5296 			mvpp22_port_rss_disable(port);
5297 	}
5298 
5299 	return 0;
5300 }
5301 
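/* Attach or detach an XDP program. The checks below reflect the driver's
 * constraints: frames must fit a non-jumbo MTU, buffers must come from
 * per-CPU BM pools, and XDP_TX needs a second set of TX queues per CPU
 * so XDP transmission does not contend with the regular stack's queues.
 */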
5302 static int mvpp2_xdp_setup(struct mvpp2_port *port, struct netdev_bpf *bpf)
5303 {
5304 	struct bpf_prog *prog = bpf->prog, *old_prog;
5305 	bool running = netif_running(port->dev);
5306 	bool reset = !prog != !port->xdp_prog;
5307 
5308 	if (port->dev->mtu > ETH_DATA_LEN) {
5309 		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP is not supported with jumbo frames enabled");
5310 		return -EOPNOTSUPP;
5311 	}
5312 
5313 	if (!port->priv->percpu_pools) {
5314 		NL_SET_ERR_MSG_MOD(bpf->extack, "Per CPU Pools required for XDP");
5315 		return -EOPNOTSUPP;
5316 	}
5317 
5318 	if (port->ntxqs < num_possible_cpus() * 2) {
5319 		NL_SET_ERR_MSG_MOD(bpf->extack, "XDP_TX needs two TX queues per CPU");
5320 		return -EOPNOTSUPP;
5321 	}
5322 
	/* device is up and bpf is added/removed, must set up the RX queues */
5324 	if (running && reset)
5325 		mvpp2_stop(port->dev);
5326 
5327 	old_prog = xchg(&port->xdp_prog, prog);
5328 	if (old_prog)
5329 		bpf_prog_put(old_prog);
5330 
	/* bpf is just replaced, RXQ and MTU are already set up */
5332 	if (!reset)
5333 		return 0;
5334 
5335 	/* device was up, restore the link */
5336 	if (running)
5337 		mvpp2_open(port->dev);
5338 
5339 	/* Check Page Pool DMA Direction */
5340 	mvpp2_check_pagepool_dma(port);
5341 
5342 	return 0;
5343 }
5344 
5345 static int mvpp2_xdp(struct net_device *dev, struct netdev_bpf *xdp)
5346 {
5347 	struct mvpp2_port *port = netdev_priv(dev);
5348 
5349 	switch (xdp->command) {
5350 	case XDP_SETUP_PROG:
5351 		return mvpp2_xdp_setup(port, xdp);
5352 	default:
5353 		return -EINVAL;
5354 	}
5355 }
5356 
5357 /* Ethtool methods */
5358 
5359 static int mvpp2_ethtool_nway_reset(struct net_device *dev)
5360 {
5361 	struct mvpp2_port *port = netdev_priv(dev);
5362 
5363 	if (!port->phylink)
		return -EOPNOTSUPP;
5365 
5366 	return phylink_ethtool_nway_reset(port->phylink);
5367 }
5368 
/* Set interrupt coalescing for ethtool */
5370 static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5371 				      struct ethtool_coalesce *c)
5372 {
5373 	struct mvpp2_port *port = netdev_priv(dev);
5374 	int queue;
5375 
5376 	for (queue = 0; queue < port->nrxqs; queue++) {
5377 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5378 
5379 		rxq->time_coal = c->rx_coalesce_usecs;
5380 		rxq->pkts_coal = c->rx_max_coalesced_frames;
5381 		mvpp2_rx_pkts_coal_set(port, rxq);
5382 		mvpp2_rx_time_coal_set(port, rxq);
5383 	}
5384 
5385 	if (port->has_tx_irqs) {
5386 		port->tx_time_coal = c->tx_coalesce_usecs;
5387 		mvpp2_tx_time_coal_set(port);
5388 	}
5389 
5390 	for (queue = 0; queue < port->ntxqs; queue++) {
5391 		struct mvpp2_tx_queue *txq = port->txqs[queue];
5392 
5393 		txq->done_pkts_coal = c->tx_max_coalesced_frames;
5394 
5395 		if (port->has_tx_irqs)
5396 			mvpp2_tx_pkts_coal_set(port, txq);
5397 	}
5398 
5399 	return 0;
5400 }
5401 
/* Get interrupt coalescing for ethtool */
5403 static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
5404 				      struct ethtool_coalesce *c)
5405 {
5406 	struct mvpp2_port *port = netdev_priv(dev);
5407 
5408 	c->rx_coalesce_usecs       = port->rxqs[0]->time_coal;
5409 	c->rx_max_coalesced_frames = port->rxqs[0]->pkts_coal;
5410 	c->tx_max_coalesced_frames = port->txqs[0]->done_pkts_coal;
5411 	c->tx_coalesce_usecs       = port->tx_time_coal;
5412 	return 0;
5413 }
5414 
5415 static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
5416 				      struct ethtool_drvinfo *drvinfo)
5417 {
5418 	strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
5419 		sizeof(drvinfo->driver));
5420 	strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
5421 		sizeof(drvinfo->version));
5422 	strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
5423 		sizeof(drvinfo->bus_info));
5424 }
5425 
5426 static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
5427 					struct ethtool_ringparam *ring)
5428 {
5429 	struct mvpp2_port *port = netdev_priv(dev);
5430 
5431 	ring->rx_max_pending = MVPP2_MAX_RXD_MAX;
5432 	ring->tx_max_pending = MVPP2_MAX_TXD_MAX;
5433 	ring->rx_pending = port->rx_ring_size;
5434 	ring->tx_pending = port->tx_ring_size;
5435 }
5436 
5437 static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
5438 				       struct ethtool_ringparam *ring)
5439 {
5440 	struct mvpp2_port *port = netdev_priv(dev);
5441 	u16 prev_rx_ring_size = port->rx_ring_size;
5442 	u16 prev_tx_ring_size = port->tx_ring_size;
5443 	int err;
5444 
5445 	err = mvpp2_check_ringparam_valid(dev, ring);
5446 	if (err)
5447 		return err;
5448 
5449 	if (!netif_running(dev)) {
5450 		port->rx_ring_size = ring->rx_pending;
5451 		port->tx_ring_size = ring->tx_pending;
5452 		return 0;
5453 	}
5454 
5455 	/* The interface is running, so we have to force a
5456 	 * reallocation of the queues
5457 	 */
5458 	mvpp2_stop_dev(port);
5459 	mvpp2_cleanup_rxqs(port);
5460 	mvpp2_cleanup_txqs(port);
5461 
5462 	port->rx_ring_size = ring->rx_pending;
5463 	port->tx_ring_size = ring->tx_pending;
5464 
5465 	err = mvpp2_setup_rxqs(port);
5466 	if (err) {
5467 		/* Reallocate Rx queues with the original ring size */
5468 		port->rx_ring_size = prev_rx_ring_size;
5469 		ring->rx_pending = prev_rx_ring_size;
5470 		err = mvpp2_setup_rxqs(port);
5471 		if (err)
5472 			goto err_out;
5473 	}
5474 	err = mvpp2_setup_txqs(port);
5475 	if (err) {
5476 		/* Reallocate Tx queues with the original ring size */
5477 		port->tx_ring_size = prev_tx_ring_size;
5478 		ring->tx_pending = prev_tx_ring_size;
5479 		err = mvpp2_setup_txqs(port);
5480 		if (err)
5481 			goto err_clean_rxqs;
5482 	}
5483 
5484 	mvpp2_start_dev(port);
5485 	mvpp2_egress_enable(port);
5486 	mvpp2_ingress_enable(port);
5487 
5488 	return 0;
5489 
5490 err_clean_rxqs:
5491 	mvpp2_cleanup_rxqs(port);
5492 err_out:
5493 	netdev_err(dev, "failed to change ring parameters");
5494 	return err;
5495 }
5496 
5497 static void mvpp2_ethtool_get_pause_param(struct net_device *dev,
5498 					  struct ethtool_pauseparam *pause)
5499 {
5500 	struct mvpp2_port *port = netdev_priv(dev);
5501 
5502 	if (!port->phylink)
5503 		return;
5504 
5505 	phylink_ethtool_get_pauseparam(port->phylink, pause);
5506 }
5507 
5508 static int mvpp2_ethtool_set_pause_param(struct net_device *dev,
5509 					 struct ethtool_pauseparam *pause)
5510 {
5511 	struct mvpp2_port *port = netdev_priv(dev);
5512 
5513 	if (!port->phylink)
		return -EOPNOTSUPP;
5515 
5516 	return phylink_ethtool_set_pauseparam(port->phylink, pause);
5517 }
5518 
5519 static int mvpp2_ethtool_get_link_ksettings(struct net_device *dev,
5520 					    struct ethtool_link_ksettings *cmd)
5521 {
5522 	struct mvpp2_port *port = netdev_priv(dev);
5523 
5524 	if (!port->phylink)
		return -EOPNOTSUPP;
5526 
5527 	return phylink_ethtool_ksettings_get(port->phylink, cmd);
5528 }
5529 
5530 static int mvpp2_ethtool_set_link_ksettings(struct net_device *dev,
5531 					    const struct ethtool_link_ksettings *cmd)
5532 {
5533 	struct mvpp2_port *port = netdev_priv(dev);
5534 
5535 	if (!port->phylink)
		return -EOPNOTSUPP;
5537 
5538 	return phylink_ethtool_ksettings_set(port->phylink, cmd);
5539 }
5540 
5541 static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
5542 				   struct ethtool_rxnfc *info, u32 *rules)
5543 {
5544 	struct mvpp2_port *port = netdev_priv(dev);
5545 	int ret = 0, i, loc = 0;
5546 
5547 	if (!mvpp22_rss_is_supported(port))
5548 		return -EOPNOTSUPP;
5549 
5550 	switch (info->cmd) {
5551 	case ETHTOOL_GRXFH:
5552 		ret = mvpp2_ethtool_rxfh_get(port, info);
5553 		break;
5554 	case ETHTOOL_GRXRINGS:
5555 		info->data = port->nrxqs;
5556 		break;
5557 	case ETHTOOL_GRXCLSRLCNT:
5558 		info->rule_cnt = port->n_rfs_rules;
5559 		break;
5560 	case ETHTOOL_GRXCLSRULE:
5561 		ret = mvpp2_ethtool_cls_rule_get(port, info);
5562 		break;
5563 	case ETHTOOL_GRXCLSRLALL:
5564 		for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
5565 			if (port->rfs_rules[i])
5566 				rules[loc++] = i;
5567 		}
5568 		break;
5569 	default:
		return -EOPNOTSUPP;
5571 	}
5572 
5573 	return ret;
5574 }
5575 
5576 static int mvpp2_ethtool_set_rxnfc(struct net_device *dev,
5577 				   struct ethtool_rxnfc *info)
5578 {
5579 	struct mvpp2_port *port = netdev_priv(dev);
5580 	int ret = 0;
5581 
5582 	if (!mvpp22_rss_is_supported(port))
5583 		return -EOPNOTSUPP;
5584 
5585 	switch (info->cmd) {
5586 	case ETHTOOL_SRXFH:
5587 		ret = mvpp2_ethtool_rxfh_set(port, info);
5588 		break;
5589 	case ETHTOOL_SRXCLSRLINS:
5590 		ret = mvpp2_ethtool_cls_rule_ins(port, info);
5591 		break;
5592 	case ETHTOOL_SRXCLSRLDEL:
5593 		ret = mvpp2_ethtool_cls_rule_del(port, info);
5594 		break;
5595 	default:
5596 		return -EOPNOTSUPP;
5597 	}
5598 	return ret;
5599 }
5600 
5601 static u32 mvpp2_ethtool_get_rxfh_indir_size(struct net_device *dev)
5602 {
5603 	struct mvpp2_port *port = netdev_priv(dev);
5604 
5605 	return mvpp22_rss_is_supported(port) ? MVPP22_RSS_TABLE_ENTRIES : 0;
5606 }
5607 
5608 static int mvpp2_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
5609 				  u8 *hfunc)
5610 {
5611 	struct mvpp2_port *port = netdev_priv(dev);
5612 	int ret = 0;
5613 
5614 	if (!mvpp22_rss_is_supported(port))
5615 		return -EOPNOTSUPP;
5616 
5617 	if (indir)
5618 		ret = mvpp22_port_rss_ctx_indir_get(port, 0, indir);
5619 
5620 	if (hfunc)
5621 		*hfunc = ETH_RSS_HASH_CRC32;
5622 
5623 	return ret;
5624 }
5625 
5626 static int mvpp2_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
5627 				  const u8 *key, const u8 hfunc)
5628 {
5629 	struct mvpp2_port *port = netdev_priv(dev);
5630 	int ret = 0;
5631 
5632 	if (!mvpp22_rss_is_supported(port))
5633 		return -EOPNOTSUPP;
5634 
5635 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5636 		return -EOPNOTSUPP;
5637 
5638 	if (key)
5639 		return -EOPNOTSUPP;
5640 
5641 	if (indir)
5642 		ret = mvpp22_port_rss_ctx_indir_set(port, 0, indir);
5643 
5644 	return ret;
5645 }
5646 
5647 static int mvpp2_ethtool_get_rxfh_context(struct net_device *dev, u32 *indir,
5648 					  u8 *key, u8 *hfunc, u32 rss_context)
5649 {
5650 	struct mvpp2_port *port = netdev_priv(dev);
5651 	int ret = 0;
5652 
5653 	if (!mvpp22_rss_is_supported(port))
5654 		return -EOPNOTSUPP;
5655 	if (rss_context >= MVPP22_N_RSS_TABLES)
5656 		return -EINVAL;
5657 
5658 	if (hfunc)
5659 		*hfunc = ETH_RSS_HASH_CRC32;
5660 
5661 	if (indir)
5662 		ret = mvpp22_port_rss_ctx_indir_get(port, rss_context, indir);
5663 
5664 	return ret;
5665 }
5666 
5667 static int mvpp2_ethtool_set_rxfh_context(struct net_device *dev,
5668 					  const u32 *indir, const u8 *key,
5669 					  const u8 hfunc, u32 *rss_context,
5670 					  bool delete)
5671 {
5672 	struct mvpp2_port *port = netdev_priv(dev);
5673 	int ret;
5674 
5675 	if (!mvpp22_rss_is_supported(port))
5676 		return -EOPNOTSUPP;
5677 
5678 	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_CRC32)
5679 		return -EOPNOTSUPP;
5680 
5681 	if (key)
5682 		return -EOPNOTSUPP;
5683 
5684 	if (delete)
5685 		return mvpp22_port_rss_ctx_delete(port, *rss_context);
5686 
5687 	if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) {
5688 		ret = mvpp22_port_rss_ctx_create(port, rss_context);
5689 		if (ret)
5690 			return ret;
5691 	}
5692 
5693 	return mvpp22_port_rss_ctx_indir_set(port, *rss_context, indir);
5694 }
5695 /* Device ops */
5696 
5697 static const struct net_device_ops mvpp2_netdev_ops = {
5698 	.ndo_open		= mvpp2_open,
5699 	.ndo_stop		= mvpp2_stop,
5700 	.ndo_start_xmit		= mvpp2_tx,
5701 	.ndo_set_rx_mode	= mvpp2_set_rx_mode,
5702 	.ndo_set_mac_address	= mvpp2_set_mac_address,
5703 	.ndo_change_mtu		= mvpp2_change_mtu,
5704 	.ndo_get_stats64	= mvpp2_get_stats64,
5705 	.ndo_do_ioctl		= mvpp2_ioctl,
5706 	.ndo_vlan_rx_add_vid	= mvpp2_vlan_rx_add_vid,
5707 	.ndo_vlan_rx_kill_vid	= mvpp2_vlan_rx_kill_vid,
5708 	.ndo_set_features	= mvpp2_set_features,
5709 	.ndo_bpf		= mvpp2_xdp,
5710 	.ndo_xdp_xmit		= mvpp2_xdp_xmit,
5711 };
5712 
5713 static const struct ethtool_ops mvpp2_eth_tool_ops = {
5714 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
5715 				     ETHTOOL_COALESCE_MAX_FRAMES,
5716 	.nway_reset		= mvpp2_ethtool_nway_reset,
5717 	.get_link		= ethtool_op_get_link,
5718 	.get_ts_info		= mvpp2_ethtool_get_ts_info,
5719 	.set_coalesce		= mvpp2_ethtool_set_coalesce,
5720 	.get_coalesce		= mvpp2_ethtool_get_coalesce,
5721 	.get_drvinfo		= mvpp2_ethtool_get_drvinfo,
5722 	.get_ringparam		= mvpp2_ethtool_get_ringparam,
5723 	.set_ringparam		= mvpp2_ethtool_set_ringparam,
5724 	.get_strings		= mvpp2_ethtool_get_strings,
5725 	.get_ethtool_stats	= mvpp2_ethtool_get_stats,
5726 	.get_sset_count		= mvpp2_ethtool_get_sset_count,
5727 	.get_pauseparam		= mvpp2_ethtool_get_pause_param,
5728 	.set_pauseparam		= mvpp2_ethtool_set_pause_param,
5729 	.get_link_ksettings	= mvpp2_ethtool_get_link_ksettings,
5730 	.set_link_ksettings	= mvpp2_ethtool_set_link_ksettings,
5731 	.get_rxnfc		= mvpp2_ethtool_get_rxnfc,
5732 	.set_rxnfc		= mvpp2_ethtool_set_rxnfc,
5733 	.get_rxfh_indir_size	= mvpp2_ethtool_get_rxfh_indir_size,
5734 	.get_rxfh		= mvpp2_ethtool_get_rxfh,
5735 	.set_rxfh		= mvpp2_ethtool_set_rxfh,
5736 	.get_rxfh_context	= mvpp2_ethtool_get_rxfh_context,
5737 	.set_rxfh_context	= mvpp2_ethtool_set_rxfh_context,
5738 };
5739 
5740 /* Used for PPv2.1, or PPv2.2 with the old Device Tree binding that
5741  * had a single IRQ defined per-port.
5742  */
5743 static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
5744 					   struct device_node *port_node)
5745 {
5746 	struct mvpp2_queue_vector *v = &port->qvecs[0];
5747 
5748 	v->first_rxq = 0;
5749 	v->nrxqs = port->nrxqs;
5750 	v->type = MVPP2_QUEUE_VECTOR_SHARED;
5751 	v->sw_thread_id = 0;
5752 	v->sw_thread_mask = *cpumask_bits(cpu_online_mask);
5753 	v->port = port;
5754 	v->irq = irq_of_parse_and_map(port_node, 0);
5755 	if (v->irq <= 0)
5756 		return -EINVAL;
5757 	netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5758 		       NAPI_POLL_WEIGHT);
5759 
5760 	port->nqvecs = 1;
5761 
5762 	return 0;
5763 }
5764 
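/* One "private" queue vector per software thread. In
 * MVPP2_QDIST_MULTI_MODE each vector also owns a single RX queue; in
 * MVPP2_QDIST_SINGLE_MODE the per-thread vectors handle TX completion
 * only, and one extra, shared vector serves all RX queues.
 */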
5765 static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
5766 					  struct device_node *port_node)
5767 {
5768 	struct mvpp2 *priv = port->priv;
5769 	struct mvpp2_queue_vector *v;
5770 	int i, ret;
5771 
5772 	switch (queue_mode) {
5773 	case MVPP2_QDIST_SINGLE_MODE:
5774 		port->nqvecs = priv->nthreads + 1;
5775 		break;
5776 	case MVPP2_QDIST_MULTI_MODE:
5777 		port->nqvecs = priv->nthreads;
5778 		break;
5779 	}
5780 
5781 	for (i = 0; i < port->nqvecs; i++) {
5782 		char irqname[16];
5783 
5784 		v = port->qvecs + i;
5785 
5786 		v->port = port;
5787 		v->type = MVPP2_QUEUE_VECTOR_PRIVATE;
5788 		v->sw_thread_id = i;
5789 		v->sw_thread_mask = BIT(i);
5790 
5791 		if (port->flags & MVPP2_F_DT_COMPAT)
5792 			snprintf(irqname, sizeof(irqname), "tx-cpu%d", i);
5793 		else
5794 			snprintf(irqname, sizeof(irqname), "hif%d", i);
5795 
5796 		if (queue_mode == MVPP2_QDIST_MULTI_MODE) {
5797 			v->first_rxq = i;
5798 			v->nrxqs = 1;
5799 		} else if (queue_mode == MVPP2_QDIST_SINGLE_MODE &&
5800 			   i == (port->nqvecs - 1)) {
5801 			v->first_rxq = 0;
5802 			v->nrxqs = port->nrxqs;
5803 			v->type = MVPP2_QUEUE_VECTOR_SHARED;
5804 
5805 			if (port->flags & MVPP2_F_DT_COMPAT)
5806 				strncpy(irqname, "rx-shared", sizeof(irqname));
5807 		}
5808 
5809 		if (port_node)
5810 			v->irq = of_irq_get_byname(port_node, irqname);
5811 		else
5812 			v->irq = fwnode_irq_get(port->fwnode, i);
5813 		if (v->irq <= 0) {
5814 			ret = -EINVAL;
5815 			goto err;
5816 		}
5817 
5818 		netif_napi_add(port->dev, &v->napi, mvpp2_poll,
5819 			       NAPI_POLL_WEIGHT);
5820 	}
5821 
5822 	return 0;
5823 
5824 err:
5825 	for (i = 0; i < port->nqvecs; i++)
5826 		irq_dispose_mapping(port->qvecs[i].irq);
5827 	return ret;
5828 }
5829 
5830 static int mvpp2_queue_vectors_init(struct mvpp2_port *port,
5831 				    struct device_node *port_node)
5832 {
5833 	if (port->has_tx_irqs)
5834 		return mvpp2_multi_queue_vectors_init(port, port_node);
5835 	else
5836 		return mvpp2_simple_queue_vectors_init(port, port_node);
5837 }
5838 
5839 static void mvpp2_queue_vectors_deinit(struct mvpp2_port *port)
5840 {
5841 	int i;
5842 
5843 	for (i = 0; i < port->nqvecs; i++)
5844 		irq_dispose_mapping(port->qvecs[i].irq);
5845 }
5846 
5847 /* Configure Rx queue group interrupt for this port */
5848 static void mvpp2_rx_irqs_setup(struct mvpp2_port *port)
5849 {
5850 	struct mvpp2 *priv = port->priv;
5851 	u32 val;
5852 	int i;
5853 
5854 	if (priv->hw_version == MVPP21) {
5855 		mvpp2_write(priv, MVPP21_ISR_RXQ_GROUP_REG(port->id),
5856 			    port->nrxqs);
5857 		return;
5858 	}
5859 
5860 	/* Handle the more complicated PPv2.2 and PPv2.3 case */
5861 	for (i = 0; i < port->nqvecs; i++) {
5862 		struct mvpp2_queue_vector *qv = port->qvecs + i;
5863 
5864 		if (!qv->nrxqs)
5865 			continue;
5866 
5867 		val = qv->sw_thread_id;
5868 		val |= port->id << MVPP22_ISR_RXQ_GROUP_INDEX_GROUP_OFFSET;
5869 		mvpp2_write(priv, MVPP22_ISR_RXQ_GROUP_INDEX_REG, val);
5870 
5871 		val = qv->first_rxq;
5872 		val |= qv->nrxqs << MVPP22_ISR_RXQ_SUB_GROUP_SIZE_OFFSET;
5873 		mvpp2_write(priv, MVPP22_ISR_RXQ_SUB_GROUP_CONFIG_REG, val);
5874 	}
5875 }
5876 
5877 /* Initialize port HW */
5878 static int mvpp2_port_init(struct mvpp2_port *port)
5879 {
5880 	struct device *dev = port->dev->dev.parent;
5881 	struct mvpp2 *priv = port->priv;
5882 	struct mvpp2_txq_pcpu *txq_pcpu;
5883 	unsigned int thread;
5884 	int queue, err, val;
5885 
5886 	/* Checks for hardware constraints */
5887 	if (port->first_rxq + port->nrxqs >
5888 	    MVPP2_MAX_PORTS * priv->max_port_rxqs)
5889 		return -EINVAL;
5890 
5891 	if (port->nrxqs > priv->max_port_rxqs || port->ntxqs > MVPP2_MAX_TXQ)
5892 		return -EINVAL;
5893 
5894 	/* Disable port */
5895 	mvpp2_egress_disable(port);
5896 	mvpp2_port_disable(port);
5897 
5898 	if (mvpp2_is_xlg(port->phy_interface)) {
5899 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
5900 		val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
5901 		val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
5902 		writel(val, port->base + MVPP22_XLG_CTRL0_REG);
5903 	} else {
5904 		val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5905 		val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
5906 		val |= MVPP2_GMAC_FORCE_LINK_DOWN;
5907 		writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
5908 	}
5909 
5910 	port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
5911 
5912 	port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
5913 				  GFP_KERNEL);
5914 	if (!port->txqs)
5915 		return -ENOMEM;
5916 
	/* Associate physical Tx queues with this port and initialize them.
	 * The mapping is predefined.
	 */
5920 	for (queue = 0; queue < port->ntxqs; queue++) {
5921 		int queue_phy_id = mvpp2_txq_phys(port->id, queue);
5922 		struct mvpp2_tx_queue *txq;
5923 
5924 		txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
5925 		if (!txq) {
5926 			err = -ENOMEM;
5927 			goto err_free_percpu;
5928 		}
5929 
5930 		txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
5931 		if (!txq->pcpu) {
5932 			err = -ENOMEM;
5933 			goto err_free_percpu;
5934 		}
5935 
5936 		txq->id = queue_phy_id;
5937 		txq->log_id = queue;
5938 		txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
5939 		for (thread = 0; thread < priv->nthreads; thread++) {
5940 			txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
5941 			txq_pcpu->thread = thread;
5942 		}
5943 
5944 		port->txqs[queue] = txq;
5945 	}
5946 
5947 	port->rxqs = devm_kcalloc(dev, port->nrxqs, sizeof(*port->rxqs),
5948 				  GFP_KERNEL);
5949 	if (!port->rxqs) {
5950 		err = -ENOMEM;
5951 		goto err_free_percpu;
5952 	}
5953 
5954 	/* Allocate and initialize Rx queue for this port */
5955 	for (queue = 0; queue < port->nrxqs; queue++) {
5956 		struct mvpp2_rx_queue *rxq;
5957 
5958 		/* Map physical Rx queue to port's logical Rx queue */
5959 		rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
5960 		if (!rxq) {
5961 			err = -ENOMEM;
5962 			goto err_free_percpu;
5963 		}
5964 		/* Map this Rx queue to a physical queue */
5965 		rxq->id = port->first_rxq + queue;
5966 		rxq->port = port->id;
5967 		rxq->logic_rxq = queue;
5968 
5969 		port->rxqs[queue] = rxq;
5970 	}
5971 
5972 	mvpp2_rx_irqs_setup(port);
5973 
5974 	/* Create Rx descriptor rings */
5975 	for (queue = 0; queue < port->nrxqs; queue++) {
5976 		struct mvpp2_rx_queue *rxq = port->rxqs[queue];
5977 
5978 		rxq->size = port->rx_ring_size;
5979 		rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
5980 		rxq->time_coal = MVPP2_RX_COAL_USEC;
5981 	}
5982 
5983 	mvpp2_ingress_disable(port);
5984 
5985 	/* Port default configuration */
5986 	mvpp2_defaults_set(port);
5987 
5988 	/* Port's classifier configuration */
5989 	mvpp2_cls_oversize_rxq_set(port);
5990 	mvpp2_cls_port_config(port);
5991 
5992 	if (mvpp22_rss_is_supported(port))
5993 		mvpp22_port_rss_init(port);
5994 
5995 	/* Provide an initial Rx packet size */
5996 	port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
5997 
5998 	/* Initialize pools for swf */
5999 	err = mvpp2_swf_bm_pool_init(port);
6000 	if (err)
6001 		goto err_free_percpu;
6002 
6003 	/* Clear all port stats */
6004 	mvpp2_read_stats(port);
6005 	memset(port->ethtool_stats, 0,
6006 	       MVPP2_N_ETHTOOL_STATS(port->ntxqs, port->nrxqs) * sizeof(u64));
6007 
6008 	return 0;
6009 
6010 err_free_percpu:
6011 	for (queue = 0; queue < port->ntxqs; queue++) {
6012 		if (!port->txqs[queue])
6013 			continue;
6014 		free_percpu(port->txqs[queue]->pcpu);
6015 	}
6016 	return err;
6017 }
6018 
6019 static bool mvpp22_port_has_legacy_tx_irqs(struct device_node *port_node,
6020 					   unsigned long *flags)
6021 {
6022 	char *irqs[5] = { "rx-shared", "tx-cpu0", "tx-cpu1", "tx-cpu2",
6023 			  "tx-cpu3" };
6024 	int i;
6025 
6026 	for (i = 0; i < 5; i++)
6027 		if (of_property_match_string(port_node, "interrupt-names",
6028 					     irqs[i]) < 0)
6029 			return false;
6030 
6031 	*flags |= MVPP2_F_DT_COMPAT;
6032 	return true;
6033 }
6034 
/* Checks if the port DT description has the required Tx interrupts:
 * - PPv2.1: there are no such interrupts.
 * - PPv2.2 and PPv2.3:
 *   - The old DTs have: "rx-shared", "tx-cpuX" with X in [0...3]
 *   - The new ones have: "hifX" with X in [0..8]
 *
 * All these variants are supported for backward compatibility.
 */
6043 static bool mvpp2_port_has_irqs(struct mvpp2 *priv,
6044 				struct device_node *port_node,
6045 				unsigned long *flags)
6046 {
6047 	char name[5];
6048 	int i;
6049 
6050 	/* ACPI */
6051 	if (!port_node)
6052 		return true;
6053 
6054 	if (priv->hw_version == MVPP21)
6055 		return false;
6056 
6057 	if (mvpp22_port_has_legacy_tx_irqs(port_node, flags))
6058 		return true;
6059 
6060 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
		snprintf(name, sizeof(name), "hif%d", i);
6062 		if (of_property_match_string(port_node, "interrupt-names",
6063 					     name) < 0)
6064 			return false;
6065 	}
6066 
6067 	return true;
6068 }
6069 
6070 static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
6071 				     struct fwnode_handle *fwnode,
6072 				     char **mac_from)
6073 {
6074 	struct mvpp2_port *port = netdev_priv(dev);
6075 	char hw_mac_addr[ETH_ALEN] = {0};
6076 	char fw_mac_addr[ETH_ALEN];
6077 
6078 	if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
6079 		*mac_from = "firmware node";
6080 		ether_addr_copy(dev->dev_addr, fw_mac_addr);
6081 		return;
6082 	}
6083 
6084 	if (priv->hw_version == MVPP21) {
6085 		mvpp21_get_mac_address(port, hw_mac_addr);
6086 		if (is_valid_ether_addr(hw_mac_addr)) {
6087 			*mac_from = "hardware";
6088 			ether_addr_copy(dev->dev_addr, hw_mac_addr);
6089 			return;
6090 		}
6091 	}
6092 
6093 	*mac_from = "random";
6094 	eth_hw_addr_random(dev);
6095 }
6096 
6097 static struct mvpp2_port *mvpp2_phylink_to_port(struct phylink_config *config)
6098 {
6099 	return container_of(config, struct mvpp2_port, phylink_config);
6100 }
6101 
6102 static struct mvpp2_port *mvpp2_pcs_to_port(struct phylink_pcs *pcs)
6103 {
6104 	return container_of(pcs, struct mvpp2_port, phylink_pcs);
6105 }
6106 
6107 static void mvpp2_xlg_pcs_get_state(struct phylink_pcs *pcs,
6108 				    struct phylink_link_state *state)
6109 {
6110 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6111 	u32 val;
6112 
6113 	state->speed = SPEED_10000;
	state->duplex = DUPLEX_FULL;
6115 	state->an_complete = 1;
6116 
6117 	val = readl(port->base + MVPP22_XLG_STATUS);
6118 	state->link = !!(val & MVPP22_XLG_STATUS_LINK_UP);
6119 
6120 	state->pause = 0;
6121 	val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6122 	if (val & MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN)
6123 		state->pause |= MLO_PAUSE_TX;
6124 	if (val & MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN)
6125 		state->pause |= MLO_PAUSE_RX;
6126 }
6127 
6128 static int mvpp2_xlg_pcs_config(struct phylink_pcs *pcs,
6129 				unsigned int mode,
6130 				phy_interface_t interface,
6131 				const unsigned long *advertising,
6132 				bool permit_pause_to_mac)
6133 {
	/* The XLG PCS has nothing to configure in-band */
	return 0;
6135 }
6136 
6137 static const struct phylink_pcs_ops mvpp2_phylink_xlg_pcs_ops = {
6138 	.pcs_get_state = mvpp2_xlg_pcs_get_state,
6139 	.pcs_config = mvpp2_xlg_pcs_config,
6140 };
6141 
6142 static void mvpp2_gmac_pcs_get_state(struct phylink_pcs *pcs,
6143 				     struct phylink_link_state *state)
6144 {
6145 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6146 	u32 val;
6147 
6148 	val = readl(port->base + MVPP2_GMAC_STATUS0);
6149 
6150 	state->an_complete = !!(val & MVPP2_GMAC_STATUS0_AN_COMPLETE);
6151 	state->link = !!(val & MVPP2_GMAC_STATUS0_LINK_UP);
6152 	state->duplex = !!(val & MVPP2_GMAC_STATUS0_FULL_DUPLEX);
6153 
6154 	switch (port->phy_interface) {
6155 	case PHY_INTERFACE_MODE_1000BASEX:
6156 		state->speed = SPEED_1000;
6157 		break;
6158 	case PHY_INTERFACE_MODE_2500BASEX:
6159 		state->speed = SPEED_2500;
6160 		break;
6161 	default:
6162 		if (val & MVPP2_GMAC_STATUS0_GMII_SPEED)
6163 			state->speed = SPEED_1000;
6164 		else if (val & MVPP2_GMAC_STATUS0_MII_SPEED)
6165 			state->speed = SPEED_100;
6166 		else
6167 			state->speed = SPEED_10;
6168 	}
6169 
6170 	state->pause = 0;
6171 	if (val & MVPP2_GMAC_STATUS0_RX_PAUSE)
6172 		state->pause |= MLO_PAUSE_RX;
6173 	if (val & MVPP2_GMAC_STATUS0_TX_PAUSE)
6174 		state->pause |= MLO_PAUSE_TX;
6175 }
6176 
6177 static int mvpp2_gmac_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
6178 				 phy_interface_t interface,
6179 				 const unsigned long *advertising,
6180 				 bool permit_pause_to_mac)
6181 {
6182 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6183 	u32 mask, val, an, old_an, changed;
6184 
6185 	mask = MVPP2_GMAC_IN_BAND_AUTONEG_BYPASS |
6186 	       MVPP2_GMAC_IN_BAND_AUTONEG |
6187 	       MVPP2_GMAC_AN_SPEED_EN |
6188 	       MVPP2_GMAC_FLOW_CTRL_AUTONEG |
6189 	       MVPP2_GMAC_AN_DUPLEX_EN;
6190 
6191 	if (phylink_autoneg_inband(mode)) {
6192 		mask |= MVPP2_GMAC_CONFIG_MII_SPEED |
6193 			MVPP2_GMAC_CONFIG_GMII_SPEED |
6194 			MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6195 		val = MVPP2_GMAC_IN_BAND_AUTONEG;
6196 
6197 		if (interface == PHY_INTERFACE_MODE_SGMII) {
6198 			/* SGMII mode receives the speed and duplex from PHY */
6199 			val |= MVPP2_GMAC_AN_SPEED_EN |
6200 			       MVPP2_GMAC_AN_DUPLEX_EN;
6201 		} else {
6202 			/* 802.3z mode has fixed speed and duplex */
6203 			val |= MVPP2_GMAC_CONFIG_GMII_SPEED |
6204 			       MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6205 
			/* The FLOW_CTRL_AUTONEG bit selects whether the
			 * GMAC pause modes are controlled automatically by
			 * the hardware or manually via the bits in
			 * MVPP22_GMAC_CTRL_4_REG.
			 */
6210 			if (permit_pause_to_mac)
6211 				val |= MVPP2_GMAC_FLOW_CTRL_AUTONEG;
6212 
6213 			/* Configure advertisement bits */
6214 			mask |= MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN;
6215 			if (phylink_test(advertising, Pause))
6216 				val |= MVPP2_GMAC_FC_ADV_EN;
6217 			if (phylink_test(advertising, Asym_Pause))
6218 				val |= MVPP2_GMAC_FC_ADV_ASM_EN;
6219 		}
6220 	} else {
6221 		val = 0;
6222 	}
6223 
6224 	old_an = an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6225 	an = (an & ~mask) | val;
6226 	changed = an ^ old_an;
6227 	if (changed)
6228 		writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6229 
6230 	/* We are only interested in the advertisement bits changing */
6231 	return changed & (MVPP2_GMAC_FC_ADV_EN | MVPP2_GMAC_FC_ADV_ASM_EN);
6232 }
6233 
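/* Restart in-band autoneg by toggling the RESTART_AN bit high and then
 * low again.
 */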
6234 static void mvpp2_gmac_pcs_an_restart(struct phylink_pcs *pcs)
6235 {
6236 	struct mvpp2_port *port = mvpp2_pcs_to_port(pcs);
6237 	u32 val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6238 
6239 	writel(val | MVPP2_GMAC_IN_BAND_RESTART_AN,
6240 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6241 	writel(val & ~MVPP2_GMAC_IN_BAND_RESTART_AN,
6242 	       port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6243 }
6244 
6245 static const struct phylink_pcs_ops mvpp2_phylink_gmac_pcs_ops = {
6246 	.pcs_get_state = mvpp2_gmac_pcs_get_state,
6247 	.pcs_config = mvpp2_gmac_pcs_config,
6248 	.pcs_an_restart = mvpp2_gmac_pcs_an_restart,
6249 };
6250 
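/* Reduce the supported and advertising link mode masks to what this
 * port can do for the requested interface mode; an unsupported
 * combination yields empty masks, which phylink treats as invalid.
 */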
6251 static void mvpp2_phylink_validate(struct phylink_config *config,
6252 				   unsigned long *supported,
6253 				   struct phylink_link_state *state)
6254 {
6255 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6256 	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
6257 
6258 	/* Invalid combinations */
6259 	switch (state->interface) {
6260 	case PHY_INTERFACE_MODE_10GBASER:
6261 	case PHY_INTERFACE_MODE_XAUI:
6262 		if (!mvpp2_port_supports_xlg(port))
6263 			goto empty_set;
6264 		break;
6265 	case PHY_INTERFACE_MODE_RGMII:
6266 	case PHY_INTERFACE_MODE_RGMII_ID:
6267 	case PHY_INTERFACE_MODE_RGMII_RXID:
6268 	case PHY_INTERFACE_MODE_RGMII_TXID:
6269 		if (!mvpp2_port_supports_rgmii(port))
6270 			goto empty_set;
6271 		break;
6272 	default:
6273 		break;
6274 	}
6275 
6276 	phylink_set(mask, Autoneg);
6277 	phylink_set_port_modes(mask);
6278 
6279 	if (port->priv->global_tx_fc) {
6280 		phylink_set(mask, Pause);
6281 		phylink_set(mask, Asym_Pause);
6282 	}
6283 
6284 	switch (state->interface) {
6285 	case PHY_INTERFACE_MODE_10GBASER:
6286 	case PHY_INTERFACE_MODE_XAUI:
6287 	case PHY_INTERFACE_MODE_NA:
6288 		if (mvpp2_port_supports_xlg(port)) {
6289 			phylink_set(mask, 10000baseT_Full);
6290 			phylink_set(mask, 10000baseCR_Full);
6291 			phylink_set(mask, 10000baseSR_Full);
6292 			phylink_set(mask, 10000baseLR_Full);
6293 			phylink_set(mask, 10000baseLRM_Full);
6294 			phylink_set(mask, 10000baseER_Full);
6295 			phylink_set(mask, 10000baseKR_Full);
6296 		}
6297 		if (state->interface != PHY_INTERFACE_MODE_NA)
6298 			break;
6299 		fallthrough;
6300 	case PHY_INTERFACE_MODE_RGMII:
6301 	case PHY_INTERFACE_MODE_RGMII_ID:
6302 	case PHY_INTERFACE_MODE_RGMII_RXID:
6303 	case PHY_INTERFACE_MODE_RGMII_TXID:
6304 	case PHY_INTERFACE_MODE_SGMII:
6305 		phylink_set(mask, 10baseT_Half);
6306 		phylink_set(mask, 10baseT_Full);
6307 		phylink_set(mask, 100baseT_Half);
6308 		phylink_set(mask, 100baseT_Full);
6309 		phylink_set(mask, 1000baseT_Full);
6310 		phylink_set(mask, 1000baseX_Full);
6311 		if (state->interface != PHY_INTERFACE_MODE_NA)
6312 			break;
6313 		fallthrough;
6314 	case PHY_INTERFACE_MODE_1000BASEX:
6315 	case PHY_INTERFACE_MODE_2500BASEX:
6316 		if (port->comphy ||
6317 		    state->interface != PHY_INTERFACE_MODE_2500BASEX) {
6318 			phylink_set(mask, 1000baseT_Full);
6319 			phylink_set(mask, 1000baseX_Full);
6320 		}
6321 		if (port->comphy ||
6322 		    state->interface == PHY_INTERFACE_MODE_2500BASEX) {
6323 			phylink_set(mask, 2500baseT_Full);
6324 			phylink_set(mask, 2500baseX_Full);
6325 		}
6326 		break;
6327 	default:
6328 		goto empty_set;
6329 	}
6330 
6331 	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
6332 	bitmap_and(state->advertising, state->advertising, mask,
6333 		   __ETHTOOL_LINK_MODE_MASK_NBITS);
6334 
6335 	phylink_helper_basex_speed(state);
6336 	return;
6337 
6338 empty_set:
6339 	bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
6340 }
6341 
6342 static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
6343 			     const struct phylink_link_state *state)
6344 {
6345 	u32 val;
6346 
6347 	mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6348 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS,
6349 		     MVPP22_XLG_CTRL0_MAC_RESET_DIS);
6350 	mvpp2_modify(port->base + MVPP22_XLG_CTRL4_REG,
6351 		     MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
6352 		     MVPP22_XLG_CTRL4_EN_IDLE_CHECK |
6353 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC,
6354 		     MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC);
6355 
6356 	/* Wait for reset to deassert */
6357 	do {
6358 		val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6359 	} while (!(val & MVPP22_XLG_CTRL0_MAC_RESET_DIS));
6360 }
6361 
6362 static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
6363 			      const struct phylink_link_state *state)
6364 {
6365 	u32 old_ctrl0, ctrl0;
6366 	u32 old_ctrl2, ctrl2;
6367 	u32 old_ctrl4, ctrl4;
6368 
6369 	old_ctrl0 = ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
6370 	old_ctrl2 = ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
6371 	old_ctrl4 = ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
6372 
6373 	ctrl0 &= ~MVPP2_GMAC_PORT_TYPE_MASK;
	ctrl2 &= ~(MVPP2_GMAC_INBAND_AN_MASK | MVPP2_GMAC_PCS_ENABLE_MASK |
		   MVPP2_GMAC_FLOW_CTRL_MASK);
6375 
6376 	/* Configure port type */
6377 	if (phy_interface_mode_is_8023z(state->interface)) {
6378 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK;
6379 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6380 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6381 			 MVPP22_CTRL4_DP_CLK_SEL |
6382 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6383 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
6384 		ctrl2 |= MVPP2_GMAC_PCS_ENABLE_MASK | MVPP2_GMAC_INBAND_AN_MASK;
6385 		ctrl4 &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
6386 		ctrl4 |= MVPP22_CTRL4_SYNC_BYPASS_DIS |
6387 			 MVPP22_CTRL4_DP_CLK_SEL |
6388 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6389 	} else if (phy_interface_mode_is_rgmii(state->interface)) {
6390 		ctrl4 &= ~MVPP22_CTRL4_DP_CLK_SEL;
6391 		ctrl4 |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
6392 			 MVPP22_CTRL4_SYNC_BYPASS_DIS |
6393 			 MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
6394 	}
6395 
6396 	/* Configure negotiation style */
6397 	if (!phylink_autoneg_inband(mode)) {
6398 		/* Phy or fixed speed - no in-band AN, nothing to do, leave the
6399 		 * configured speed, duplex and flow control as-is.
6400 		 */
6401 	} else if (state->interface == PHY_INTERFACE_MODE_SGMII) {
		/* SGMII in-band mode receives the speed and duplex from
		 * the PHY. Flow control information is not received.
		 */
6404 	} else if (phy_interface_mode_is_8023z(state->interface)) {
6405 		/* 1000BaseX and 2500BaseX ports cannot negotiate speed nor can
6406 		 * they negotiate duplex: they are always operating with a fixed
6407 		 * speed of 1000/2500Mbps in full duplex, so force 1000/2500
6408 		 * speed and full duplex here.
6409 		 */
6410 		ctrl0 |= MVPP2_GMAC_PORT_TYPE_MASK;
6411 	}
6412 
6413 	if (old_ctrl0 != ctrl0)
6414 		writel(ctrl0, port->base + MVPP2_GMAC_CTRL_0_REG);
6415 	if (old_ctrl2 != ctrl2)
6416 		writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
6417 	if (old_ctrl4 != ctrl4)
6418 		writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
6419 }
6420 
6421 static int mvpp2__mac_prepare(struct phylink_config *config, unsigned int mode,
6422 			      phy_interface_t interface)
6423 {
6424 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6425 
6426 	/* Check for invalid configuration */
6427 	if (mvpp2_is_xlg(interface) && port->gop_id != 0) {
6428 		netdev_err(port->dev, "Invalid mode on %s\n", port->dev->name);
6429 		return -EINVAL;
6430 	}
6431 
6432 	if (port->phy_interface != interface ||
6433 	    phylink_autoneg_inband(mode)) {
6434 		/* Force the link down when changing the interface or if in
6435 		 * in-band mode to ensure we do not change the configuration
6436 		 * while the hardware is indicating link is up. We force both
6437 		 * XLG and GMAC down to ensure that they're both in a known
6438 		 * state.
6439 		 */
6440 		mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6441 			     MVPP2_GMAC_FORCE_LINK_PASS |
6442 			     MVPP2_GMAC_FORCE_LINK_DOWN,
6443 			     MVPP2_GMAC_FORCE_LINK_DOWN);
6444 
6445 		if (mvpp2_port_supports_xlg(port))
6446 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6447 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6448 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN,
6449 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN);
6450 	}
6451 
6452 	/* Make sure the port is disabled when reconfiguring the mode */
6453 	mvpp2_port_disable(port);
6454 
6455 	if (port->phy_interface != interface) {
6456 		/* Place GMAC into reset */
6457 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6458 			     MVPP2_GMAC_PORT_RESET_MASK,
6459 			     MVPP2_GMAC_PORT_RESET_MASK);
6460 
6461 		if (port->priv->hw_version >= MVPP22) {
6462 			mvpp22_gop_mask_irq(port);
6463 
6464 			phy_power_off(port->comphy);
6465 		}
6466 	}
6467 
6468 	/* Select the appropriate PCS operations depending on the
6469 	 * configured interface mode. We will only switch to a mode
6470 	 * that the validate() checks have already passed.
6471 	 */
6472 	if (mvpp2_is_xlg(interface))
6473 		port->phylink_pcs.ops = &mvpp2_phylink_xlg_pcs_ops;
6474 	else
6475 		port->phylink_pcs.ops = &mvpp2_phylink_gmac_pcs_ops;
6476 
6477 	return 0;
6478 }
6479 
6480 static int mvpp2_mac_prepare(struct phylink_config *config, unsigned int mode,
6481 			     phy_interface_t interface)
6482 {
6483 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6484 	int ret;
6485 
6486 	ret = mvpp2__mac_prepare(config, mode, interface);
6487 	if (ret == 0)
6488 		phylink_set_pcs(port->phylink, &port->phylink_pcs);
6489 
6490 	return ret;
6491 }
6492 
6493 static void mvpp2_mac_config(struct phylink_config *config, unsigned int mode,
6494 			     const struct phylink_link_state *state)
6495 {
6496 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6497 
6498 	/* mac (re)configuration */
6499 	if (mvpp2_is_xlg(state->interface))
6500 		mvpp2_xlg_config(port, mode, state);
6501 	else if (phy_interface_mode_is_rgmii(state->interface) ||
6502 		 phy_interface_mode_is_8023z(state->interface) ||
6503 		 state->interface == PHY_INTERFACE_MODE_SGMII)
6504 		mvpp2_gmac_config(port, mode, state);
6505 
6506 	if (port->priv->hw_version == MVPP21 && port->flags & MVPP2_F_LOOPBACK)
6507 		mvpp2_port_loopback_set(port, state);
6508 }
6509 
6510 static int mvpp2_mac_finish(struct phylink_config *config, unsigned int mode,
6511 			    phy_interface_t interface)
6512 {
6513 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6514 
6515 	if (port->priv->hw_version >= MVPP22 &&
6516 	    port->phy_interface != interface) {
6517 		port->phy_interface = interface;
6518 
6519 		/* Reconfigure the serdes lanes */
6520 		mvpp22_mode_reconfigure(port);
6521 
6522 		/* Unmask interrupts */
6523 		mvpp22_gop_unmask_irq(port);
6524 	}
6525 
6526 	if (!mvpp2_is_xlg(interface)) {
6527 		/* Release GMAC reset and wait */
6528 		mvpp2_modify(port->base + MVPP2_GMAC_CTRL_2_REG,
6529 			     MVPP2_GMAC_PORT_RESET_MASK, 0);
6530 
6531 		while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
6532 		       MVPP2_GMAC_PORT_RESET_MASK)
6533 			continue;
6534 	}
6535 
6536 	mvpp2_port_enable(port);
6537 
6538 	/* Allow the link to come up if in in-band mode, otherwise the
6539 	 * link is forced via mac_link_down()/mac_link_up()
6540 	 */
6541 	if (phylink_autoneg_inband(mode)) {
6542 		if (mvpp2_is_xlg(interface))
6543 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6544 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6545 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN, 0);
6546 		else
6547 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6548 				     MVPP2_GMAC_FORCE_LINK_PASS |
6549 				     MVPP2_GMAC_FORCE_LINK_DOWN, 0);
6550 	}
6551 
6552 	return 0;
6553 }
6554 
6555 static void mvpp2_mac_link_up(struct phylink_config *config,
6556 			      struct phy_device *phy,
6557 			      unsigned int mode, phy_interface_t interface,
6558 			      int speed, int duplex,
6559 			      bool tx_pause, bool rx_pause)
6560 {
6561 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6562 	u32 val;
6563 	int i;
6564 
6565 	if (mvpp2_is_xlg(interface)) {
6566 		if (!phylink_autoneg_inband(mode)) {
6567 			val = MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6568 			if (tx_pause)
6569 				val |= MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN;
6570 			if (rx_pause)
6571 				val |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
6572 
6573 			mvpp2_modify(port->base + MVPP22_XLG_CTRL0_REG,
6574 				     MVPP22_XLG_CTRL0_FORCE_LINK_DOWN |
6575 				     MVPP22_XLG_CTRL0_FORCE_LINK_PASS |
6576 				     MVPP22_XLG_CTRL0_TX_FLOW_CTRL_EN |
6577 				     MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN, val);
6578 		}
6579 	} else {
6580 		if (!phylink_autoneg_inband(mode)) {
6581 			val = MVPP2_GMAC_FORCE_LINK_PASS;
6582 
6583 			if (speed == SPEED_1000 || speed == SPEED_2500)
6584 				val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
6585 			else if (speed == SPEED_100)
6586 				val |= MVPP2_GMAC_CONFIG_MII_SPEED;
6587 
6588 			if (duplex == DUPLEX_FULL)
6589 				val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
6590 
6591 			mvpp2_modify(port->base + MVPP2_GMAC_AUTONEG_CONFIG,
6592 				     MVPP2_GMAC_FORCE_LINK_DOWN |
6593 				     MVPP2_GMAC_FORCE_LINK_PASS |
6594 				     MVPP2_GMAC_CONFIG_MII_SPEED |
6595 				     MVPP2_GMAC_CONFIG_GMII_SPEED |
6596 				     MVPP2_GMAC_CONFIG_FULL_DUPLEX, val);
6597 		}
6598 
6599 		/* We can always update the flow control enable bits;
6600 		 * these will only be effective if flow control AN
6601 		 * (MVPP2_GMAC_FLOW_CTRL_AUTONEG) is disabled.
6602 		 */
6603 		val = 0;
6604 		if (tx_pause)
6605 			val |= MVPP22_CTRL4_TX_FC_EN;
6606 		if (rx_pause)
6607 			val |= MVPP22_CTRL4_RX_FC_EN;
6608 
6609 		mvpp2_modify(port->base + MVPP22_GMAC_CTRL_4_REG,
6610 			     MVPP22_CTRL4_RX_FC_EN | MVPP22_CTRL4_TX_FC_EN,
6611 			     val);
6612 	}
6613 
6614 	if (port->priv->global_tx_fc) {
6615 		port->tx_fc = tx_pause;
6616 		if (tx_pause)
6617 			mvpp2_rxq_enable_fc(port);
6618 		else
6619 			mvpp2_rxq_disable_fc(port);
6620 		if (port->priv->percpu_pools) {
6621 			for (i = 0; i < port->nrxqs; i++)
6622 				mvpp2_bm_pool_update_fc(port, &port->priv->bm_pools[i], tx_pause);
6623 		} else {
6624 			mvpp2_bm_pool_update_fc(port, port->pool_long, tx_pause);
6625 			mvpp2_bm_pool_update_fc(port, port->pool_short, tx_pause);
6626 		}
6627 		if (port->priv->hw_version == MVPP23)
6628 			mvpp23_rx_fifo_fc_en(port->priv, port->id, tx_pause);
6629 	}
6630 
6631 	mvpp2_port_enable(port);
6632 
6633 	mvpp2_egress_enable(port);
6634 	mvpp2_ingress_enable(port);
6635 	netif_tx_wake_all_queues(port->dev);
6636 }
6637 
6638 static void mvpp2_mac_link_down(struct phylink_config *config,
6639 				unsigned int mode, phy_interface_t interface)
6640 {
6641 	struct mvpp2_port *port = mvpp2_phylink_to_port(config);
6642 	u32 val;
6643 
6644 	if (!phylink_autoneg_inband(mode)) {
6645 		if (mvpp2_is_xlg(interface)) {
6646 			val = readl(port->base + MVPP22_XLG_CTRL0_REG);
6647 			val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
6648 			val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
6649 			writel(val, port->base + MVPP22_XLG_CTRL0_REG);
6650 		} else {
6651 			val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6652 			val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
6653 			val |= MVPP2_GMAC_FORCE_LINK_DOWN;
6654 			writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
6655 		}
6656 	}
6657 
6658 	netif_tx_stop_all_queues(port->dev);
6659 	mvpp2_egress_disable(port);
6660 	mvpp2_ingress_disable(port);
6661 
6662 	mvpp2_port_disable(port);
6663 }
6664 
6665 static const struct phylink_mac_ops mvpp2_phylink_ops = {
6666 	.validate = mvpp2_phylink_validate,
6667 	.mac_prepare = mvpp2_mac_prepare,
6668 	.mac_config = mvpp2_mac_config,
6669 	.mac_finish = mvpp2_mac_finish,
6670 	.mac_link_up = mvpp2_mac_link_up,
6671 	.mac_link_down = mvpp2_mac_link_down,
6672 };
6673 
6674 /* Work-around for ACPI */
6675 static void mvpp2_acpi_start(struct mvpp2_port *port)
6676 {
	/* Phylink isn't used for ACPI yet, so the MAC has to be configured
	 * manually when the interface is started. This will be removed as
	 * soon as phylink ACPI support lands.
	 */
6681 	struct phylink_link_state state = {
6682 		.interface = port->phy_interface,
6683 	};
6684 	mvpp2__mac_prepare(&port->phylink_config, MLO_AN_INBAND,
6685 			   port->phy_interface);
6686 	mvpp2_mac_config(&port->phylink_config, MLO_AN_INBAND, &state);
6687 	port->phylink_pcs.ops->pcs_config(&port->phylink_pcs, MLO_AN_INBAND,
6688 					  port->phy_interface,
6689 					  state.advertising, false);
6690 	mvpp2_mac_finish(&port->phylink_config, MLO_AN_INBAND,
6691 			 port->phy_interface);
6692 	mvpp2_mac_link_up(&port->phylink_config, NULL,
6693 			  MLO_AN_INBAND, port->phy_interface,
6694 			  SPEED_UNKNOWN, DUPLEX_UNKNOWN, false, false);
6695 }
6696 
/* In order to ensure backward compatibility for ACPI, check whether the port
 * firmware node contains the description required to use phylink.
 */
6700 static bool mvpp2_use_acpi_compat_mode(struct fwnode_handle *port_fwnode)
6701 {
6702 	if (!is_acpi_node(port_fwnode))
6703 		return false;
6704 
6705 	return (!fwnode_property_present(port_fwnode, "phy-handle") &&
6706 		!fwnode_property_present(port_fwnode, "managed") &&
6707 		!fwnode_get_named_child_node(port_fwnode, "fixed-link"));
6708 }
6709 
6710 /* Ports initialization */
6711 static int mvpp2_port_probe(struct platform_device *pdev,
6712 			    struct fwnode_handle *port_fwnode,
6713 			    struct mvpp2 *priv)
6714 {
6715 	struct phy *comphy = NULL;
6716 	struct mvpp2_port *port;
6717 	struct mvpp2_port_pcpu *port_pcpu;
6718 	struct device_node *port_node = to_of_node(port_fwnode);
6719 	netdev_features_t features;
6720 	struct net_device *dev;
6721 	struct phylink *phylink;
6722 	char *mac_from = "";
6723 	unsigned int ntxqs, nrxqs, thread;
6724 	unsigned long flags = 0;
6725 	bool has_tx_irqs;
6726 	u32 id;
6727 	int phy_mode;
6728 	int err, i;
6729 
6730 	has_tx_irqs = mvpp2_port_has_irqs(priv, port_node, &flags);
6731 	if (!has_tx_irqs && queue_mode == MVPP2_QDIST_MULTI_MODE) {
6732 		dev_err(&pdev->dev,
6733 			"not enough IRQs to support multi queue mode\n");
6734 		return -EINVAL;
6735 	}
6736 
6737 	ntxqs = MVPP2_MAX_TXQ;
6738 	nrxqs = mvpp2_get_nrxqs(priv);
6739 
6740 	dev = alloc_etherdev_mqs(sizeof(*port), ntxqs, nrxqs);
6741 	if (!dev)
6742 		return -ENOMEM;
6743 
6744 	phy_mode = fwnode_get_phy_mode(port_fwnode);
6745 	if (phy_mode < 0) {
6746 		dev_err(&pdev->dev, "incorrect phy mode\n");
6747 		err = phy_mode;
6748 		goto err_free_netdev;
6749 	}
6750 
6751 	/*
6752 	 * Rewrite 10GBASE-KR to 10GBASE-R for compatibility with existing DT.
6753 	 * Existing usage of 10GBASE-KR is not correct; no backplane
6754 	 * negotiation is done, and this driver does not actually support
6755 	 * 10GBASE-KR.
6756 	 */
6757 	if (phy_mode == PHY_INTERFACE_MODE_10GKR)
6758 		phy_mode = PHY_INTERFACE_MODE_10GBASER;
6759 
6760 	if (port_node) {
6761 		comphy = devm_of_phy_get(&pdev->dev, port_node, NULL);
6762 		if (IS_ERR(comphy)) {
6763 			if (PTR_ERR(comphy) == -EPROBE_DEFER) {
6764 				err = -EPROBE_DEFER;
6765 				goto err_free_netdev;
6766 			}
6767 			comphy = NULL;
6768 		}
6769 	}
6770 
6771 	if (fwnode_property_read_u32(port_fwnode, "port-id", &id)) {
6772 		err = -EINVAL;
6773 		dev_err(&pdev->dev, "missing port-id value\n");
6774 		goto err_free_netdev;
6775 	}
6776 
6777 	dev->tx_queue_len = MVPP2_MAX_TXD_MAX;
6778 	dev->watchdog_timeo = 5 * HZ;
6779 	dev->netdev_ops = &mvpp2_netdev_ops;
6780 	dev->ethtool_ops = &mvpp2_eth_tool_ops;
6781 
6782 	port = netdev_priv(dev);
6783 	port->dev = dev;
6784 	port->fwnode = port_fwnode;
6785 	port->ntxqs = ntxqs;
6786 	port->nrxqs = nrxqs;
6787 	port->priv = priv;
6788 	port->has_tx_irqs = has_tx_irqs;
6789 	port->flags = flags;
6790 
6791 	err = mvpp2_queue_vectors_init(port, port_node);
6792 	if (err)
6793 		goto err_free_netdev;
6794 
6795 	if (port_node)
6796 		port->port_irq = of_irq_get_byname(port_node, "link");
6797 	else
6798 		port->port_irq = fwnode_irq_get(port_fwnode, port->nqvecs + 1);
6799 	if (port->port_irq == -EPROBE_DEFER) {
6800 		err = -EPROBE_DEFER;
6801 		goto err_deinit_qvecs;
6802 	}
6803 	if (port->port_irq <= 0)
6804 		/* the link irq is optional */
6805 		port->port_irq = 0;
6806 
6807 	if (fwnode_property_read_bool(port_fwnode, "marvell,loopback"))
6808 		port->flags |= MVPP2_F_LOOPBACK;
6809 
6810 	port->id = id;
6811 	if (priv->hw_version == MVPP21)
6812 		port->first_rxq = port->id * port->nrxqs;
6813 	else
6814 		port->first_rxq = port->id * priv->max_port_rxqs;
6815 
6816 	port->of_node = port_node;
6817 	port->phy_interface = phy_mode;
6818 	port->comphy = comphy;
6819 
6820 	if (priv->hw_version == MVPP21) {
6821 		port->base = devm_platform_ioremap_resource(pdev, 2 + id);
6822 		if (IS_ERR(port->base)) {
6823 			err = PTR_ERR(port->base);
6824 			goto err_free_irq;
6825 		}
6826 
6827 		port->stats_base = port->priv->lms_base +
6828 				   MVPP21_MIB_COUNTERS_OFFSET +
6829 				   port->gop_id * MVPP21_MIB_COUNTERS_PORT_SZ;
6830 	} else {
6831 		if (fwnode_property_read_u32(port_fwnode, "gop-port-id",
6832 					     &port->gop_id)) {
6833 			err = -EINVAL;
6834 			dev_err(&pdev->dev, "missing gop-port-id value\n");
6835 			goto err_deinit_qvecs;
6836 		}
6837 
6838 		port->base = priv->iface_base + MVPP22_GMAC_BASE(port->gop_id);
6839 		port->stats_base = port->priv->iface_base +
6840 				   MVPP22_MIB_COUNTERS_OFFSET +
6841 				   port->gop_id * MVPP22_MIB_COUNTERS_PORT_SZ;
6842 
6843 		/* We may want a property to describe whether we should use
6844 		 * MAC hardware timestamping.
6845 		 */
6846 		if (priv->tai)
6847 			port->hwtstamp = true;
6848 	}
6849 
6850 	/* Alloc per-cpu and ethtool stats */
6851 	port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
6852 	if (!port->stats) {
6853 		err = -ENOMEM;
6854 		goto err_free_irq;
6855 	}
6856 
6857 	port->ethtool_stats = devm_kcalloc(&pdev->dev,
6858 					   MVPP2_N_ETHTOOL_STATS(ntxqs, nrxqs),
6859 					   sizeof(u64), GFP_KERNEL);
6860 	if (!port->ethtool_stats) {
6861 		err = -ENOMEM;
6862 		goto err_free_stats;
6863 	}
6864 
6865 	mutex_init(&port->gather_stats_lock);
6866 	INIT_DELAYED_WORK(&port->stats_work, mvpp2_gather_hw_statistics);
6867 
6868 	mvpp2_port_copy_mac_addr(dev, priv, port_fwnode, &mac_from);
6869 
6870 	port->tx_ring_size = MVPP2_MAX_TXD_DFLT;
6871 	port->rx_ring_size = MVPP2_MAX_RXD_DFLT;
6872 	SET_NETDEV_DEV(dev, &pdev->dev);
6873 
6874 	err = mvpp2_port_init(port);
6875 	if (err < 0) {
6876 		dev_err(&pdev->dev, "failed to init port %d\n", id);
6877 		goto err_free_stats;
6878 	}
6879 
6880 	mvpp2_port_periodic_xon_disable(port);
6881 
6882 	mvpp2_mac_reset_assert(port);
6883 	mvpp22_pcs_reset_assert(port);
6884 
6885 	port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6886 	if (!port->pcpu) {
6887 		err = -ENOMEM;
6888 		goto err_free_txq_pcpu;
6889 	}
6890 
6891 	if (!port->has_tx_irqs) {
6892 		for (thread = 0; thread < priv->nthreads; thread++) {
6893 			port_pcpu = per_cpu_ptr(port->pcpu, thread);
6894 
6895 			hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6896 				     HRTIMER_MODE_REL_PINNED_SOFT);
6897 			port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6898 			port_pcpu->timer_scheduled = false;
6899 			port_pcpu->dev = dev;
6900 		}
6901 	}
6902 
6903 	features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
6904 		   NETIF_F_TSO;
6905 	dev->features = features | NETIF_F_RXCSUM;
6906 	dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
6907 			    NETIF_F_HW_VLAN_CTAG_FILTER;
6908 
6909 	if (mvpp22_rss_is_supported(port)) {
6910 		dev->hw_features |= NETIF_F_RXHASH;
6911 		dev->features |= NETIF_F_NTUPLE;
6912 	}
6913 
6914 	if (!port->priv->percpu_pools)
6915 		mvpp2_set_hw_csum(port, port->pool_long->id);
6916 
6917 	dev->vlan_features |= features;
6918 	dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
6919 	dev->priv_flags |= IFF_UNICAST_FLT;
6920 
6921 	/* MTU range: 68 - 9704 */
6922 	dev->min_mtu = ETH_MIN_MTU;
6923 	/* 9704 == 9728 - 20 and rounding to 8 */
6924 	dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
6925 	dev->dev.of_node = port_node;
6926 
6927 	if (!mvpp2_use_acpi_compat_mode(port_fwnode)) {
6928 		port->phylink_config.dev = &dev->dev;
6929 		port->phylink_config.type = PHYLINK_NETDEV;
6930 
6931 		phylink = phylink_create(&port->phylink_config, port_fwnode,
6932 					 phy_mode, &mvpp2_phylink_ops);
6933 		if (IS_ERR(phylink)) {
6934 			err = PTR_ERR(phylink);
6935 			goto err_free_port_pcpu;
6936 		}
6937 		port->phylink = phylink;
6938 	} else {
6939 		dev_warn(&pdev->dev, "Use link irqs for port#%d. FW update required\n", port->id);
6940 		port->phylink = NULL;
6941 	}
6942 
6943 	/* Cycle the comphy to power it down, saving 270mW per port -
6944 	 * don't worry about an error powering it up. When the comphy
6945 	 * driver does this, we can remove this code.
6946 	 */
6947 	if (port->comphy) {
6948 		err = mvpp22_comphy_init(port);
6949 		if (err == 0)
6950 			phy_power_off(port->comphy);
6951 	}
6952 
6953 	err = register_netdev(dev);
6954 	if (err < 0) {
6955 		dev_err(&pdev->dev, "failed to register netdev\n");
6956 		goto err_phylink;
6957 	}
6958 	netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6959 
6960 	priv->port_list[priv->port_count++] = port;
6961 
6962 	return 0;
6963 
6964 err_phylink:
6965 	if (port->phylink)
6966 		phylink_destroy(port->phylink);
6967 err_free_port_pcpu:
6968 	free_percpu(port->pcpu);
6969 err_free_txq_pcpu:
6970 	for (i = 0; i < port->ntxqs; i++)
6971 		free_percpu(port->txqs[i]->pcpu);
6972 err_free_stats:
6973 	free_percpu(port->stats);
6974 err_free_irq:
6975 	if (port->port_irq)
6976 		irq_dispose_mapping(port->port_irq);
6977 err_deinit_qvecs:
6978 	mvpp2_queue_vectors_deinit(port);
6979 err_free_netdev:
6980 	free_netdev(dev);
6981 	return err;
6982 }
6983 
6984 /* Ports removal routine */
6985 static void mvpp2_port_remove(struct mvpp2_port *port)
6986 {
6987 	int i;
6988 
6989 	unregister_netdev(port->dev);
6990 	if (port->phylink)
6991 		phylink_destroy(port->phylink);
6992 	free_percpu(port->pcpu);
6993 	free_percpu(port->stats);
6994 	for (i = 0; i < port->ntxqs; i++)
6995 		free_percpu(port->txqs[i]->pcpu);
6996 	mvpp2_queue_vectors_deinit(port);
6997 	if (port->port_irq)
6998 		irq_dispose_mapping(port->port_irq);
6999 	free_netdev(port->dev);
7000 }
7001 
7002 /* Initialize decoding windows */
7003 static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
7004 				    struct mvpp2 *priv)
7005 {
7006 	u32 win_enable;
7007 	int i;
7008 
7009 	for (i = 0; i < 6; i++) {
7010 		mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
7011 		mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
7012 
7013 		if (i < 4)
7014 			mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
7015 	}
7016 
7017 	win_enable = 0;
7018 
7019 	for (i = 0; i < dram->num_cs; i++) {
7020 		const struct mbus_dram_window *cs = dram->cs + i;
7021 
7022 		mvpp2_write(priv, MVPP2_WIN_BASE(i),
7023 			    (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
7024 			    dram->mbus_dram_target_id);
7025 
7026 		mvpp2_write(priv, MVPP2_WIN_SIZE(i),
7027 			    (cs->size - 1) & 0xffff0000);
7028 
7029 		win_enable |= (1 << i);
7030 	}
7031 
7032 	mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
7033 }
7034 
/* Initialize Rx FIFOs */
7036 static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
7037 {
7038 	int port;
7039 
7040 	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
7041 		mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
7042 			    MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7043 		mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
7044 			    MVPP2_RX_FIFO_PORT_ATTR_SIZE_4KB);
7045 	}
7046 
7047 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7048 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7049 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7050 }
7051 
7052 static void mvpp22_rx_fifo_set_hw(struct mvpp2 *priv, int port, int data_size)
7053 {
7054 	int attr_size = MVPP2_RX_FIFO_PORT_ATTR_SIZE(data_size);
7055 
7056 	mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), data_size);
7057 	mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), attr_size);
7058 }
7059 
/* Initialize Rx FIFOs: the total FIFO size is 48kB on PPv2.2 and PPv2.3.
 * 4kB of fixed space must be assigned to the loopback port.
 * Redistribute the remaining available 44kB among all active ports.
 * Guarantee a minimum of 32kB for the 10G port and 8kB for port 1, which
 * is capable of a 2.5G SGMII link.
 */
7066 static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
7067 {
7068 	int remaining_ports_count;
7069 	unsigned long port_map;
7070 	int size_remainder;
7071 	int port, size;
7072 
	/* The loopback port requires a fixed 4kB of FIFO space. */
7074 	mvpp22_rx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7075 			      MVPP2_RX_FIFO_PORT_DATA_SIZE_4KB);
7076 	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7077 
7078 	/* Set RX FIFO size to 0 for inactive ports. */
7079 	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7080 		mvpp22_rx_fifo_set_hw(priv, port, 0);
7081 
7082 	/* Assign remaining RX FIFO space among all active ports. */
7083 	size_remainder = MVPP2_RX_FIFO_PORT_DATA_SIZE_44KB;
7084 	remaining_ports_count = hweight_long(port_map);
7085 
7086 	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
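		/* Example: with ports 0-2 active, port 0 takes
		 * max(44/3, 32) = 32kB, port 1 takes max(12/2, 8) = 8kB and
		 * port 2 the remaining 4kB.
		 */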
7087 		if (remaining_ports_count == 1)
7088 			size = size_remainder;
7089 		else if (port == 0)
7090 			size = max(size_remainder / remaining_ports_count,
7091 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_32KB);
7092 		else if (port == 1)
7093 			size = max(size_remainder / remaining_ports_count,
7094 				   MVPP2_RX_FIFO_PORT_DATA_SIZE_8KB);
7095 		else
7096 			size = size_remainder / remaining_ports_count;
7097 
7098 		size_remainder -= size;
7099 		remaining_ports_count--;
7100 
7101 		mvpp22_rx_fifo_set_hw(priv, port, size);
7102 	}
7103 
7104 	mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
7105 		    MVPP2_RX_FIFO_PORT_MIN_PKT);
7106 	mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
7107 }
7108 
/* Configure Rx FIFO Flow control thresholds */
static void mvpp23_rx_fifo_fc_set_thresh(struct mvpp2 *priv)
{
	int port, val;

	/* RX FIFO thresholds required by the spec:
	 * Port 0 (maximum speed 10Gb/s): 9KB
	 * Port 1 (maximum speed 5Gb/s):  4KB
	 * Port 2 (maximum speed 1Gb/s):  2KB
	 */

	/* Without loopback port */
	for (port = 0; port < (MVPP2_MAX_PORTS - 1); port++) {
		if (port == 0)
			val = (MVPP23_PORT0_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
		else if (port == 1)
			val = (MVPP23_PORT1_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
		else
			val = (MVPP23_PORT2_FIFO_TRSH / MVPP2_RX_FC_TRSH_UNIT)
				<< MVPP2_RX_FC_TRSH_OFFS;
		val &= MVPP2_RX_FC_TRSH_MASK;
		mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
	}
}
7142 
/* Enable or disable Rx FIFO flow control for a port */
7144 void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en)
7145 {
7146 	int val;
7147 
7148 	val = mvpp2_read(priv, MVPP2_RX_FC_REG(port));
7149 
7150 	if (en)
7151 		val |= MVPP2_RX_FC_EN;
7152 	else
7153 		val &= ~MVPP2_RX_FC_EN;
7154 
7155 	mvpp2_write(priv, MVPP2_RX_FC_REG(port), val);
7156 }
7157 
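/* Program a port's TX FIFO size along with the threshold value derived
 * from it.
 */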
7158 static void mvpp22_tx_fifo_set_hw(struct mvpp2 *priv, int port, int size)
7159 {
7160 	int threshold = MVPP2_TX_FIFO_THRESHOLD(size);
7161 
7162 	mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
7163 	mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), threshold);
7164 }
7165 
/* Initialize TX FIFOs: the total FIFO size is 19kB on PPv2.2 and PPv2.3.
 * 1kB of fixed space must be assigned to the loopback port.
 * Redistribute the remaining available 18kB among all active ports.
 * The 10G interface should use 10kB (the maximum possible size per
 * single port).
 */
7172 static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
7173 {
7174 	int remaining_ports_count;
7175 	unsigned long port_map;
7176 	int size_remainder;
7177 	int port, size;
7178 
	/* The loopback port requires a fixed 1kB of FIFO space. */
7180 	mvpp22_tx_fifo_set_hw(priv, MVPP2_LOOPBACK_PORT_INDEX,
7181 			      MVPP22_TX_FIFO_DATA_SIZE_1KB);
7182 	port_map = priv->port_map & ~BIT(MVPP2_LOOPBACK_PORT_INDEX);
7183 
7184 	/* Set TX FIFO size to 0 for inactive ports. */
7185 	for_each_clear_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX)
7186 		mvpp22_tx_fifo_set_hw(priv, port, 0);
7187 
7188 	/* Assign remaining TX FIFO space among all active ports. */
7189 	size_remainder = MVPP22_TX_FIFO_DATA_SIZE_18KB;
7190 	remaining_ports_count = hweight_long(port_map);
7191 
7192 	for_each_set_bit(port, &port_map, MVPP2_LOOPBACK_PORT_INDEX) {
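		/* Example: with ports 0-2 active, port 0 takes its fixed
		 * 10kB, port 1 takes 8/2 = 4kB and port 2 the remaining
		 * 4kB.
		 */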
7193 		if (remaining_ports_count == 1)
7194 			size = min(size_remainder,
7195 				   MVPP22_TX_FIFO_DATA_SIZE_10KB);
7196 		else if (port == 0)
7197 			size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
7198 		else
7199 			size = size_remainder / remaining_ports_count;
7200 
7201 		size_remainder -= size;
7202 		remaining_ports_count--;
7203 
7204 		mvpp22_tx_fifo_set_hw(priv, port, size);
7205 	}
7206 }
7207 
7208 static void mvpp2_axi_init(struct mvpp2 *priv)
7209 {
7210 	u32 val, rdval, wrval;
7211 
7212 	mvpp2_write(priv, MVPP22_BM_ADDR_HIGH_RLS_REG, 0x0);
7213 
7214 	/* AXI Bridge Configuration */
7215 
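	/* Issue reads and writes as cacheable transactions in the outer
	 * domain; the same attributes are applied below to BM, descriptor
	 * and packet data accesses.
	 */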
7216 	rdval = MVPP22_AXI_CODE_CACHE_RD_CACHE
7217 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
7218 	rdval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7219 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
7220 
7221 	wrval = MVPP22_AXI_CODE_CACHE_WR_CACHE
7222 		<< MVPP22_AXI_ATTR_CACHE_OFFS;
7223 	wrval |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7224 		<< MVPP22_AXI_ATTR_DOMAIN_OFFS;
7225 
7226 	/* BM */
7227 	mvpp2_write(priv, MVPP22_AXI_BM_WR_ATTR_REG, wrval);
7228 	mvpp2_write(priv, MVPP22_AXI_BM_RD_ATTR_REG, rdval);
7229 
7230 	/* Descriptors */
7231 	mvpp2_write(priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, rdval);
7232 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, wrval);
7233 	mvpp2_write(priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, rdval);
7234 	mvpp2_write(priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, wrval);
7235 
7236 	/* Buffer Data */
7237 	mvpp2_write(priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, rdval);
7238 	mvpp2_write(priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, wrval);
7239 
7240 	val = MVPP22_AXI_CODE_CACHE_NON_CACHE
7241 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7242 	val |= MVPP22_AXI_CODE_DOMAIN_SYSTEM
7243 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7244 	mvpp2_write(priv, MVPP22_AXI_RD_NORMAL_CODE_REG, val);
7245 	mvpp2_write(priv, MVPP22_AXI_WR_NORMAL_CODE_REG, val);
7246 
7247 	val = MVPP22_AXI_CODE_CACHE_RD_CACHE
7248 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7249 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7250 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7251 
7252 	mvpp2_write(priv, MVPP22_AXI_RD_SNOOP_CODE_REG, val);
7253 
7254 	val = MVPP22_AXI_CODE_CACHE_WR_CACHE
7255 		<< MVPP22_AXI_CODE_CACHE_OFFS;
7256 	val |= MVPP22_AXI_CODE_DOMAIN_OUTER_DOM
7257 		<< MVPP22_AXI_CODE_DOMAIN_OFFS;
7258 
7259 	mvpp2_write(priv, MVPP22_AXI_WR_SNOOP_CODE_REG, val);
7260 }
7261 
7262 /* Initialize network controller common part HW */
7263 static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
7264 {
7265 	const struct mbus_dram_target_info *dram_target_info;
7266 	int err, i;
7267 	u32 val;
7268 
7269 	/* MBUS windows configuration */
7270 	dram_target_info = mv_mbus_dram_info();
7271 	if (dram_target_info)
7272 		mvpp2_conf_mbus_windows(dram_target_info, priv);
7273 
7274 	if (priv->hw_version >= MVPP22)
7275 		mvpp2_axi_init(priv);
7276 
7277 	/* Disable HW PHY polling */
7278 	if (priv->hw_version == MVPP21) {
7279 		val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7280 		val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
7281 		writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
7282 	} else {
7283 		val = readl(priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7284 		val &= ~MVPP22_SMI_POLLING_EN;
7285 		writel(val, priv->iface_base + MVPP22_SMI_MISC_CFG_REG);
7286 	}
7287 
7288 	/* Allocate and initialize aggregated TXQs */
7289 	priv->aggr_txqs = devm_kcalloc(&pdev->dev, MVPP2_MAX_THREADS,
7290 				       sizeof(*priv->aggr_txqs),
7291 				       GFP_KERNEL);
7292 	if (!priv->aggr_txqs)
7293 		return -ENOMEM;
7294 
7295 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7296 		priv->aggr_txqs[i].id = i;
7297 		priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
7298 		err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i], i, priv);
7299 		if (err < 0)
7300 			return err;
7301 	}
7302 
7303 	/* Fifo Init */
7304 	if (priv->hw_version == MVPP21) {
7305 		mvpp2_rx_fifo_init(priv);
7306 	} else {
7307 		mvpp22_rx_fifo_init(priv);
7308 		mvpp22_tx_fifo_init(priv);
7309 		if (priv->hw_version == MVPP23)
			mvpp23_rx_fifo_fc_set_thresh(priv);
7311 	}
7312 
7313 	if (priv->hw_version == MVPP21)
7314 		writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
7315 		       priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
7316 
	/* Allow cache snooping when transmitting packets */
7318 	mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
7319 
7320 	/* Buffer Manager initialization */
7321 	err = mvpp2_bm_init(&pdev->dev, priv);
7322 	if (err < 0)
7323 		return err;
7324 
7325 	/* Parser default initialization */
7326 	err = mvpp2_prs_default_init(pdev, priv);
7327 	if (err < 0)
7328 		return err;
7329 
7330 	/* Classifier default initialization */
7331 	mvpp2_cls_init(priv);
7332 
7333 	return 0;
7334 }
7335 
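/* Map the optional CM3 SRAM region used for flow control. A missing
 * resource only means an older DT/ACPI description, so the probe
 * continues with flow control unsupported.
 */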
7336 static int mvpp2_get_sram(struct platform_device *pdev,
7337 			  struct mvpp2 *priv)
7338 {
7339 	struct resource *res;
7340 
7341 	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
7342 	if (!res) {
7343 		if (has_acpi_companion(&pdev->dev))
7344 			dev_warn(&pdev->dev, "ACPI is too old, Flow control not supported\n");
7345 		else
7346 			dev_warn(&pdev->dev, "DT is too old, Flow control not supported\n");
7347 		return 0;
7348 	}
7349 
7350 	priv->cm3_base = devm_ioremap_resource(&pdev->dev, res);
7351 
7352 	return PTR_ERR_OR_ZERO(priv->cm3_base);
7353 }
7354 
7355 static int mvpp2_probe(struct platform_device *pdev)
7356 {
7357 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
7358 	struct fwnode_handle *port_fwnode;
7359 	struct mvpp2 *priv;
7360 	struct resource *res;
7361 	void __iomem *base;
7362 	int i, shared;
7363 	int err;
7364 
7365 	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
7366 	if (!priv)
7367 		return -ENOMEM;
7368 
7369 	priv->hw_version = (unsigned long)device_get_match_data(&pdev->dev);
7370 
	/* Multi-queue mode isn't supported on PPv2.1, fall back to single
	 * mode
	 */
7374 	if (priv->hw_version == MVPP21)
7375 		queue_mode = MVPP2_QDIST_SINGLE_MODE;
7376 
7377 	base = devm_platform_ioremap_resource(pdev, 0);
7378 	if (IS_ERR(base))
7379 		return PTR_ERR(base);
7380 
7381 	if (priv->hw_version == MVPP21) {
7382 		priv->lms_base = devm_platform_ioremap_resource(pdev, 1);
7383 		if (IS_ERR(priv->lms_base))
7384 			return PTR_ERR(priv->lms_base);
7385 	} else {
7386 		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
7387 		if (!res) {
7388 			dev_err(&pdev->dev, "Invalid resource\n");
7389 			return -EINVAL;
7390 		}
7391 		if (has_acpi_companion(&pdev->dev)) {
			/* If the MDIO memory region is declared in ACPI, it
			 * may already appear as 'in-use' in the OS, because
			 * it is overlapped by the second region of the
			 * network controller. Make sure it is released
			 * before requesting it again. The mvpp2 driver takes
			 * care to avoid concurrent access to this memory
			 * region.
			 */
7400 			release_resource(res);
7401 		}
7402 		priv->iface_base = devm_ioremap_resource(&pdev->dev, res);
7403 		if (IS_ERR(priv->iface_base))
7404 			return PTR_ERR(priv->iface_base);
7405 
7406 		/* Map CM3 SRAM */
7407 		err = mvpp2_get_sram(pdev, priv);
7408 		if (err)
			dev_warn(&pdev->dev, "Failed to map CM3 SRAM\n");
7410 
		/* Enable global flow control only if the CM3 SRAM was mapped */
7412 		if (priv->cm3_base)
7413 			priv->global_tx_fc = true;
7414 	}
7415 
7416 	if (priv->hw_version >= MVPP22 && dev_of_node(&pdev->dev)) {
7417 		priv->sysctrl_base =
7418 			syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
7419 							"marvell,system-controller");
7420 		if (IS_ERR(priv->sysctrl_base))
7421 			/* The system controller regmap is optional for dt
7422 			 * compatibility reasons. When not provided, the
7423 			 * configuration of the GoP relies on the
7424 			 * firmware/bootloader.
7425 			 */
7426 			priv->sysctrl_base = NULL;
7427 	}
7428 
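	/* On PPv2.2 and later, use per-CPU buffer pools if a long and a
	 * short pool per RXQ still fit in the number of BM pools available.
	 */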
7429 	if (priv->hw_version >= MVPP22 &&
7430 	    mvpp2_get_nrxqs(priv) * 2 <= MVPP2_BM_MAX_POOLS)
7431 		priv->percpu_pools = 1;
7432 
	mvpp2_setup_bm_pool();

7436 	priv->nthreads = min_t(unsigned int, num_present_cpus(),
7437 			       MVPP2_MAX_THREADS);
7438 
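	/* CPUs in excess of the number of address-space threads must share
	 * a thread; mark the shared threads so accesses from several CPUs
	 * are serialized through lock_map.
	 */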
7439 	shared = num_present_cpus() - priv->nthreads;
7440 	if (shared > 0)
7441 		bitmap_fill(&priv->lock_map,
7442 			    min_t(int, shared, MVPP2_MAX_THREADS));
7443 
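	/* Carve one register window per thread out of the base region; on
	 * PPv2.1 the per-thread window size is 0, so all threads alias the
	 * same registers.
	 */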
7444 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7445 		u32 addr_space_sz;
7446 
7447 		addr_space_sz = (priv->hw_version == MVPP21 ?
7448 				 MVPP21_ADDR_SPACE_SZ : MVPP22_ADDR_SPACE_SZ);
7449 		priv->swth_base[i] = base + i * addr_space_sz;
7450 	}
7451 
7452 	if (priv->hw_version == MVPP21)
7453 		priv->max_port_rxqs = 8;
7454 	else
7455 		priv->max_port_rxqs = 32;
7456 
7457 	if (dev_of_node(&pdev->dev)) {
7458 		priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
7459 		if (IS_ERR(priv->pp_clk))
7460 			return PTR_ERR(priv->pp_clk);
7461 		err = clk_prepare_enable(priv->pp_clk);
7462 		if (err < 0)
7463 			return err;
7464 
7465 		priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
7466 		if (IS_ERR(priv->gop_clk)) {
7467 			err = PTR_ERR(priv->gop_clk);
7468 			goto err_pp_clk;
7469 		}
7470 		err = clk_prepare_enable(priv->gop_clk);
7471 		if (err < 0)
7472 			goto err_pp_clk;
7473 
7474 		if (priv->hw_version >= MVPP22) {
7475 			priv->mg_clk = devm_clk_get(&pdev->dev, "mg_clk");
7476 			if (IS_ERR(priv->mg_clk)) {
7477 				err = PTR_ERR(priv->mg_clk);
7478 				goto err_gop_clk;
7479 			}
7480 
7481 			err = clk_prepare_enable(priv->mg_clk);
7482 			if (err < 0)
7483 				goto err_gop_clk;
7484 
7485 			priv->mg_core_clk = devm_clk_get_optional(&pdev->dev, "mg_core_clk");
7486 			if (IS_ERR(priv->mg_core_clk)) {
7487 				err = PTR_ERR(priv->mg_core_clk);
7488 				goto err_mg_clk;
7489 			}
7490 
7491 			err = clk_prepare_enable(priv->mg_core_clk);
7492 			if (err < 0)
7493 				goto err_mg_clk;
7494 		}
7495 
7496 		priv->axi_clk = devm_clk_get_optional(&pdev->dev, "axi_clk");
7497 		if (IS_ERR(priv->axi_clk)) {
7498 			err = PTR_ERR(priv->axi_clk);
7499 			goto err_mg_core_clk;
7500 		}
7501 
7502 		err = clk_prepare_enable(priv->axi_clk);
7503 		if (err < 0)
7504 			goto err_mg_core_clk;
7505 
7506 		/* Get system's tclk rate */
7507 		priv->tclk = clk_get_rate(priv->pp_clk);
7508 	} else {
7509 		err = device_property_read_u32(&pdev->dev, "clock-frequency", &priv->tclk);
7510 		if (err) {
7511 			dev_err(&pdev->dev, "missing clock-frequency value\n");
7512 			return err;
7513 		}
7514 	}
7515 
7516 	if (priv->hw_version >= MVPP22) {
7517 		err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK);
7518 		if (err)
7519 			goto err_axi_clk;
7520 		/* Sadly, the BM pools all share the same register to
7521 		 * store the high 32 bits of their address. So they
7522 		 * must all have the same high 32 bits, which forces
7523 		 * us to restrict coherent memory to DMA_BIT_MASK(32).
7524 		 */
7525 		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
7526 		if (err)
7527 			goto err_axi_clk;
7528 	}
7529 
	/* Map the active ports declared in DT/ACPI. Must be done before the
	 * FIFO configuration in mvpp2_init().
	 */
7531 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7532 		if (!fwnode_property_read_u32(port_fwnode, "port-id", &i))
7533 			priv->port_map |= BIT(i);
7534 	}
7535 
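	/* PPv2.3 shares the PPv2.2 compatible string and ACPI ID, so it can
	 * only be distinguished at runtime via the version ID register.
	 */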
7536 	if (mvpp2_read(priv, MVPP2_VER_ID_REG) == MVPP2_VER_PP23)
7537 		priv->hw_version = MVPP23;
7538 
	/* Init the MSS spinlock protecting shared flow control configuration */
7540 	spin_lock_init(&priv->mss_spinlock);
7541 
7542 	/* Initialize network controller */
7543 	err = mvpp2_init(pdev, priv);
7544 	if (err < 0) {
7545 		dev_err(&pdev->dev, "failed to initialize controller\n");
7546 		goto err_axi_clk;
7547 	}
7548 
7549 	err = mvpp22_tai_probe(&pdev->dev, priv);
7550 	if (err < 0)
7551 		goto err_axi_clk;
7552 
7553 	/* Initialize ports */
7554 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7555 		err = mvpp2_port_probe(pdev, port_fwnode, priv);
7556 		if (err < 0)
7557 			goto err_port_probe;
7558 	}
7559 
7560 	if (priv->port_count == 0) {
7561 		dev_err(&pdev->dev, "no ports enabled\n");
7562 		err = -ENODEV;
7563 		goto err_axi_clk;
7564 	}
7565 
	/* Statistics must be gathered regularly because some of them (like
	 * packet counters) are 32-bit registers and could overflow quite
	 * quickly. For instance, a 10Gb link used at full bandwidth will
	 * overflow a 32-bit octet counter in under 4 seconds. Hence, use a
	 * workqueue to maintain 64-bit counters.
	 */
7572 	snprintf(priv->queue_name, sizeof(priv->queue_name),
7573 		 "stats-wq-%s%s", netdev_name(priv->port_list[0]->dev),
7574 		 priv->port_count > 1 ? "+" : "");
7575 	priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
7576 	if (!priv->stats_queue) {
7577 		err = -ENOMEM;
7578 		goto err_port_probe;
7579 	}
7580 
7581 	if (priv->global_tx_fc && priv->hw_version >= MVPP22) {
7582 		err = mvpp2_enable_global_fc(priv);
7583 		if (err)
7584 			dev_warn(&pdev->dev, "Minimum of CM3 firmware 18.09 and chip revision B0 required for flow control\n");
7585 	}
7586 
7587 	mvpp2_dbgfs_init(priv, pdev->name);
7588 
7589 	platform_set_drvdata(pdev, priv);
7590 	return 0;
7591 
7592 err_port_probe:
7593 	fwnode_handle_put(port_fwnode);
7594 
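	/* Walk the child nodes again and tear down any ports that were
	 * successfully probed before the failure.
	 */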
7595 	i = 0;
7596 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7597 		if (priv->port_list[i])
7598 			mvpp2_port_remove(priv->port_list[i]);
7599 		i++;
7600 	}
7601 err_axi_clk:
7602 	clk_disable_unprepare(priv->axi_clk);
7603 err_mg_core_clk:
7604 	clk_disable_unprepare(priv->mg_core_clk);
7605 err_mg_clk:
7606 	clk_disable_unprepare(priv->mg_clk);
7607 err_gop_clk:
7608 	clk_disable_unprepare(priv->gop_clk);
7609 err_pp_clk:
7610 	clk_disable_unprepare(priv->pp_clk);
7611 	return err;
7612 }
7613 
7614 static int mvpp2_remove(struct platform_device *pdev)
7615 {
7616 	struct mvpp2 *priv = platform_get_drvdata(pdev);
7617 	struct fwnode_handle *fwnode = pdev->dev.fwnode;
7618 	int i = 0, poolnum = MVPP2_BM_POOLS_NUM;
7619 	struct fwnode_handle *port_fwnode;
7620 
7621 	mvpp2_dbgfs_cleanup(priv);
7622 
7623 	fwnode_for_each_available_child_node(fwnode, port_fwnode) {
7624 		if (priv->port_list[i]) {
7625 			mutex_destroy(&priv->port_list[i]->gather_stats_lock);
7626 			mvpp2_port_remove(priv->port_list[i]);
7627 		}
7628 		i++;
7629 	}
7630 
7631 	destroy_workqueue(priv->stats_queue);
7632 
7633 	if (priv->percpu_pools)
7634 		poolnum = mvpp2_get_nrxqs(priv) * 2;
7635 
7636 	for (i = 0; i < poolnum; i++) {
7637 		struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
7638 
7639 		mvpp2_bm_pool_destroy(&pdev->dev, priv, bm_pool);
7640 	}
7641 
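	/* Free the aggregated TXQ descriptor rings allocated coherently in
	 * mvpp2_aggr_txq_init().
	 */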
7642 	for (i = 0; i < MVPP2_MAX_THREADS; i++) {
7643 		struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
7644 
7645 		dma_free_coherent(&pdev->dev,
7646 				  MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
7647 				  aggr_txq->descs,
7648 				  aggr_txq->descs_dma);
7649 	}
7650 
7651 	if (is_acpi_node(port_fwnode))
7652 		return 0;
7653 
7654 	clk_disable_unprepare(priv->axi_clk);
7655 	clk_disable_unprepare(priv->mg_core_clk);
7656 	clk_disable_unprepare(priv->mg_clk);
7657 	clk_disable_unprepare(priv->pp_clk);
7658 	clk_disable_unprepare(priv->gop_clk);
7659 
7660 	return 0;
7661 }
7662 
7663 static const struct of_device_id mvpp2_match[] = {
7664 	{
7665 		.compatible = "marvell,armada-375-pp2",
7666 		.data = (void *)MVPP21,
7667 	},
7668 	{
7669 		.compatible = "marvell,armada-7k-pp22",
7670 		.data = (void *)MVPP22,
7671 	},
7672 	{ }
7673 };
7674 MODULE_DEVICE_TABLE(of, mvpp2_match);
7675 
7676 #ifdef CONFIG_ACPI
7677 static const struct acpi_device_id mvpp2_acpi_match[] = {
7678 	{ "MRVL0110", MVPP22 },
7679 	{ },
7680 };
7681 MODULE_DEVICE_TABLE(acpi, mvpp2_acpi_match);
7682 #endif
7683 
7684 static struct platform_driver mvpp2_driver = {
7685 	.probe = mvpp2_probe,
7686 	.remove = mvpp2_remove,
7687 	.driver = {
7688 		.name = MVPP2_DRIVER_NAME,
7689 		.of_match_table = mvpp2_match,
7690 		.acpi_match_table = ACPI_PTR(mvpp2_acpi_match),
7691 	},
7692 };
7693 
7694 module_platform_driver(mvpp2_driver);
7695 
7696 MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
7697 MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
7698 MODULE_LICENSE("GPL v2");
7699