1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2020 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <net/sock.h>
19 
20 #include "dpaa2-eth.h"
21 
22 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
23  * using these trace events only need to #include "dpaa2-eth-trace.h"
24  */
25 #define CREATE_TRACE_POINTS
26 #include "dpaa2-eth-trace.h"
27 
28 MODULE_LICENSE("Dual BSD/GPL");
29 MODULE_AUTHOR("Freescale Semiconductor, Inc");
30 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
31 
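/* Translate an IOVA found in a frame descriptor or S/G entry into a kernel
 * virtual address: if an IOMMU domain is in use, first resolve the IOVA to a
 * physical address, otherwise the two are identical.
 */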
32 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
33 				dma_addr_t iova_addr)
34 {
35 	phys_addr_t phys_addr;
36 
37 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
38 
39 	return phys_to_virt(phys_addr);
40 }
41 
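/* Mark the skb with CHECKSUM_UNNECESSARY if hardware reports both the L3 and
 * L4 checksums as valid in the frame annotation status word; otherwise leave
 * checksum verification to the stack.
 */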
42 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
43 			     u32 fd_status,
44 			     struct sk_buff *skb)
45 {
46 	skb_checksum_none_assert(skb);
47 
48 	/* HW checksum validation is disabled, nothing to do here */
49 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
50 		return;
51 
52 	/* Read checksum validation bits */
53 	if (!((fd_status & DPAA2_FAS_L3CV) &&
54 	      (fd_status & DPAA2_FAS_L4CV)))
55 		return;
56 
57 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
58 	skb->ip_summed = CHECKSUM_UNNECESSARY;
59 }
60 
61 /* Free a received FD.
62  * Not to be used for Tx conf FDs or on any other paths.
63  */
64 static void free_rx_fd(struct dpaa2_eth_priv *priv,
65 		       const struct dpaa2_fd *fd,
66 		       void *vaddr)
67 {
68 	struct device *dev = priv->net_dev->dev.parent;
69 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
70 	u8 fd_format = dpaa2_fd_get_format(fd);
71 	struct dpaa2_sg_entry *sgt;
72 	void *sg_vaddr;
73 	int i;
74 
75 	/* If single buffer frame, just free the data buffer */
76 	if (fd_format == dpaa2_fd_single)
77 		goto free_buf;
78 	else if (fd_format != dpaa2_fd_sg)
79 		/* We don't support any other format */
80 		return;
81 
82 	/* For S/G frames, we first need to free all SG entries
83 	 * except the first one, which was taken care of already
84 	 */
85 	sgt = vaddr + dpaa2_fd_get_offset(fd);
86 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
87 		addr = dpaa2_sg_get_addr(&sgt[i]);
88 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
89 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
90 			       DMA_BIDIRECTIONAL);
91 
92 		free_pages((unsigned long)sg_vaddr, 0);
93 		if (dpaa2_sg_is_final(&sgt[i]))
94 			break;
95 	}
96 
97 free_buf:
98 	free_pages((unsigned long)vaddr, 0);
99 }
100 
101 /* Build a linear skb based on a single-buffer frame descriptor */
102 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
103 					const struct dpaa2_fd *fd,
104 					void *fd_vaddr)
105 {
106 	struct sk_buff *skb = NULL;
107 	u16 fd_offset = dpaa2_fd_get_offset(fd);
108 	u32 fd_length = dpaa2_fd_get_len(fd);
109 
110 	ch->buf_count--;
111 
112 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
113 	if (unlikely(!skb))
114 		return NULL;
115 
116 	skb_reserve(skb, fd_offset);
117 	skb_put(skb, fd_length);
118 
119 	return skb;
120 }
121 
122 /* Build a non-linear (fragmented) skb based on an S/G table */
123 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 				      struct dpaa2_eth_channel *ch,
125 				      struct dpaa2_sg_entry *sgt)
126 {
127 	struct sk_buff *skb = NULL;
128 	struct device *dev = priv->net_dev->dev.parent;
129 	void *sg_vaddr;
130 	dma_addr_t sg_addr;
131 	u16 sg_offset;
132 	u32 sg_length;
133 	struct page *page, *head_page;
134 	int page_offset;
135 	int i;
136 
137 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 		struct dpaa2_sg_entry *sge = &sgt[i];
139 
140 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
141 		 * but this is the only format we may receive from HW anyway
142 		 */
143 
144 		/* Get the address and length from the S/G entry */
145 		sg_addr = dpaa2_sg_get_addr(sge);
146 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 			       DMA_BIDIRECTIONAL);
149 
150 		sg_length = dpaa2_sg_get_len(sge);
151 
152 		if (i == 0) {
153 			/* We build the skb around the first data buffer */
154 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
155 			if (unlikely(!skb)) {
156 				/* Free the first SG entry now, since we already
157 				 * unmapped it and obtained the virtual address
158 				 */
159 				free_pages((unsigned long)sg_vaddr, 0);
160 
161 				/* We still need to subtract the buffers used
162 				 * by this FD from our software counter
163 				 */
164 				while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
165 				       !dpaa2_sg_is_final(&sgt[i]))
166 					i++;
167 				break;
168 			}
169 
170 			sg_offset = dpaa2_sg_get_offset(sge);
171 			skb_reserve(skb, sg_offset);
172 			skb_put(skb, sg_length);
173 		} else {
174 			/* Rest of the data buffers are stored as skb frags */
175 			page = virt_to_page(sg_vaddr);
176 			head_page = virt_to_head_page(sg_vaddr);
177 
178 			/* Offset in page (which may be compound).
179 			 * Data in subsequent SG entries is stored from the
180 			 * beginning of the buffer, so we don't need to add the
181 			 * sg_offset.
182 			 */
183 			page_offset = ((unsigned long)sg_vaddr &
184 				(PAGE_SIZE - 1)) +
185 				(page_address(page) - page_address(head_page));
186 
187 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 					sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 		}
190 
191 		if (dpaa2_sg_is_final(sge))
192 			break;
193 	}
194 
195 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196 
197 	/* Count all data buffers + SG table buffer */
198 	ch->buf_count -= i + 2;
199 
200 	return skb;
201 }
202 
203 /* Free buffers acquired from the buffer pool or which were meant to
204  * be released into the pool
205  */
206 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
207 {
208 	struct device *dev = priv->net_dev->dev.parent;
209 	void *vaddr;
210 	int i;
211 
212 	for (i = 0; i < count; i++) {
213 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
214 		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
215 			       DMA_BIDIRECTIONAL);
216 		free_pages((unsigned long)vaddr, 0);
217 	}
218 }
219 
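/* Buffers dropped by XDP are batched per channel and released back to the
 * buffer pool once a full command's worth has accumulated. If the release
 * itself fails, free the buffers back to the kernel instead.
 */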
220 static void xdp_release_buf(struct dpaa2_eth_priv *priv,
221 			    struct dpaa2_eth_channel *ch,
222 			    dma_addr_t addr)
223 {
224 	int retries = 0;
225 	int err;
226 
227 	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
228 	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
229 		return;
230 
231 	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
232 					       ch->xdp.drop_bufs,
233 					       ch->xdp.drop_cnt)) == -EBUSY) {
234 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
235 			break;
236 		cpu_relax();
237 	}
238 
239 	if (err) {
240 		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
241 		ch->buf_count -= ch->xdp.drop_cnt;
242 	}
243 
244 	ch->xdp.drop_cnt = 0;
245 }
246 
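/* Enqueue an XDP_TX frame on the Tx queue with the same index as the Rx queue
 * it arrived on. The frame annotation is set up so hardware returns the buffer
 * straight to the pool instead of issuing a Tx confirmation.
 */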
247 static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
248 		       void *buf_start, u16 queue_id)
249 {
250 	struct dpaa2_eth_fq *fq;
251 	struct dpaa2_faead *faead;
252 	u32 ctrl, frc;
253 	int i, err;
254 
255 	/* Mark the egress frame hardware annotation area as valid */
256 	frc = dpaa2_fd_get_frc(fd);
257 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
258 	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
259 
260 	/* Instruct hardware to release the FD buffer directly into
261 	 * the buffer pool once transmission is completed, instead of
262 	 * sending a Tx confirmation frame to us
263 	 */
264 	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
265 	faead = dpaa2_get_faead(buf_start, false);
266 	faead->ctrl = cpu_to_le32(ctrl);
267 	faead->conf_fqid = 0;
268 
269 	fq = &priv->fq[queue_id];
270 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
271 		err = priv->enqueue(priv, fq, fd, 0, 1, NULL);
272 		if (err != -EBUSY)
273 			break;
274 	}
275 
276 	return err;
277 }
278 
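/* Run the XDP program attached to this channel, if any, on a single-buffer Rx
 * frame and act on its verdict (PASS, TX, REDIRECT, DROP or ABORTED). Returns
 * the XDP action so the caller knows whether an skb still needs to be built.
 */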
279 static u32 run_xdp(struct dpaa2_eth_priv *priv,
280 		   struct dpaa2_eth_channel *ch,
281 		   struct dpaa2_eth_fq *rx_fq,
282 		   struct dpaa2_fd *fd, void *vaddr)
283 {
284 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
285 	struct rtnl_link_stats64 *percpu_stats;
286 	struct bpf_prog *xdp_prog;
287 	struct xdp_buff xdp;
288 	u32 xdp_act = XDP_PASS;
289 	int err;
290 
291 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
292 
293 	rcu_read_lock();
294 
295 	xdp_prog = READ_ONCE(ch->xdp.prog);
296 	if (!xdp_prog)
297 		goto out;
298 
299 	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
300 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
301 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
302 	xdp_set_data_meta_invalid(&xdp);
303 	xdp.rxq = &ch->xdp_rxq;
304 
305 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
306 
307 	/* xdp.data pointer may have changed */
308 	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
309 	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
310 
311 	switch (xdp_act) {
312 	case XDP_PASS:
313 		break;
314 	case XDP_TX:
315 		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
316 		if (err) {
317 			xdp_release_buf(priv, ch, addr);
318 			percpu_stats->tx_errors++;
319 			ch->stats.xdp_tx_err++;
320 		} else {
321 			percpu_stats->tx_packets++;
322 			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
323 			ch->stats.xdp_tx++;
324 		}
325 		break;
326 	default:
327 		bpf_warn_invalid_xdp_action(xdp_act);
328 		/* fall through */
329 	case XDP_ABORTED:
330 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
331 		/* fall through */
332 	case XDP_DROP:
333 		xdp_release_buf(priv, ch, addr);
334 		ch->stats.xdp_drop++;
335 		break;
336 	case XDP_REDIRECT:
337 		dma_unmap_page(priv->net_dev->dev.parent, addr,
338 			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
339 		ch->buf_count--;
340 		xdp.data_hard_start = vaddr;
341 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
342 		if (unlikely(err))
343 			ch->stats.xdp_drop++;
344 		else
345 			ch->stats.xdp_redirect++;
346 		break;
347 	}
348 
349 	ch->xdp.res |= xdp_act;
350 out:
351 	rcu_read_unlock();
352 	return xdp_act;
353 }
354 
355 /* Main Rx frame processing routine */
356 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
357 			 struct dpaa2_eth_channel *ch,
358 			 const struct dpaa2_fd *fd,
359 			 struct dpaa2_eth_fq *fq)
360 {
361 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
362 	u8 fd_format = dpaa2_fd_get_format(fd);
363 	void *vaddr;
364 	struct sk_buff *skb;
365 	struct rtnl_link_stats64 *percpu_stats;
366 	struct dpaa2_eth_drv_stats *percpu_extras;
367 	struct device *dev = priv->net_dev->dev.parent;
368 	struct dpaa2_fas *fas;
369 	void *buf_data;
370 	u32 status = 0;
371 	u32 xdp_act;
372 
373 	/* Tracing point */
374 	trace_dpaa2_rx_fd(priv->net_dev, fd);
375 
376 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
377 	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
378 				DMA_BIDIRECTIONAL);
379 
380 	fas = dpaa2_get_fas(vaddr, false);
381 	prefetch(fas);
382 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
383 	prefetch(buf_data);
384 
385 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
386 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
387 
388 	if (fd_format == dpaa2_fd_single) {
389 		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
390 		if (xdp_act != XDP_PASS) {
391 			percpu_stats->rx_packets++;
392 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
393 			return;
394 		}
395 
396 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
397 			       DMA_BIDIRECTIONAL);
398 		skb = build_linear_skb(ch, fd, vaddr);
399 	} else if (fd_format == dpaa2_fd_sg) {
400 		WARN_ON(priv->xdp_prog);
401 
402 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
403 			       DMA_BIDIRECTIONAL);
404 		skb = build_frag_skb(priv, ch, buf_data);
405 		free_pages((unsigned long)vaddr, 0);
406 		percpu_extras->rx_sg_frames++;
407 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
408 	} else {
409 		/* We don't support any other format */
410 		goto err_frame_format;
411 	}
412 
413 	if (unlikely(!skb))
414 		goto err_build_skb;
415 
416 	prefetch(skb->data);
417 
418 	/* Get the timestamp value */
419 	if (priv->rx_tstamp) {
420 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
421 		__le64 *ts = dpaa2_get_ts(vaddr, false);
422 		u64 ns;
423 
424 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
425 
426 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
427 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
428 	}
429 
430 	/* Check if we need to validate the L4 csum */
431 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
432 		status = le32_to_cpu(fas->status);
433 		validate_rx_csum(priv, status, skb);
434 	}
435 
436 	skb->protocol = eth_type_trans(skb, priv->net_dev);
437 	skb_record_rx_queue(skb, fq->flowid);
438 
439 	percpu_stats->rx_packets++;
440 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
441 
442 	list_add_tail(&skb->list, ch->rx_list);
443 
444 	return;
445 
446 err_build_skb:
447 	free_rx_fd(priv, fd, vaddr);
448 err_frame_format:
449 	percpu_stats->rx_dropped++;
450 }
451 
452 /* Consume all frames pull-dequeued into the store. This is the simplest way to
453  * make sure we don't accidentally issue another volatile dequeue which would
454  * overwrite (leak) frames already in the store.
455  *
456  * Observance of NAPI budget is not our concern, leaving that to the caller.
457  */
458 static int consume_frames(struct dpaa2_eth_channel *ch,
459 			  struct dpaa2_eth_fq **src)
460 {
461 	struct dpaa2_eth_priv *priv = ch->priv;
462 	struct dpaa2_eth_fq *fq = NULL;
463 	struct dpaa2_dq *dq;
464 	const struct dpaa2_fd *fd;
465 	int cleaned = 0, retries = 0;
466 	int is_last;
467 
468 	do {
469 		dq = dpaa2_io_store_next(ch->store, &is_last);
470 		if (unlikely(!dq)) {
471 			/* If we're here, we *must* have placed a
472 			 * volatile dequeue command, so keep reading through
473 			 * the store until we get some sort of valid response
474 			 * token (either a valid frame or an "empty dequeue")
475 			 */
476 			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
477 				netdev_err_once(priv->net_dev,
478 						"Unable to read a valid dequeue response\n");
479 				return -ETIMEDOUT;
480 			}
481 			continue;
482 		}
483 
484 		fd = dpaa2_dq_fd(dq);
485 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
486 
487 		fq->consume(priv, ch, fd, fq);
488 		cleaned++;
489 		retries = 0;
490 	} while (!is_last);
491 
492 	if (!cleaned)
493 		return 0;
494 
495 	fq->stats.frames += cleaned;
496 
497 	/* A dequeue operation only pulls frames from a single queue
498 	 * into the store. Return the frame queue as an out param.
499 	 */
500 	if (src)
501 		*src = fq;
502 
503 	return cleaned;
504 }
505 
506 /* Configure the egress frame annotation for timestamp update */
507 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
508 {
509 	struct dpaa2_faead *faead;
510 	u32 ctrl, frc;
511 
512 	/* Mark the egress frame annotation area as valid */
513 	frc = dpaa2_fd_get_frc(fd);
514 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
515 
516 	/* Set hardware annotation size */
517 	ctrl = dpaa2_fd_get_ctrl(fd);
518 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
519 
520 	/* Enable UPD (update prepended data) bit in FAEAD field of
521 	 * hardware frame annotation area
522 	 */
523 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
524 	faead = dpaa2_get_faead(buf_start, true);
525 	faead->ctrl = cpu_to_le32(ctrl);
526 }
527 
528 /* Create a frame descriptor based on a fragmented skb */
529 static int build_sg_fd(struct dpaa2_eth_priv *priv,
530 		       struct sk_buff *skb,
531 		       struct dpaa2_fd *fd)
532 {
533 	struct device *dev = priv->net_dev->dev.parent;
534 	void *sgt_buf = NULL;
535 	dma_addr_t addr;
536 	int nr_frags = skb_shinfo(skb)->nr_frags;
537 	struct dpaa2_sg_entry *sgt;
538 	int i, err;
539 	int sgt_buf_size;
540 	struct scatterlist *scl, *crt_scl;
541 	int num_sg;
542 	int num_dma_bufs;
543 	struct dpaa2_eth_swa *swa;
544 
545 	/* Create and map scatterlist.
546 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
547 	 * to go beyond nr_frags+1.
548 	 * Note: We don't support chained scatterlists
549 	 */
550 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
551 		return -EINVAL;
552 
553 	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
554 	if (unlikely(!scl))
555 		return -ENOMEM;
556 
557 	sg_init_table(scl, nr_frags + 1);
558 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
559 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
560 	if (unlikely(!num_dma_bufs)) {
561 		err = -ENOMEM;
562 		goto dma_map_sg_failed;
563 	}
564 
565 	/* Prepare the HW SGT structure */
566 	sgt_buf_size = priv->tx_data_offset +
567 		       sizeof(struct dpaa2_sg_entry) *  num_dma_bufs;
568 	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
569 	if (unlikely(!sgt_buf)) {
570 		err = -ENOMEM;
571 		goto sgt_buf_alloc_failed;
572 	}
573 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
574 	memset(sgt_buf, 0, sgt_buf_size);
575 
576 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
577 
578 	/* Fill in the HW SGT structure.
579 	 *
580 	 * sgt_buf is zeroed out, so the following fields are implicit
581 	 * in all sgt entries:
582 	 *   - offset is 0
583 	 *   - format is 'dpaa2_sg_single'
584 	 */
585 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
586 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
587 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
588 	}
589 	dpaa2_sg_set_final(&sgt[i - 1], true);
590 
591 	/* Store the skb backpointer in the SGT buffer.
592 	 * Fit the scatterlist and the number of buffers alongside the
593 	 * skb backpointer in the software annotation area. We'll need
594 	 * all of them on Tx Conf.
595 	 */
596 	swa = (struct dpaa2_eth_swa *)sgt_buf;
597 	swa->type = DPAA2_ETH_SWA_SG;
598 	swa->sg.skb = skb;
599 	swa->sg.scl = scl;
600 	swa->sg.num_sg = num_sg;
601 	swa->sg.sgt_size = sgt_buf_size;
602 
603 	/* Separately map the SGT buffer */
604 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
605 	if (unlikely(dma_mapping_error(dev, addr))) {
606 		err = -ENOMEM;
607 		goto dma_map_single_failed;
608 	}
609 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
610 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
611 	dpaa2_fd_set_addr(fd, addr);
612 	dpaa2_fd_set_len(fd, skb->len);
613 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
614 
615 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
616 		enable_tx_tstamp(fd, sgt_buf);
617 
618 	return 0;
619 
620 dma_map_single_failed:
621 	skb_free_frag(sgt_buf);
622 sgt_buf_alloc_failed:
623 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
624 dma_map_sg_failed:
625 	kfree(scl);
626 	return err;
627 }
628 
629 /* Create a frame descriptor based on a linear skb */
630 static int build_single_fd(struct dpaa2_eth_priv *priv,
631 			   struct sk_buff *skb,
632 			   struct dpaa2_fd *fd)
633 {
634 	struct device *dev = priv->net_dev->dev.parent;
635 	u8 *buffer_start, *aligned_start;
636 	struct dpaa2_eth_swa *swa;
637 	dma_addr_t addr;
638 
639 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
640 
641 	/* If there's enough room to align the FD address, do it.
642 	 * It will help hardware optimize accesses.
643 	 */
644 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
645 				  DPAA2_ETH_TX_BUF_ALIGN);
646 	if (aligned_start >= skb->head)
647 		buffer_start = aligned_start;
648 
649 	/* Store a backpointer to the skb at the beginning of the buffer
650 	 * (in the private data area) such that we can release it
651 	 * on Tx confirm
652 	 */
653 	swa = (struct dpaa2_eth_swa *)buffer_start;
654 	swa->type = DPAA2_ETH_SWA_SINGLE;
655 	swa->single.skb = skb;
656 
657 	addr = dma_map_single(dev, buffer_start,
658 			      skb_tail_pointer(skb) - buffer_start,
659 			      DMA_BIDIRECTIONAL);
660 	if (unlikely(dma_mapping_error(dev, addr)))
661 		return -ENOMEM;
662 
663 	dpaa2_fd_set_addr(fd, addr);
664 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
665 	dpaa2_fd_set_len(fd, skb->len);
666 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
667 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
668 
669 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
670 		enable_tx_tstamp(fd, buffer_start);
671 
672 	return 0;
673 }
674 
675 /* FD freeing routine on the Tx path
676  *
677  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
678  * back-pointed to is also freed.
679  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
680  * dpaa2_eth_tx().
681  */
682 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
683 		       struct dpaa2_eth_fq *fq,
684 		       const struct dpaa2_fd *fd, bool in_napi)
685 {
686 	struct device *dev = priv->net_dev->dev.parent;
687 	dma_addr_t fd_addr;
688 	struct sk_buff *skb = NULL;
689 	unsigned char *buffer_start;
690 	struct dpaa2_eth_swa *swa;
691 	u8 fd_format = dpaa2_fd_get_format(fd);
692 	u32 fd_len = dpaa2_fd_get_len(fd);
693 
694 	fd_addr = dpaa2_fd_get_addr(fd);
695 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
696 	swa = (struct dpaa2_eth_swa *)buffer_start;
697 
698 	if (fd_format == dpaa2_fd_single) {
699 		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
700 			skb = swa->single.skb;
701 			/* Accessing the skb buffer is safe before dma unmap,
702 			 * because we didn't map the actual skb shell.
703 			 */
704 			dma_unmap_single(dev, fd_addr,
705 					 skb_tail_pointer(skb) - buffer_start,
706 					 DMA_BIDIRECTIONAL);
707 		} else {
708 			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
709 			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
710 					 DMA_BIDIRECTIONAL);
711 		}
712 	} else if (fd_format == dpaa2_fd_sg) {
713 		skb = swa->sg.skb;
714 
715 		/* Unmap the scatterlist */
716 		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
717 			     DMA_BIDIRECTIONAL);
718 		kfree(swa->sg.scl);
719 
720 		/* Unmap the SGT buffer */
721 		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
722 				 DMA_BIDIRECTIONAL);
723 	} else {
724 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
725 		return;
726 	}
727 
728 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
729 		fq->dq_frames++;
730 		fq->dq_bytes += fd_len;
731 	}
732 
733 	if (swa->type == DPAA2_ETH_SWA_XDP) {
734 		xdp_return_frame(swa->xdp.xdpf);
735 		return;
736 	}
737 
738 	/* Get the timestamp value */
739 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
740 		struct skb_shared_hwtstamps shhwtstamps;
741 		__le64 *ts = dpaa2_get_ts(buffer_start, true);
742 		u64 ns;
743 
744 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
745 
746 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
747 		shhwtstamps.hwtstamp = ns_to_ktime(ns);
748 		skb_tstamp_tx(skb, &shhwtstamps);
749 	}
750 
751 	/* Free SGT buffer allocated on tx */
752 	if (fd_format != dpaa2_fd_single)
753 		skb_free_frag(buffer_start);
754 
755 	/* Move on with skb release */
756 	napi_consume_skb(skb, in_napi);
757 }
758 
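/* Main Tx routine (.ndo_start_xmit): build a frame descriptor from the skb
 * and enqueue it on the hardware queue selected by the stack.
 */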
759 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
760 {
761 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
762 	struct dpaa2_fd fd;
763 	struct rtnl_link_stats64 *percpu_stats;
764 	struct dpaa2_eth_drv_stats *percpu_extras;
765 	struct dpaa2_eth_fq *fq;
766 	struct netdev_queue *nq;
767 	u16 queue_mapping;
768 	unsigned int needed_headroom;
769 	u32 fd_len;
770 	u8 prio = 0;
771 	int err, i;
772 
773 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
774 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
775 
776 	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
777 	if (skb_headroom(skb) < needed_headroom) {
778 		struct sk_buff *ns;
779 
780 		ns = skb_realloc_headroom(skb, needed_headroom);
781 		if (unlikely(!ns)) {
782 			percpu_stats->tx_dropped++;
783 			goto err_alloc_headroom;
784 		}
785 		percpu_extras->tx_reallocs++;
786 
787 		if (skb->sk)
788 			skb_set_owner_w(ns, skb->sk);
789 
790 		dev_kfree_skb(skb);
791 		skb = ns;
792 	}
793 
794 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
795 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
796 	 */
797 	skb = skb_unshare(skb, GFP_ATOMIC);
798 	if (unlikely(!skb)) {
799 		/* skb_unshare() has already freed the skb */
800 		percpu_stats->tx_dropped++;
801 		return NETDEV_TX_OK;
802 	}
803 
804 	/* Setup the FD fields */
805 	memset(&fd, 0, sizeof(fd));
806 
807 	if (skb_is_nonlinear(skb)) {
808 		err = build_sg_fd(priv, skb, &fd);
809 		percpu_extras->tx_sg_frames++;
810 		percpu_extras->tx_sg_bytes += skb->len;
811 	} else {
812 		err = build_single_fd(priv, skb, &fd);
813 	}
814 
815 	if (unlikely(err)) {
816 		percpu_stats->tx_dropped++;
817 		goto err_build_fd;
818 	}
819 
820 	/* Tracing point */
821 	trace_dpaa2_tx_fd(net_dev, &fd);
822 
823 	/* TxConf FQ selection relies on queue id from the stack.
824 	 * In case of a forwarded frame from another DPNI interface, we choose
825 	 * a queue affined to the same core that processed the Rx frame
826 	 */
827 	queue_mapping = skb_get_queue_mapping(skb);
828 
829 	if (net_dev->num_tc) {
830 		prio = netdev_txq_to_tc(net_dev, queue_mapping);
831 		/* Hardware interprets priority level 0 as being the highest,
832 		 * so we need to do a reverse mapping to the netdev tc index
833 		 */
834 		prio = net_dev->num_tc - prio - 1;
835 		/* We have only one FQ array entry for all Tx hardware queues
836 		 * with the same flow id (but different priority levels)
837 		 */
838 		queue_mapping %= dpaa2_eth_queue_count(priv);
839 	}
840 	fq = &priv->fq[queue_mapping];
841 
842 	fd_len = dpaa2_fd_get_len(&fd);
843 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
844 	netdev_tx_sent_queue(nq, fd_len);
845 
846 	/* Everything that happens after this enqueue might race with
847 	 * the Tx confirmation callback for this frame
848 	 */
849 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
850 		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
851 		if (err != -EBUSY)
852 			break;
853 	}
854 	percpu_extras->tx_portal_busy += i;
855 	if (unlikely(err < 0)) {
856 		percpu_stats->tx_errors++;
857 		/* Clean up everything, including freeing the skb */
858 		free_tx_fd(priv, fq, &fd, false);
859 		netdev_tx_completed_queue(nq, 1, fd_len);
860 	} else {
861 		percpu_stats->tx_packets++;
862 		percpu_stats->tx_bytes += fd_len;
863 	}
864 
865 	return NETDEV_TX_OK;
866 
867 err_build_fd:
868 err_alloc_headroom:
869 	dev_kfree_skb(skb);
870 
871 	return NETDEV_TX_OK;
872 }
873 
874 /* Tx confirmation frame processing routine */
875 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
876 			      struct dpaa2_eth_channel *ch __always_unused,
877 			      const struct dpaa2_fd *fd,
878 			      struct dpaa2_eth_fq *fq)
879 {
880 	struct rtnl_link_stats64 *percpu_stats;
881 	struct dpaa2_eth_drv_stats *percpu_extras;
882 	u32 fd_len = dpaa2_fd_get_len(fd);
883 	u32 fd_errors;
884 
885 	/* Tracing point */
886 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
887 
888 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
889 	percpu_extras->tx_conf_frames++;
890 	percpu_extras->tx_conf_bytes += fd_len;
891 
892 	/* Check frame errors in the FD field */
893 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
894 	free_tx_fd(priv, fq, fd, true);
895 
896 	if (likely(!fd_errors))
897 		return;
898 
899 	if (net_ratelimit())
900 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
901 			   fd_errors);
902 
903 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
904 	/* Tx-conf logically pertains to the egress path. */
905 	percpu_stats->tx_errors++;
906 }
907 
908 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
909 {
910 	int err;
911 
912 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
913 			       DPNI_OFF_RX_L3_CSUM, enable);
914 	if (err) {
915 		netdev_err(priv->net_dev,
916 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
917 		return err;
918 	}
919 
920 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
921 			       DPNI_OFF_RX_L4_CSUM, enable);
922 	if (err) {
923 		netdev_err(priv->net_dev,
924 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
925 		return err;
926 	}
927 
928 	return 0;
929 }
930 
931 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
932 {
933 	int err;
934 
935 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
936 			       DPNI_OFF_TX_L3_CSUM, enable);
937 	if (err) {
938 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
939 		return err;
940 	}
941 
942 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
943 			       DPNI_OFF_TX_L4_CSUM, enable);
944 	if (err) {
945 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
946 		return err;
947 	}
948 
949 	return 0;
950 }
951 
952 /* Perform a single release command to add buffers
953  * to the specified buffer pool
954  */
955 static int add_bufs(struct dpaa2_eth_priv *priv,
956 		    struct dpaa2_eth_channel *ch, u16 bpid)
957 {
958 	struct device *dev = priv->net_dev->dev.parent;
959 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
960 	struct page *page;
961 	dma_addr_t addr;
962 	int retries = 0;
963 	int i, err;
964 
965 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
966 		/* Allocate one page for each Rx buffer. WRIOP sees
967 		 * the entire page except for a tailroom reserved for
968 		 * skb shared info
969 		 */
973 		page = dev_alloc_pages(0);
974 		if (!page)
975 			goto err_alloc;
976 
977 		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
978 				    DMA_BIDIRECTIONAL);
979 		if (unlikely(dma_mapping_error(dev, addr)))
980 			goto err_map;
981 
982 		buf_array[i] = addr;
983 
984 		/* tracing point */
985 		trace_dpaa2_eth_buf_seed(priv->net_dev,
986 					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
987 					 addr, DPAA2_ETH_RX_BUF_SIZE,
988 					 bpid);
989 	}
990 
991 release_bufs:
992 	/* In case the portal is busy, retry until successful */
993 	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
994 					       buf_array, i)) == -EBUSY) {
995 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
996 			break;
997 		cpu_relax();
998 	}
999 
1000 	/* If release command failed, clean up and bail out;
1001 	 * not much else we can do about it
1002 	 */
1003 	if (err) {
1004 		free_bufs(priv, buf_array, i);
1005 		return 0;
1006 	}
1007 
1008 	return i;
1009 
1010 err_map:
1011 	__free_pages(page, 0);
1012 err_alloc:
1013 	/* If we managed to allocate at least some buffers,
1014 	 * release them to hardware
1015 	 */
1016 	if (i)
1017 		goto release_bufs;
1018 
1019 	return 0;
1020 }
1021 
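/* Seed the buffer pool with DPAA2_ETH_NUM_BUFS buffers for each channel */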
1022 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1023 {
1024 	int i, j;
1025 	int new_count;
1026 
1027 	for (j = 0; j < priv->num_channels; j++) {
1028 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1029 		     i += DPAA2_ETH_BUFS_PER_CMD) {
1030 			new_count = add_bufs(priv, priv->channel[j], bpid);
1031 			priv->channel[j]->buf_count += new_count;
1032 
1033 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1034 				return -ENOMEM;
1035 			}
1036 		}
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 /**
1043  * Drain the specified number of buffers from the DPNI's private buffer pool.
1044  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1045  */
1046 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1047 {
1048 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1049 	int retries = 0;
1050 	int ret;
1051 
1052 	do {
1053 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1054 					       buf_array, count);
1055 		if (ret < 0) {
1056 			if (ret == -EBUSY &&
1057 			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1058 				continue;
1059 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1060 			return;
1061 		}
1062 		free_bufs(priv, buf_array, ret);
1063 		retries = 0;
1064 	} while (ret);
1065 }
1066 
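/* Return all buffers from the pool to the system and reset the per-channel
 * buffer counters.
 */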
1067 static void drain_pool(struct dpaa2_eth_priv *priv)
1068 {
1069 	int i;
1070 
1071 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1072 	drain_bufs(priv, 1);
1073 
1074 	for (i = 0; i < priv->num_channels; i++)
1075 		priv->channel[i]->buf_count = 0;
1076 }
1077 
1078 /* Function is called from softirq context only, so we don't need to guard
1079  * the access to the per-channel buffer count
1080  */
1081 static int refill_pool(struct dpaa2_eth_priv *priv,
1082 		       struct dpaa2_eth_channel *ch,
1083 		       u16 bpid)
1084 {
1085 	int new_count;
1086 
1087 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1088 		return 0;
1089 
1090 	do {
1091 		new_count = add_bufs(priv, ch, bpid);
1092 		if (unlikely(!new_count)) {
1093 			/* Out of memory; abort for now, we'll try later on */
1094 			break;
1095 		}
1096 		ch->buf_count += new_count;
1097 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1098 
1099 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1100 		return -ENOMEM;
1101 
1102 	return 0;
1103 }
1104 
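/* Issue a volatile dequeue command for this channel; dequeued frames land in
 * the channel's store and are processed later by consume_frames().
 */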
1105 static int pull_channel(struct dpaa2_eth_channel *ch)
1106 {
1107 	int err;
1108 	int dequeues = -1;
1109 
1110 	/* Retry while portal is busy */
1111 	do {
1112 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1113 						    ch->store);
1114 		dequeues++;
1115 		cpu_relax();
1116 	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1117 
1118 	ch->stats.dequeue_portal_busy += dequeues;
1119 	if (unlikely(err))
1120 		ch->stats.pull_err++;
1121 
1122 	return err;
1123 }
1124 
1125 /* NAPI poll routine
1126  *
1127  * Frames are dequeued from the QMan channel associated with this NAPI context.
1128  * Rx, Tx confirmation and (if configured) Rx error frames all count
1129  * towards the NAPI budget.
1130  */
1131 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1132 {
1133 	struct dpaa2_eth_channel *ch;
1134 	struct dpaa2_eth_priv *priv;
1135 	int rx_cleaned = 0, txconf_cleaned = 0;
1136 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1137 	struct netdev_queue *nq;
1138 	int store_cleaned, work_done;
1139 	struct list_head rx_list;
1140 	int retries = 0;
1141 	int err;
1142 
1143 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
1144 	ch->xdp.res = 0;
1145 	priv = ch->priv;
1146 
1147 	INIT_LIST_HEAD(&rx_list);
1148 	ch->rx_list = &rx_list;
1149 
1150 	do {
1151 		err = pull_channel(ch);
1152 		if (unlikely(err))
1153 			break;
1154 
1155 		/* Refill pool if appropriate */
1156 		refill_pool(priv, ch, priv->bpid);
1157 
1158 		store_cleaned = consume_frames(ch, &fq);
1159 		if (store_cleaned <= 0)
1160 			break;
1161 		if (fq->type == DPAA2_RX_FQ) {
1162 			rx_cleaned += store_cleaned;
1163 		} else {
1164 			txconf_cleaned += store_cleaned;
1165 			/* We have a single Tx conf FQ on this channel */
1166 			txc_fq = fq;
1167 		}
1168 
1169 		/* If we either consumed the whole NAPI budget with Rx frames
1170 		 * or we reached the Tx confirmations threshold, we're done.
1171 		 */
1172 		if (rx_cleaned >= budget ||
1173 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1174 			work_done = budget;
1175 			goto out;
1176 		}
1177 	} while (store_cleaned);
1178 
1179 	/* We didn't consume the entire budget, so finish napi and
1180 	 * re-enable data availability notifications
1181 	 */
1182 	napi_complete_done(napi, rx_cleaned);
1183 	do {
1184 		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1185 		cpu_relax();
1186 	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1187 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1188 		  ch->nctx.desired_cpu);
1189 
1190 	work_done = max(rx_cleaned, 1);
1191 
1192 out:
1193 	netif_receive_skb_list(ch->rx_list);
1194 
1195 	if (txc_fq && txc_fq->dq_frames) {
1196 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1197 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1198 					  txc_fq->dq_bytes);
1199 		txc_fq->dq_frames = 0;
1200 		txc_fq->dq_bytes = 0;
1201 	}
1202 
1203 	if (ch->xdp.res & XDP_REDIRECT)
1204 		xdp_do_flush_map();
1205 
1206 	return work_done;
1207 }
1208 
1209 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1210 {
1211 	struct dpaa2_eth_channel *ch;
1212 	int i;
1213 
1214 	for (i = 0; i < priv->num_channels; i++) {
1215 		ch = priv->channel[i];
1216 		napi_enable(&ch->napi);
1217 	}
1218 }
1219 
1220 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1221 {
1222 	struct dpaa2_eth_channel *ch;
1223 	int i;
1224 
1225 	for (i = 0; i < priv->num_channels; i++) {
1226 		ch = priv->channel[i];
1227 		napi_disable(&ch->napi);
1228 	}
1229 }
1230 
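/* Enable or disable taildrop on all Rx frame queues */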
1231 static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
1232 {
1233 	struct dpni_taildrop td = {0};
1234 	int i, err;
1235 
1236 	if (priv->rx_td_enabled == enable)
1237 		return;
1238 
1239 	td.enable = enable;
1240 	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1241 
1242 	for (i = 0; i < priv->num_fqs; i++) {
1243 		if (priv->fq[i].type != DPAA2_RX_FQ)
1244 			continue;
1245 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1246 					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
1247 					priv->fq[i].flowid, &td);
1248 		if (err) {
1249 			netdev_err(priv->net_dev,
1250 				   "dpni_set_taildrop() failed\n");
1251 			break;
1252 		}
1253 	}
1254 
1255 	priv->rx_td_enabled = enable;
1256 }
1257 
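/* Query the current link state from firmware and propagate it: update Rx
 * taildrop based on the pause frame configuration and, unless phylink manages
 * the MAC, reflect the new state in the carrier and Tx queues.
 */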
1258 static int link_state_update(struct dpaa2_eth_priv *priv)
1259 {
1260 	struct dpni_link_state state = {0};
1261 	bool tx_pause;
1262 	int err;
1263 
1264 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1265 	if (unlikely(err)) {
1266 		netdev_err(priv->net_dev,
1267 			   "dpni_get_link_state() failed\n");
1268 		return err;
1269 	}
1270 
1271 	/* If Tx pause frame settings have changed, we need to update
1272 	 * Rx FQ taildrop configuration as well. We configure taildrop
1273 	 * only when pause frame generation is disabled.
1274 	 */
1275 	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
1276 		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
1277 	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
1278 
1279 	/* When we manage the MAC/PHY using phylink there is no need
1280 	 * to manually update the netif_carrier.
1281 	 */
1282 	if (priv->mac)
1283 		goto out;
1284 
1285 	/* Check link state; speed / duplex changes are not treated yet */
1286 	if (priv->link_state.up == state.up)
1287 		goto out;
1288 
1289 	if (state.up) {
1290 		netif_carrier_on(priv->net_dev);
1291 		netif_tx_start_all_queues(priv->net_dev);
1292 	} else {
1293 		netif_tx_stop_all_queues(priv->net_dev);
1294 		netif_carrier_off(priv->net_dev);
1295 	}
1296 
1297 	netdev_info(priv->net_dev, "Link Event: state %s\n",
1298 		    state.up ? "up" : "down");
1299 
1300 out:
1301 	priv->link_state = state;
1302 
1303 	return 0;
1304 }
1305 
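/* .ndo_open: seed the buffer pool, enable NAPI and bring up the DPNI */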
1306 static int dpaa2_eth_open(struct net_device *net_dev)
1307 {
1308 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1309 	int err;
1310 
1311 	err = seed_pool(priv, priv->bpid);
1312 	if (err) {
1313 		/* Not much to do; the buffer pool, though not filled up,
1314 		 * may still contain some buffers which would enable us
1315 		 * to limp on.
1316 		 */
1317 		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1318 			   priv->dpbp_dev->obj_desc.id, priv->bpid);
1319 	}
1320 
1321 	if (!priv->mac) {
1322 		/* We'll only start the txqs when the link is actually ready;
1323 		 * make sure we don't race against the link up notification,
1324 		 * which may come immediately after dpni_enable();
1325 		 */
1326 		netif_tx_stop_all_queues(net_dev);
1327 
1328 		/* Also, explicitly set carrier off, otherwise
1329 		 * netif_carrier_ok() will return true and cause 'ip link show'
1330 		 * to report the LOWER_UP flag, even though the link
1331 		 * notification wasn't even received.
1332 		 */
1333 		netif_carrier_off(net_dev);
1334 	}
1335 	enable_ch_napi(priv);
1336 
1337 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1338 	if (err < 0) {
1339 		netdev_err(net_dev, "dpni_enable() failed\n");
1340 		goto enable_err;
1341 	}
1342 
1343 	if (!priv->mac) {
1344 		/* If the DPMAC object has already processed the link up
1345 		 * interrupt, we have to learn the link state ourselves.
1346 		 */
1347 		err = link_state_update(priv);
1348 		if (err < 0) {
1349 			netdev_err(net_dev, "Can't update link state\n");
1350 			goto link_state_err;
1351 		}
1352 	} else {
1353 		phylink_start(priv->mac->phylink);
1354 	}
1355 
1356 	return 0;
1357 
1358 link_state_err:
1359 enable_err:
1360 	disable_ch_napi(priv);
1361 	drain_pool(priv);
1362 	return err;
1363 }
1364 
1365 /* Total number of in-flight frames on ingress queues */
1366 static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1367 {
1368 	struct dpaa2_eth_fq *fq;
1369 	u32 fcnt = 0, bcnt = 0, total = 0;
1370 	int i, err;
1371 
1372 	for (i = 0; i < priv->num_fqs; i++) {
1373 		fq = &priv->fq[i];
1374 		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1375 		if (err) {
1376 			netdev_warn(priv->net_dev, "query_fq_count failed");
1377 			break;
1378 		}
1379 		total += fcnt;
1380 	}
1381 
1382 	return total;
1383 }
1384 
1385 static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
1386 {
1387 	int retries = 10;
1388 	u32 pending;
1389 
1390 	do {
1391 		pending = ingress_fq_count(priv);
1392 		if (pending)
1393 			msleep(100);
1394 	} while (pending && --retries);
1395 }
1396 
1397 #define DPNI_TX_PENDING_VER_MAJOR	7
1398 #define DPNI_TX_PENDING_VER_MINOR	13
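/* Wait for in-flight egress frames to drain: poll the Tx pending frames
 * counter in DPNI statistics page 6 when firmware supports it, otherwise
 * simply give the hardware a fixed grace period.
 */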
1399 static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
1400 {
1401 	union dpni_statistics stats;
1402 	int retries = 10;
1403 	int err;
1404 
1405 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1406 				   DPNI_TX_PENDING_VER_MINOR) < 0)
1407 		goto out;
1408 
1409 	do {
1410 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1411 					  &stats);
1412 		if (err)
1413 			goto out;
1414 		if (stats.page_6.tx_pending_frames == 0)
1415 			return;
1416 	} while (--retries);
1417 
1418 out:
1419 	msleep(500);
1420 }
1421 
1422 static int dpaa2_eth_stop(struct net_device *net_dev)
1423 {
1424 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1425 	int dpni_enabled = 0;
1426 	int retries = 10;
1427 
1428 	if (!priv->mac) {
1429 		netif_tx_stop_all_queues(net_dev);
1430 		netif_carrier_off(net_dev);
1431 	} else {
1432 		phylink_stop(priv->mac->phylink);
1433 	}
1434 
1435 	/* On dpni_disable(), the MC firmware will:
1436 	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1437 	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1438 	 * of all in flight Tx frames is finished (and corresponding Tx conf
1439 	 * frames are enqueued back to software)
1440 	 *
1441 	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1442 	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1443 	 * and Tx conf queues are consumed on NAPI poll.
1444 	 */
1445 	wait_for_egress_fq_empty(priv);
1446 
1447 	do {
1448 		dpni_disable(priv->mc_io, 0, priv->mc_token);
1449 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1450 		if (dpni_enabled)
1451 			/* Allow the hardware some slack */
1452 			msleep(100);
1453 	} while (dpni_enabled && --retries);
1454 	if (!retries) {
1455 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1456 		/* Must go on and disable NAPI nonetheless, so we don't crash at
1457 		 * the next "ifconfig up"
1458 		 */
1459 	}
1460 
1461 	wait_for_ingress_fq_empty(priv);
1462 	disable_ch_napi(priv);
1463 
1464 	/* Empty the buffer pool */
1465 	drain_pool(priv);
1466 
1467 	return 0;
1468 }
1469 
1470 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1471 {
1472 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1473 	struct device *dev = net_dev->dev.parent;
1474 	int err;
1475 
1476 	err = eth_mac_addr(net_dev, addr);
1477 	if (err < 0) {
1478 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1479 		return err;
1480 	}
1481 
1482 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1483 					net_dev->dev_addr);
1484 	if (err) {
1485 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1486 		return err;
1487 	}
1488 
1489 	return 0;
1490 }
1491 
1492 /** Fill in counters maintained by the GPP driver. These may be different from
1493  * the hardware counters obtained by ethtool.
1494  */
1495 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1496 				struct rtnl_link_stats64 *stats)
1497 {
1498 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1499 	struct rtnl_link_stats64 *percpu_stats;
1500 	u64 *cpustats;
1501 	u64 *netstats = (u64 *)stats;
1502 	int i, j;
1503 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1504 
1505 	for_each_possible_cpu(i) {
1506 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1507 		cpustats = (u64 *)percpu_stats;
1508 		for (j = 0; j < num; j++)
1509 			netstats[j] += cpustats[j];
1510 	}
1511 }
1512 
1513 /* Copy mac unicast addresses from @net_dev to @priv.
1514  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1515  */
1516 static void add_uc_hw_addr(const struct net_device *net_dev,
1517 			   struct dpaa2_eth_priv *priv)
1518 {
1519 	struct netdev_hw_addr *ha;
1520 	int err;
1521 
1522 	netdev_for_each_uc_addr(ha, net_dev) {
1523 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1524 					ha->addr);
1525 		if (err)
1526 			netdev_warn(priv->net_dev,
1527 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1528 				    ha->addr, err);
1529 	}
1530 }
1531 
1532 /* Copy mac multicast addresses from @net_dev to @priv
1533  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1534  */
1535 static void add_mc_hw_addr(const struct net_device *net_dev,
1536 			   struct dpaa2_eth_priv *priv)
1537 {
1538 	struct netdev_hw_addr *ha;
1539 	int err;
1540 
1541 	netdev_for_each_mc_addr(ha, net_dev) {
1542 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1543 					ha->addr);
1544 		if (err)
1545 			netdev_warn(priv->net_dev,
1546 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1547 				    ha->addr, err);
1548 	}
1549 }
1550 
1551 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1552 {
1553 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1554 	int uc_count = netdev_uc_count(net_dev);
1555 	int mc_count = netdev_mc_count(net_dev);
1556 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1557 	u32 options = priv->dpni_attrs.options;
1558 	u16 mc_token = priv->mc_token;
1559 	struct fsl_mc_io *mc_io = priv->mc_io;
1560 	int err;
1561 
1562 	/* Basic sanity checks; these probably indicate a misconfiguration */
1563 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1564 		netdev_info(net_dev,
1565 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1566 			    max_mac);
1567 
1568 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
1569 	if (uc_count > max_mac) {
1570 		netdev_info(net_dev,
1571 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1572 			    uc_count, max_mac);
1573 		goto force_promisc;
1574 	}
1575 	if (mc_count + uc_count > max_mac) {
1576 		netdev_info(net_dev,
1577 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1578 			    uc_count + mc_count, max_mac);
1579 		goto force_mc_promisc;
1580 	}
1581 
1582 	/* Adjust promisc settings due to flag combinations */
1583 	if (net_dev->flags & IFF_PROMISC)
1584 		goto force_promisc;
1585 	if (net_dev->flags & IFF_ALLMULTI) {
1586 		/* First, rebuild unicast filtering table. This should be done
1587 		 * in promisc mode, in order to avoid frame loss while we
1588 		 * progressively add entries to the table.
1589 		 * We don't know whether we had been in promisc already, and
1590 		 * making an MC call to find out is expensive; so set uc promisc
1591 		 * nonetheless.
1592 		 */
1593 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1594 		if (err)
1595 			netdev_warn(net_dev, "Can't set uc promisc\n");
1596 
1597 		/* Actual uc table reconstruction. */
1598 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1599 		if (err)
1600 			netdev_warn(net_dev, "Can't clear uc filters\n");
1601 		add_uc_hw_addr(net_dev, priv);
1602 
1603 		/* Finally, clear uc promisc and set mc promisc as requested. */
1604 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1605 		if (err)
1606 			netdev_warn(net_dev, "Can't clear uc promisc\n");
1607 		goto force_mc_promisc;
1608 	}
1609 
1610 	/* Neither unicast, nor multicast promisc will be on... eventually.
1611 	 * For now, rebuild mac filtering tables while forcing both of them on.
1612 	 */
1613 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1614 	if (err)
1615 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1616 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1617 	if (err)
1618 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1619 
1620 	/* Actual mac filtering tables reconstruction */
1621 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1622 	if (err)
1623 		netdev_warn(net_dev, "Can't clear mac filters\n");
1624 	add_mc_hw_addr(net_dev, priv);
1625 	add_uc_hw_addr(net_dev, priv);
1626 
1627 	/* Now we can clear both ucast and mcast promisc, without risking
1628 	 * to drop legitimate frames anymore.
1629 	 */
1630 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1631 	if (err)
1632 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
1633 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1634 	if (err)
1635 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
1636 
1637 	return;
1638 
1639 force_promisc:
1640 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1641 	if (err)
1642 		netdev_warn(net_dev, "Can't set ucast promisc\n");
1643 force_mc_promisc:
1644 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1645 	if (err)
1646 		netdev_warn(net_dev, "Can't set mcast promisc\n");
1647 }
1648 
1649 static int dpaa2_eth_set_features(struct net_device *net_dev,
1650 				  netdev_features_t features)
1651 {
1652 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1653 	netdev_features_t changed = features ^ net_dev->features;
1654 	bool enable;
1655 	int err;
1656 
1657 	if (changed & NETIF_F_RXCSUM) {
1658 		enable = !!(features & NETIF_F_RXCSUM);
1659 		err = set_rx_csum(priv, enable);
1660 		if (err)
1661 			return err;
1662 	}
1663 
1664 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1665 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1666 		err = set_tx_csum(priv, enable);
1667 		if (err)
1668 			return err;
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1675 {
1676 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1677 	struct hwtstamp_config config;
1678 
1679 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1680 		return -EFAULT;
1681 
1682 	switch (config.tx_type) {
1683 	case HWTSTAMP_TX_OFF:
1684 		priv->tx_tstamp = false;
1685 		break;
1686 	case HWTSTAMP_TX_ON:
1687 		priv->tx_tstamp = true;
1688 		break;
1689 	default:
1690 		return -ERANGE;
1691 	}
1692 
1693 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1694 		priv->rx_tstamp = false;
1695 	} else {
1696 		priv->rx_tstamp = true;
1697 		/* TS is set for all frame types, not only those requested */
1698 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1699 	}
1700 
1701 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1702 			-EFAULT : 0;
1703 }
1704 
1705 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1706 {
1707 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1708 
1709 	if (cmd == SIOCSHWTSTAMP)
1710 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1711 
1712 	if (priv->mac)
1713 		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
1714 
1715 	return -EOPNOTSUPP;
1716 }
1717 
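/* XDP requires frames to fit into a single Rx buffer; check that the MTU
 * still allows linear frames once headroom and annotation are accounted for.
 */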
1718 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1719 {
1720 	int mfl, linear_mfl;
1721 
1722 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1723 	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1724 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1725 
1726 	if (mfl > linear_mfl) {
1727 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1728 			    linear_mfl - VLAN_ETH_HLEN);
1729 		return false;
1730 	}
1731 
1732 	return true;
1733 }
1734 
1735 static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1736 {
1737 	int mfl, err;
1738 
1739 	/* We enforce a maximum Rx frame length based on MTU only if we have
1740 	 * an XDP program attached (in order to avoid Rx S/G frames).
1741 	 * Otherwise, we accept all incoming frames as long as they are not
1742 	 * larger than maximum size supported in hardware
1743 	 */
1744 	if (has_xdp)
1745 		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1746 	else
1747 		mfl = DPAA2_ETH_MFL;
1748 
1749 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1750 	if (err) {
1751 		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1752 		return err;
1753 	}
1754 
1755 	return 0;
1756 }
1757 
1758 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1759 {
1760 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1761 	int err;
1762 
1763 	if (!priv->xdp_prog)
1764 		goto out;
1765 
1766 	if (!xdp_mtu_valid(priv, new_mtu))
1767 		return -EINVAL;
1768 
1769 	err = set_rx_mfl(priv, new_mtu, true);
1770 	if (err)
1771 		return err;
1772 
1773 out:
1774 	dev->mtu = new_mtu;
1775 	return 0;
1776 }
1777 
1778 static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1779 {
1780 	struct dpni_buffer_layout buf_layout = {0};
1781 	int err;
1782 
1783 	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1784 				     DPNI_QUEUE_RX, &buf_layout);
1785 	if (err) {
1786 		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
1787 		return err;
1788 	}
1789 
1790 	/* Reserve extra headroom for XDP header size changes */
1791 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
1792 				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
1793 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1794 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1795 				     DPNI_QUEUE_RX, &buf_layout);
1796 	if (err) {
1797 		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
1798 		return err;
1799 	}
1800 
1801 	return 0;
1802 }
1803 
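/* Install or remove an XDP program. Switching between XDP and non-XDP mode
 * changes the maximum Rx frame length and the Rx buffer headroom, so the
 * interface is temporarily brought down while reconfiguring.
 */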
1804 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
1805 {
1806 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1807 	struct dpaa2_eth_channel *ch;
1808 	struct bpf_prog *old;
1809 	bool up, need_update;
1810 	int i, err;
1811 
1812 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
1813 		return -EINVAL;
1814 
1815 	if (prog)
1816 		bpf_prog_add(prog, priv->num_channels);
1817 
1818 	up = netif_running(dev);
1819 	need_update = (!!priv->xdp_prog != !!prog);
1820 
1821 	if (up)
1822 		dpaa2_eth_stop(dev);
1823 
1824 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
1825 	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
1826 	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
1827 	 * so we are sure no old format buffers will be used from now on.
1828 	 */
1829 	if (need_update) {
1830 		err = set_rx_mfl(priv, dev->mtu, !!prog);
1831 		if (err)
1832 			goto out_err;
1833 		err = update_rx_buffer_headroom(priv, !!prog);
1834 		if (err)
1835 			goto out_err;
1836 	}
1837 
1838 	old = xchg(&priv->xdp_prog, prog);
1839 	if (old)
1840 		bpf_prog_put(old);
1841 
1842 	for (i = 0; i < priv->num_channels; i++) {
1843 		ch = priv->channel[i];
1844 		old = xchg(&ch->xdp.prog, prog);
1845 		if (old)
1846 			bpf_prog_put(old);
1847 	}
1848 
1849 	if (up) {
1850 		err = dpaa2_eth_open(dev);
1851 		if (err)
1852 			return err;
1853 	}
1854 
1855 	return 0;
1856 
1857 out_err:
1858 	if (prog)
1859 		bpf_prog_sub(prog, priv->num_channels);
1860 	if (up)
1861 		dpaa2_eth_open(dev);
1862 
1863 	return err;
1864 }
1865 
1866 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1867 {
1868 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1869 
1870 	switch (xdp->command) {
1871 	case XDP_SETUP_PROG:
1872 		return setup_xdp(dev, xdp->prog);
1873 	case XDP_QUERY_PROG:
1874 		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1875 		break;
1876 	default:
1877 		return -EINVAL;
1878 	}
1879 
1880 	return 0;
1881 }
1882 
1883 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
1884 				   struct xdp_frame *xdpf,
1885 				   struct dpaa2_fd *fd)
1886 {
1887 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1888 	struct device *dev = net_dev->dev.parent;
1889 	unsigned int needed_headroom;
1890 	struct dpaa2_eth_swa *swa;
1891 	void *buffer_start, *aligned_start;
1892 	dma_addr_t addr;
1893 
1894 	/* We require a minimum headroom to be able to transmit the frame.
1895 	 * Otherwise return an error and let the original net_device handle it
1896 	 */
1897 	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
1898 	if (xdpf->headroom < needed_headroom)
1899 		return -EINVAL;
1900 
1901 	/* Setup the FD fields */
1902 	memset(fd, 0, sizeof(*fd));
1903 
1904 	/* Align FD address, if possible */
1905 	buffer_start = xdpf->data - needed_headroom;
1906 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1907 				  DPAA2_ETH_TX_BUF_ALIGN);
1908 	if (aligned_start >= xdpf->data - xdpf->headroom)
1909 		buffer_start = aligned_start;
1910 
1911 	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* Fill in the software annotation: the Tx confirmation path uses it
	 * to recognize this as an XDP frame and to retrieve the xdp_frame to
	 * be returned
	 */
1913 	swa->type = DPAA2_ETH_SWA_XDP;
1914 	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
1915 	swa->xdp.xdpf = xdpf;
1916 
1917 	addr = dma_map_single(dev, buffer_start,
1918 			      swa->xdp.dma_size,
1919 			      DMA_BIDIRECTIONAL);
1920 	if (unlikely(dma_mapping_error(dev, addr)))
1921 		return -ENOMEM;
1922 
1923 	dpaa2_fd_set_addr(fd, addr);
1924 	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
1925 	dpaa2_fd_set_len(fd, xdpf->len);
1926 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
1927 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1928 
1929 	return 0;
1930 }
1931 
1932 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
1933 			      struct xdp_frame **frames, u32 flags)
1934 {
1935 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1936 	struct dpaa2_eth_drv_stats *percpu_extras;
1937 	struct rtnl_link_stats64 *percpu_stats;
1938 	struct dpaa2_eth_fq *fq;
1939 	struct dpaa2_fd fd;
1940 	int drops = 0;
	int i, retries, err;
1942 
1943 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1944 		return -EINVAL;
1945 
1946 	if (!netif_running(net_dev))
1947 		return -ENETDOWN;
1948 
1949 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1950 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1951 
1952 	for (i = 0; i < n; i++) {
1953 		struct xdp_frame *xdpf = frames[i];
1954 
1955 		/* create the FD from the xdp_frame */
1956 		err = dpaa2_eth_xdp_create_fd(net_dev, xdpf, &fd);
1957 		if (err) {
1958 			percpu_stats->tx_dropped++;
1959 			xdp_return_frame_rx_napi(xdpf);
1960 			drops++;
1961 			continue;
1962 		}
1963 
1964 		/* enqueue the newly created FD */
1965 		fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
		for (retries = 0; retries < DPAA2_ETH_ENQUEUE_RETRIES; retries++) {
			err = priv->enqueue(priv, fq, &fd, 0, 1, NULL);
			if (err != -EBUSY)
				break;
		}

		percpu_extras->tx_portal_busy += retries;
		if (unlikely(err < 0)) {
			percpu_stats->tx_errors++;
			xdp_return_frame_rx_napi(xdpf);
			drops++;
			continue;
		}
1978 
1979 		percpu_stats->tx_packets++;
1980 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1981 	}
1982 
1983 	return n - drops;
1984 }
1985 
1986 static int update_xps(struct dpaa2_eth_priv *priv)
1987 {
1988 	struct net_device *net_dev = priv->net_dev;
1989 	struct cpumask xps_mask;
1990 	struct dpaa2_eth_fq *fq;
1991 	int i, num_queues, netdev_queues;
1992 	int err = 0;
1993 
1994 	num_queues = dpaa2_eth_queue_count(priv);
1995 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
1996 
	/* The first <num_queues> entries in the priv->fq array are the Tx
	 * confirmation queues; each netdev Tx queue (one set per traffic
	 * class) maps back onto one of them via i % num_queues
	 */
2000 	for (i = 0; i < netdev_queues; i++) {
2001 		fq = &priv->fq[i % num_queues];
2002 
2003 		cpumask_clear(&xps_mask);
2004 		cpumask_set_cpu(fq->target_cpu, &xps_mask);
2005 
2006 		err = netif_set_xps_queue(net_dev, &xps_mask, i);
2007 		if (err) {
2008 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
2009 			break;
2010 		}
2011 	}
2012 
2013 	return err;
2014 }
2015 
2016 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2017 			      enum tc_setup_type type, void *type_data)
2018 {
2019 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2020 	struct tc_mqprio_qopt *mqprio = type_data;
2021 	u8 num_tc, num_queues;
2022 	int i;
2023 
2024 	if (type != TC_SETUP_QDISC_MQPRIO)
2025 		return -EINVAL;
2026 
2027 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2028 	num_queues = dpaa2_eth_queue_count(priv);
2029 	num_tc = mqprio->num_tc;
2030 
2031 	if (num_tc == net_dev->num_tc)
2032 		return 0;
2033 
	if (num_tc > dpaa2_eth_tc_count(priv)) {
2035 		netdev_err(net_dev, "Max %d traffic classes supported\n",
2036 			   dpaa2_eth_tc_count(priv));
2037 		return -EINVAL;
2038 	}
2039 
2040 	if (!num_tc) {
2041 		netdev_reset_tc(net_dev);
2042 		netif_set_real_num_tx_queues(net_dev, num_queues);
2043 		goto out;
2044 	}
2045 
2046 	netdev_set_num_tc(net_dev, num_tc);
2047 	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2048 
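	/* Each traffic class is backed by its own contiguous range of netdev
	 * Tx queues: class i uses queues [i * num_queues, (i + 1) * num_queues)
	 */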
2049 	for (i = 0; i < num_tc; i++)
2050 		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2051 
2052 out:
2053 	update_xps(priv);
2054 
2055 	return 0;
2056 }
2057 
2058 static const struct net_device_ops dpaa2_eth_ops = {
2059 	.ndo_open = dpaa2_eth_open,
2060 	.ndo_start_xmit = dpaa2_eth_tx,
2061 	.ndo_stop = dpaa2_eth_stop,
2062 	.ndo_set_mac_address = dpaa2_eth_set_addr,
2063 	.ndo_get_stats64 = dpaa2_eth_get_stats,
2064 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2065 	.ndo_set_features = dpaa2_eth_set_features,
2066 	.ndo_do_ioctl = dpaa2_eth_ioctl,
2067 	.ndo_change_mtu = dpaa2_eth_change_mtu,
2068 	.ndo_bpf = dpaa2_eth_xdp,
2069 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2070 	.ndo_setup_tc = dpaa2_eth_setup_tc,
2071 };
2072 
2073 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2074 {
2075 	struct dpaa2_eth_channel *ch;
2076 
2077 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2078 
2079 	/* Update NAPI statistics */
2080 	ch->stats.cdan++;
2081 
2082 	napi_schedule_irqoff(&ch->napi);
2083 }
2084 
2085 /* Allocate and configure a DPCON object */
2086 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2087 {
2088 	struct fsl_mc_device *dpcon;
2089 	struct device *dev = priv->net_dev->dev.parent;
2090 	int err;
2091 
2092 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2093 				     FSL_MC_POOL_DPCON, &dpcon);
2094 	if (err) {
2095 		if (err == -ENXIO)
2096 			err = -EPROBE_DEFER;
2097 		else
2098 			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2099 		return ERR_PTR(err);
2100 	}
2101 
2102 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2103 	if (err) {
2104 		dev_err(dev, "dpcon_open() failed\n");
2105 		goto free;
2106 	}
2107 
2108 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2109 	if (err) {
2110 		dev_err(dev, "dpcon_reset() failed\n");
2111 		goto close;
2112 	}
2113 
2114 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2115 	if (err) {
2116 		dev_err(dev, "dpcon_enable() failed\n");
2117 		goto close;
2118 	}
2119 
2120 	return dpcon;
2121 
2122 close:
2123 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2124 free:
2125 	fsl_mc_object_free(dpcon);
2126 
2127 	return NULL;
2128 }
2129 
2130 static void free_dpcon(struct dpaa2_eth_priv *priv,
2131 		       struct fsl_mc_device *dpcon)
2132 {
2133 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2134 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2135 	fsl_mc_object_free(dpcon);
2136 }
2137 
2138 static struct dpaa2_eth_channel *
2139 alloc_channel(struct dpaa2_eth_priv *priv)
2140 {
2141 	struct dpaa2_eth_channel *channel;
2142 	struct dpcon_attr attr;
2143 	struct device *dev = priv->net_dev->dev.parent;
2144 	int err;
2145 
2146 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2147 	if (!channel)
2148 		return NULL;
2149 
2150 	channel->dpcon = setup_dpcon(priv);
2151 	if (IS_ERR_OR_NULL(channel->dpcon)) {
2152 		err = PTR_ERR_OR_ZERO(channel->dpcon);
2153 		goto err_setup;
2154 	}
2155 
2156 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2157 				   &attr);
2158 	if (err) {
2159 		dev_err(dev, "dpcon_get_attributes() failed\n");
2160 		goto err_get_attr;
2161 	}
2162 
2163 	channel->dpcon_id = attr.id;
2164 	channel->ch_id = attr.qbman_ch_id;
2165 	channel->priv = priv;
2166 
2167 	return channel;
2168 
2169 err_get_attr:
2170 	free_dpcon(priv, channel->dpcon);
2171 err_setup:
2172 	kfree(channel);
2173 	return ERR_PTR(err);
2174 }
2175 
2176 static void free_channel(struct dpaa2_eth_priv *priv,
2177 			 struct dpaa2_eth_channel *channel)
2178 {
2179 	free_dpcon(priv, channel->dpcon);
2180 	kfree(channel);
2181 }
2182 
2183 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2184  * and register data availability notifications
2185  */
2186 static int setup_dpio(struct dpaa2_eth_priv *priv)
2187 {
2188 	struct dpaa2_io_notification_ctx *nctx;
2189 	struct dpaa2_eth_channel *channel;
2190 	struct dpcon_notification_cfg dpcon_notif_cfg;
2191 	struct device *dev = priv->net_dev->dev.parent;
2192 	int i, err;
2193 
2194 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
2195 	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
2197 	 * channels would be wasted).
2198 	 * Allocate one channel per core and register it to the core's
2199 	 * affine DPIO. If not enough channels are available for all cores
2200 	 * or if some cores don't have an affine DPIO, there will be no
2201 	 * ingress frame processing on those cores.
2202 	 */
2203 	cpumask_clear(&priv->dpio_cpumask);
2204 	for_each_online_cpu(i) {
2205 		/* Try to allocate a channel */
2206 		channel = alloc_channel(priv);
2207 		if (IS_ERR_OR_NULL(channel)) {
2208 			err = PTR_ERR_OR_ZERO(channel);
2209 			if (err != -EPROBE_DEFER)
2210 				dev_info(dev,
2211 					 "No affine channel for cpu %d and above\n", i);
2212 			goto err_alloc_ch;
2213 		}
2214 
2215 		priv->channel[priv->num_channels] = channel;
2216 
2217 		nctx = &channel->nctx;
2218 		nctx->is_cdan = 1;
2219 		nctx->cb = cdan_cb;
2220 		nctx->id = channel->ch_id;
2221 		nctx->desired_cpu = i;
2222 
2223 		/* Register the new context */
2224 		channel->dpio = dpaa2_io_service_select(i);
2225 		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2226 		if (err) {
2227 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2228 			/* If no affine DPIO for this core, there's probably
2229 			 * none available for next cores either. Signal we want
2230 			 * to retry later, in case the DPIO devices weren't
2231 			 * probed yet.
2232 			 */
2233 			err = -EPROBE_DEFER;
2234 			goto err_service_reg;
2235 		}
2236 
2237 		/* Register DPCON notification with MC */
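		/* nctx->dpio_id and nctx->qman64 are expected to have been
		 * filled in by dpaa2_io_service_register() above; qman64 is
		 * the 64-bit token QBMan returns with each CDAN, allowing
		 * cdan_cb() to recover the channel context
		 */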
2238 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2239 		dpcon_notif_cfg.priority = 0;
2240 		dpcon_notif_cfg.user_ctx = nctx->qman64;
2241 		err = dpcon_set_notification(priv->mc_io, 0,
2242 					     channel->dpcon->mc_handle,
2243 					     &dpcon_notif_cfg);
2244 		if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
2246 			goto err_set_cdan;
2247 		}
2248 
2249 		/* If we managed to allocate a channel and also found an affine
2250 		 * DPIO for this core, add it to the final mask
2251 		 */
2252 		cpumask_set_cpu(i, &priv->dpio_cpumask);
2253 		priv->num_channels++;
2254 
2255 		/* Stop if we already have enough channels to accommodate all
2256 		 * RX and TX conf queues
2257 		 */
2258 		if (priv->num_channels == priv->dpni_attrs.num_queues)
2259 			break;
2260 	}
2261 
2262 	return 0;
2263 
2264 err_set_cdan:
2265 	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2266 err_service_reg:
2267 	free_channel(priv, channel);
2268 err_alloc_ch:
2269 	if (err == -EPROBE_DEFER) {
2270 		for (i = 0; i < priv->num_channels; i++) {
2271 			channel = priv->channel[i];
2272 			nctx = &channel->nctx;
2273 			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2274 			free_channel(priv, channel);
2275 		}
2276 		priv->num_channels = 0;
2277 		return err;
2278 	}
2279 
2280 	if (cpumask_empty(&priv->dpio_cpumask)) {
2281 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2282 		return -ENODEV;
2283 	}
2284 
2285 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2286 		 cpumask_pr_args(&priv->dpio_cpumask));
2287 
2288 	return 0;
2289 }
2290 
2291 static void free_dpio(struct dpaa2_eth_priv *priv)
2292 {
2293 	struct device *dev = priv->net_dev->dev.parent;
2294 	struct dpaa2_eth_channel *ch;
2295 	int i;
2296 
2297 	/* deregister CDAN notifications and free channels */
2298 	for (i = 0; i < priv->num_channels; i++) {
2299 		ch = priv->channel[i];
2300 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2301 		free_channel(priv, ch);
2302 	}
2303 }
2304 
2305 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2306 						    int cpu)
2307 {
2308 	struct device *dev = priv->net_dev->dev.parent;
2309 	int i;
2310 
2311 	for (i = 0; i < priv->num_channels; i++)
2312 		if (priv->channel[i]->nctx.desired_cpu == cpu)
2313 			return priv->channel[i];
2314 
2315 	/* We should never get here. Issue a warning and return
2316 	 * the first channel, because it's still better than nothing
2317 	 */
2318 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2319 
2320 	return priv->channel[0];
2321 }
2322 
2323 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2324 {
2325 	struct device *dev = priv->net_dev->dev.parent;
2326 	struct dpaa2_eth_fq *fq;
2327 	int rx_cpu, txc_cpu;
2328 	int i;
2329 
2330 	/* For each FQ, pick one channel/CPU to deliver frames to.
2331 	 * This may well change at runtime, either through irqbalance or
2332 	 * through direct user intervention.
2333 	 */
2334 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2335 
2336 	for (i = 0; i < priv->num_fqs; i++) {
2337 		fq = &priv->fq[i];
2338 		switch (fq->type) {
2339 		case DPAA2_RX_FQ:
2340 			fq->target_cpu = rx_cpu;
2341 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2342 			if (rx_cpu >= nr_cpu_ids)
2343 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
2344 			break;
2345 		case DPAA2_TX_CONF_FQ:
2346 			fq->target_cpu = txc_cpu;
2347 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2348 			if (txc_cpu >= nr_cpu_ids)
2349 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
2350 			break;
2351 		default:
2352 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2353 		}
2354 		fq->channel = get_affine_channel(priv, fq->target_cpu);
2355 	}
2356 
2357 	update_xps(priv);
2358 }
2359 
2360 static void setup_fqs(struct dpaa2_eth_priv *priv)
2361 {
2362 	int i;
2363 
2364 	/* We have one TxConf FQ per Tx flow.
2365 	 * The number of Tx and Rx queues is the same.
2366 	 * Tx queues come first in the fq array.
2367 	 */
2368 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2369 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2370 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2371 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2372 	}
2373 
2374 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2375 		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2376 		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2377 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2378 	}
2379 
2380 	/* For each FQ, decide on which core to process incoming frames */
2381 	set_fq_affinity(priv);
2382 }
2383 
2384 /* Allocate and configure one buffer pool for each interface */
2385 static int setup_dpbp(struct dpaa2_eth_priv *priv)
2386 {
2387 	int err;
2388 	struct fsl_mc_device *dpbp_dev;
2389 	struct device *dev = priv->net_dev->dev.parent;
2390 	struct dpbp_attr dpbp_attrs;
2391 
2392 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2393 				     &dpbp_dev);
2394 	if (err) {
2395 		if (err == -ENXIO)
2396 			err = -EPROBE_DEFER;
2397 		else
2398 			dev_err(dev, "DPBP device allocation failed\n");
2399 		return err;
2400 	}
2401 
2402 	priv->dpbp_dev = dpbp_dev;
2403 
2404 	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2405 			&dpbp_dev->mc_handle);
2406 	if (err) {
2407 		dev_err(dev, "dpbp_open() failed\n");
2408 		goto err_open;
2409 	}
2410 
2411 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2412 	if (err) {
2413 		dev_err(dev, "dpbp_reset() failed\n");
2414 		goto err_reset;
2415 	}
2416 
2417 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2418 	if (err) {
2419 		dev_err(dev, "dpbp_enable() failed\n");
2420 		goto err_enable;
2421 	}
2422 
2423 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2424 				  &dpbp_attrs);
2425 	if (err) {
2426 		dev_err(dev, "dpbp_get_attributes() failed\n");
2427 		goto err_get_attr;
2428 	}
2429 	priv->bpid = dpbp_attrs.bpid;
2430 
2431 	return 0;
2432 
2433 err_get_attr:
2434 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2435 err_enable:
2436 err_reset:
2437 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2438 err_open:
2439 	fsl_mc_object_free(dpbp_dev);
2440 
2441 	return err;
2442 }
2443 
2444 static void free_dpbp(struct dpaa2_eth_priv *priv)
2445 {
2446 	drain_pool(priv);
2447 	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2448 	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2449 	fsl_mc_object_free(priv->dpbp_dev);
2450 }
2451 
2452 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2453 {
2454 	struct device *dev = priv->net_dev->dev.parent;
2455 	struct dpni_buffer_layout buf_layout = {0};
2456 	u16 rx_buf_align;
2457 	int err;
2458 
	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version this is not always reported correctly on rev1 hardware, so
	 * accept both possible encodings.
	 */
2462 	 */
2463 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2464 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2465 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2466 	else
2467 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2468 
2469 	/* tx buffer */
2470 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2471 	buf_layout.pass_timestamp = true;
2472 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2473 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2474 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2475 				     DPNI_QUEUE_TX, &buf_layout);
2476 	if (err) {
2477 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2478 		return err;
2479 	}
2480 
2481 	/* tx-confirm buffer */
2482 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2483 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2484 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2485 	if (err) {
2486 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2487 		return err;
2488 	}
2489 
2490 	/* Now that we've set our tx buffer layout, retrieve the minimum
2491 	 * required tx data offset.
2492 	 */
2493 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2494 				      &priv->tx_data_offset);
2495 	if (err) {
2496 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2497 		return err;
2498 	}
2499 
2500 	if ((priv->tx_data_offset % 64) != 0)
2501 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2502 			 priv->tx_data_offset);
2503 
2504 	/* rx buffer */
2505 	buf_layout.pass_frame_status = true;
2506 	buf_layout.pass_parser_result = true;
2507 	buf_layout.data_align = rx_buf_align;
2508 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2509 	buf_layout.private_data_size = 0;
2510 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2511 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2512 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2513 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2514 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2515 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2516 				     DPNI_QUEUE_RX, &buf_layout);
2517 	if (err) {
2518 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2519 		return err;
2520 	}
2521 
2522 	return 0;
2523 }
2524 
2525 #define DPNI_ENQUEUE_FQID_VER_MAJOR	7
2526 #define DPNI_ENQUEUE_FQID_VER_MINOR	9
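
/* DPNIs with API version 7.9 or later report valid Tx frame queue IDs, so we
 * can enqueue frames (including batches) directly to the frame queues; older
 * versions must enqueue through the queuing destination (QDID) instead.
 * set_enqueue_mode() selects the matching callback.
 */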
2527 
2528 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2529 				       struct dpaa2_eth_fq *fq,
2530 				       struct dpaa2_fd *fd, u8 prio,
2531 				       u32 num_frames __always_unused,
2532 				       int *frames_enqueued)
2533 {
2534 	int err;
2535 
2536 	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2537 					  priv->tx_qdid, prio,
2538 					  fq->tx_qdbin, fd);
2539 	if (!err && frames_enqueued)
2540 		*frames_enqueued = 1;
2541 	return err;
2542 }
2543 
2544 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
2545 						struct dpaa2_eth_fq *fq,
2546 						struct dpaa2_fd *fd,
2547 						u8 prio, u32 num_frames,
2548 						int *frames_enqueued)
2549 {
2550 	int err;
2551 
2552 	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
2553 						   fq->tx_fqid[prio],
2554 						   fd, num_frames);
2555 
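	/* dpaa2_io_service_enqueue_multiple_fq() returns the number of frames
	 * actually enqueued (or a negative error); zero frames enqueued means
	 * the portal had no room, which we report as -EBUSY so the caller can
	 * retry
	 */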
2556 	if (err == 0)
2557 		return -EBUSY;
2558 
2559 	if (frames_enqueued)
2560 		*frames_enqueued = err;
2561 	return 0;
2562 }
2563 
2564 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2565 {
2566 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2567 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2568 		priv->enqueue = dpaa2_eth_enqueue_qd;
2569 	else
2570 		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
2571 }
2572 
2573 static int set_pause(struct dpaa2_eth_priv *priv)
2574 {
2575 	struct device *dev = priv->net_dev->dev.parent;
2576 	struct dpni_link_cfg link_cfg = {0};
2577 	int err;
2578 
2579 	/* Get the default link options so we don't override other flags */
2580 	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2581 	if (err) {
2582 		dev_err(dev, "dpni_get_link_cfg() failed\n");
2583 		return err;
2584 	}
2585 
2586 	/* By default, enable both Rx and Tx pause frames */
2587 	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
2588 	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2589 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2590 	if (err) {
2591 		dev_err(dev, "dpni_set_link_cfg() failed\n");
2592 		return err;
2593 	}
2594 
2595 	priv->link_state.options = link_cfg.options;
2596 
2597 	return 0;
2598 }
2599 
2600 static void update_tx_fqids(struct dpaa2_eth_priv *priv)
2601 {
2602 	struct dpni_queue_id qid = {0};
2603 	struct dpaa2_eth_fq *fq;
2604 	struct dpni_queue queue;
2605 	int i, j, err;
2606 
2607 	/* We only use Tx FQIDs for FQID-based enqueue, so check
2608 	 * if DPNI version supports it before updating FQIDs
2609 	 */
2610 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2611 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2612 		return;
2613 
2614 	for (i = 0; i < priv->num_fqs; i++) {
2615 		fq = &priv->fq[i];
2616 		if (fq->type != DPAA2_TX_CONF_FQ)
2617 			continue;
2618 		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2619 			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2620 					     DPNI_QUEUE_TX, j, fq->flowid,
2621 					     &queue, &qid);
2622 			if (err)
2623 				goto out_err;
2624 
2625 			fq->tx_fqid[j] = qid.fqid;
2626 			if (fq->tx_fqid[j] == 0)
2627 				goto out_err;
2628 		}
2629 	}
2630 
2631 	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
2632 
2633 	return;
2634 
2635 out_err:
2636 	netdev_info(priv->net_dev,
2637 		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
2638 	priv->enqueue = dpaa2_eth_enqueue_qd;
2639 }
2640 
2641 /* Configure the DPNI object this interface is associated with */
2642 static int setup_dpni(struct fsl_mc_device *ls_dev)
2643 {
2644 	struct device *dev = &ls_dev->dev;
2645 	struct dpaa2_eth_priv *priv;
2646 	struct net_device *net_dev;
2647 	int err;
2648 
2649 	net_dev = dev_get_drvdata(dev);
2650 	priv = netdev_priv(net_dev);
2651 
2652 	/* get a handle for the DPNI object */
2653 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
2654 	if (err) {
2655 		dev_err(dev, "dpni_open() failed\n");
2656 		return err;
2657 	}
2658 
2659 	/* Check if we can work with this DPNI object */
2660 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
2661 				   &priv->dpni_ver_minor);
2662 	if (err) {
2663 		dev_err(dev, "dpni_get_api_version() failed\n");
2664 		goto close;
2665 	}
2666 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
2667 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
2668 			priv->dpni_ver_major, priv->dpni_ver_minor,
2669 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
2670 		err = -ENOTSUPP;
2671 		goto close;
2672 	}
2673 
2674 	ls_dev->mc_io = priv->mc_io;
2675 	ls_dev->mc_handle = priv->mc_token;
2676 
2677 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2678 	if (err) {
2679 		dev_err(dev, "dpni_reset() failed\n");
2680 		goto close;
2681 	}
2682 
2683 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
2684 				  &priv->dpni_attrs);
2685 	if (err) {
2686 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
2687 		goto close;
2688 	}
2689 
2690 	err = set_buffer_layout(priv);
2691 	if (err)
2692 		goto close;
2693 
2694 	set_enqueue_mode(priv);
2695 
2696 	/* Enable pause frame support */
2697 	if (dpaa2_eth_has_pause_support(priv)) {
2698 		err = set_pause(priv);
2699 		if (err)
2700 			goto close;
2701 	}
2702 
	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}
2707 
2708 	return 0;
2709 
2710 close:
2711 	dpni_close(priv->mc_io, 0, priv->mc_token);
2712 
2713 	return err;
2714 }
2715 
2716 static void free_dpni(struct dpaa2_eth_priv *priv)
2717 {
2718 	int err;
2719 
2720 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2721 	if (err)
2722 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
2723 			    err);
2724 
2725 	dpni_close(priv->mc_io, 0, priv->mc_token);
2726 }
2727 
2728 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
2729 			 struct dpaa2_eth_fq *fq)
2730 {
2731 	struct device *dev = priv->net_dev->dev.parent;
2732 	struct dpni_queue queue;
2733 	struct dpni_queue_id qid;
2734 	int err;
2735 
2736 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2737 			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
2738 	if (err) {
2739 		dev_err(dev, "dpni_get_queue(RX) failed\n");
2740 		return err;
2741 	}
2742 
2743 	fq->fqid = qid.fqid;
2744 
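	/* Deliver Rx frames to the core-affine channel. Rx uses channel
	 * priority 1 while Tx confirmations use priority 0 (see
	 * setup_tx_flow()), which we assume QBMan services with higher
	 * precedence.
	 */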
2745 	queue.destination.id = fq->channel->dpcon_id;
2746 	queue.destination.type = DPNI_DEST_DPCON;
2747 	queue.destination.priority = 1;
2748 	queue.user_context = (u64)(uintptr_t)fq;
2749 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2750 			     DPNI_QUEUE_RX, 0, fq->flowid,
2751 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2752 			     &queue);
2753 	if (err) {
2754 		dev_err(dev, "dpni_set_queue(RX) failed\n");
2755 		return err;
2756 	}
2757 
2758 	/* xdp_rxq setup */
2759 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
2760 			       fq->flowid);
2761 	if (err) {
2762 		dev_err(dev, "xdp_rxq_info_reg failed\n");
2763 		return err;
2764 	}
2765 
2766 	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
2767 					 MEM_TYPE_PAGE_ORDER0, NULL);
2768 	if (err) {
2769 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
2770 		return err;
2771 	}
2772 
2773 	return 0;
2774 }
2775 
2776 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
2777 			 struct dpaa2_eth_fq *fq)
2778 {
2779 	struct device *dev = priv->net_dev->dev.parent;
2780 	struct dpni_queue queue;
2781 	struct dpni_queue_id qid;
2782 	int i, err;
2783 
2784 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2785 		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2786 				     DPNI_QUEUE_TX, i, fq->flowid,
2787 				     &queue, &qid);
2788 		if (err) {
2789 			dev_err(dev, "dpni_get_queue(TX) failed\n");
2790 			return err;
2791 		}
2792 		fq->tx_fqid[i] = qid.fqid;
2793 	}
2794 
2795 	/* All Tx queues belonging to the same flowid have the same qdbin */
2796 	fq->tx_qdbin = qid.qdbin;
2797 
2798 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2799 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2800 			     &queue, &qid);
2801 	if (err) {
2802 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2803 		return err;
2804 	}
2805 
2806 	fq->fqid = qid.fqid;
2807 
2808 	queue.destination.id = fq->channel->dpcon_id;
2809 	queue.destination.type = DPNI_DEST_DPCON;
2810 	queue.destination.priority = 0;
2811 	queue.user_context = (u64)(uintptr_t)fq;
2812 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2813 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2814 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2815 			     &queue);
2816 	if (err) {
2817 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2818 		return err;
2819 	}
2820 
2821 	return 0;
2822 }
2823 
2824 /* Supported header fields for Rx hash distribution key */
2825 static const struct dpaa2_eth_dist_fields dist_fields[] = {
2826 	{
2827 		/* L2 header */
2828 		.rxnfc_field = RXH_L2DA,
2829 		.cls_prot = NET_PROT_ETH,
2830 		.cls_field = NH_FLD_ETH_DA,
2831 		.id = DPAA2_ETH_DIST_ETHDST,
2832 		.size = 6,
2833 	}, {
2834 		.cls_prot = NET_PROT_ETH,
2835 		.cls_field = NH_FLD_ETH_SA,
2836 		.id = DPAA2_ETH_DIST_ETHSRC,
2837 		.size = 6,
2838 	}, {
2839 		/* This is the last ethertype field parsed:
2840 		 * depending on frame format, it can be the MAC ethertype
2841 		 * or the VLAN etype.
2842 		 */
2843 		.cls_prot = NET_PROT_ETH,
2844 		.cls_field = NH_FLD_ETH_TYPE,
2845 		.id = DPAA2_ETH_DIST_ETHTYPE,
2846 		.size = 2,
2847 	}, {
2848 		/* VLAN header */
2849 		.rxnfc_field = RXH_VLAN,
2850 		.cls_prot = NET_PROT_VLAN,
2851 		.cls_field = NH_FLD_VLAN_TCI,
2852 		.id = DPAA2_ETH_DIST_VLAN,
2853 		.size = 2,
2854 	}, {
2855 		/* IP header */
2856 		.rxnfc_field = RXH_IP_SRC,
2857 		.cls_prot = NET_PROT_IP,
2858 		.cls_field = NH_FLD_IP_SRC,
2859 		.id = DPAA2_ETH_DIST_IPSRC,
2860 		.size = 4,
2861 	}, {
2862 		.rxnfc_field = RXH_IP_DST,
2863 		.cls_prot = NET_PROT_IP,
2864 		.cls_field = NH_FLD_IP_DST,
2865 		.id = DPAA2_ETH_DIST_IPDST,
2866 		.size = 4,
2867 	}, {
2868 		.rxnfc_field = RXH_L3_PROTO,
2869 		.cls_prot = NET_PROT_IP,
2870 		.cls_field = NH_FLD_IP_PROTO,
2871 		.id = DPAA2_ETH_DIST_IPPROTO,
2872 		.size = 1,
2873 	}, {
2874 		/* Using UDP ports, this is functionally equivalent to raw
2875 		 * byte pairs from L4 header.
2876 		 */
2877 		.rxnfc_field = RXH_L4_B_0_1,
2878 		.cls_prot = NET_PROT_UDP,
2879 		.cls_field = NH_FLD_UDP_PORT_SRC,
2880 		.id = DPAA2_ETH_DIST_L4SRC,
2881 		.size = 2,
2882 	}, {
2883 		.rxnfc_field = RXH_L4_B_2_3,
2884 		.cls_prot = NET_PROT_UDP,
2885 		.cls_field = NH_FLD_UDP_PORT_DST,
2886 		.id = DPAA2_ETH_DIST_L4DST,
2887 		.size = 2,
2888 	},
2889 };
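
/* Note: entries without an rxnfc_field (e.g. the Ethernet source address and
 * ethertype) can only be used for Rx flow classification; dpaa2_eth_set_hash()
 * never selects them for the hash key.
 */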
2890 
2891 /* Configure the Rx hash key using the legacy API */
2892 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2893 {
2894 	struct device *dev = priv->net_dev->dev.parent;
2895 	struct dpni_rx_tc_dist_cfg dist_cfg;
2896 	int err;
2897 
2898 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2899 
2900 	dist_cfg.key_cfg_iova = key;
2901 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2902 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2903 
2904 	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2905 	if (err)
2906 		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2907 
2908 	return err;
2909 }
2910 
2911 /* Configure the Rx hash key using the new API */
2912 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2913 {
2914 	struct device *dev = priv->net_dev->dev.parent;
2915 	struct dpni_rx_dist_cfg dist_cfg;
2916 	int err;
2917 
2918 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2919 
2920 	dist_cfg.key_cfg_iova = key;
2921 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2922 	dist_cfg.enable = 1;
2923 
2924 	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2925 	if (err)
2926 		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2927 
2928 	return err;
2929 }
2930 
2931 /* Configure the Rx flow classification key */
2932 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2933 {
2934 	struct device *dev = priv->net_dev->dev.parent;
2935 	struct dpni_rx_dist_cfg dist_cfg;
2936 	int err;
2937 
2938 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2939 
2940 	dist_cfg.key_cfg_iova = key;
2941 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2942 	dist_cfg.enable = 1;
2943 
2944 	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2945 	if (err)
2946 		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2947 
2948 	return err;
2949 }
2950 
2951 /* Size of the Rx flow classification key */
2952 int dpaa2_eth_cls_key_size(u64 fields)
2953 {
2954 	int i, size = 0;
2955 
2956 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2957 		if (!(fields & dist_fields[i].id))
2958 			continue;
2959 		size += dist_fields[i].size;
2960 	}
2961 
2962 	return size;
2963 }
2964 
2965 /* Offset of header field in Rx classification key */
2966 int dpaa2_eth_cls_fld_off(int prot, int field)
2967 {
2968 	int i, off = 0;
2969 
2970 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2971 		if (dist_fields[i].cls_prot == prot &&
2972 		    dist_fields[i].cls_field == field)
2973 			return off;
2974 		off += dist_fields[i].size;
2975 	}
2976 
2977 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2978 	return 0;
2979 }
2980 
2981 /* Prune unused fields from the classification rule.
2982  * Used when masking is not supported
2983  */
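/* Example, based on the dist_fields layout above: if only IPSRC is selected
 * in @fields, its 4 bytes are copied from offset 16 (past the Ethernet and
 * VLAN fields) down to offset 0, and the trimmed key spans only
 * dpaa2_eth_cls_key_size(@fields) bytes.
 */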
2984 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
2985 {
2986 	int off = 0, new_off = 0;
2987 	int i, size;
2988 
2989 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2990 		size = dist_fields[i].size;
2991 		if (dist_fields[i].id & fields) {
2992 			memcpy(key_mem + new_off, key_mem + off, size);
2993 			new_off += size;
2994 		}
2995 		off += size;
2996 	}
2997 }
2998 
2999 /* Set Rx distribution (hash or flow classification) key
3000  * flags is a combination of RXH_ bits
3001  */
3002 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
3003 				  enum dpaa2_eth_rx_dist type, u64 flags)
3004 {
3005 	struct device *dev = net_dev->dev.parent;
3006 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3007 	struct dpkg_profile_cfg cls_cfg;
3008 	u32 rx_hash_fields = 0;
3009 	dma_addr_t key_iova;
3010 	u8 *dma_mem;
3011 	int i;
3012 	int err = 0;
3013 
3014 	memset(&cls_cfg, 0, sizeof(cls_cfg));
3015 
3016 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
3017 		struct dpkg_extract *key =
3018 			&cls_cfg.extracts[cls_cfg.num_extracts];
3019 
3020 		/* For both Rx hashing and classification keys
3021 		 * we set only the selected fields.
3022 		 */
3023 		if (!(flags & dist_fields[i].id))
3024 			continue;
3025 		if (type == DPAA2_ETH_RX_DIST_HASH)
3026 			rx_hash_fields |= dist_fields[i].rxnfc_field;
3027 
3028 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3029 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
3030 			return -E2BIG;
3031 		}
3032 
3033 		key->type = DPKG_EXTRACT_FROM_HDR;
3034 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3035 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3036 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3037 		cls_cfg.num_extracts++;
3038 	}
3039 
3040 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3041 	if (!dma_mem)
3042 		return -ENOMEM;
3043 
3044 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3045 	if (err) {
3046 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
3047 		goto free_key;
3048 	}
3049 
3050 	/* Prepare for setting the rx dist */
3051 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3052 				  DMA_TO_DEVICE);
3053 	if (dma_mapping_error(dev, key_iova)) {
3054 		dev_err(dev, "DMA mapping failed\n");
3055 		err = -ENOMEM;
3056 		goto free_key;
3057 	}
3058 
3059 	if (type == DPAA2_ETH_RX_DIST_HASH) {
3060 		if (dpaa2_eth_has_legacy_dist(priv))
3061 			err = config_legacy_hash_key(priv, key_iova);
3062 		else
3063 			err = config_hash_key(priv, key_iova);
3064 	} else {
3065 		err = config_cls_key(priv, key_iova);
3066 	}
3067 
3068 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3069 			 DMA_TO_DEVICE);
3070 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
3071 		priv->rx_hash_fields = rx_hash_fields;
3072 
3073 free_key:
3074 	kfree(dma_mem);
3075 	return err;
3076 }
3077 
3078 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3079 {
3080 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3081 	u64 key = 0;
3082 	int i;
3083 
3084 	if (!dpaa2_eth_hash_enabled(priv))
3085 		return -EOPNOTSUPP;
3086 
3087 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3088 		if (dist_fields[i].rxnfc_field & flags)
3089 			key |= dist_fields[i].id;
3090 
3091 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
3092 }
3093 
3094 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3095 {
3096 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3097 }
3098 
3099 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
3100 {
3101 	struct device *dev = priv->net_dev->dev.parent;
3102 	int err;
3103 
3104 	/* Check if we actually support Rx flow classification */
3105 	if (dpaa2_eth_has_legacy_dist(priv)) {
3106 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
3107 		return -EOPNOTSUPP;
3108 	}
3109 
3110 	if (!dpaa2_eth_fs_enabled(priv)) {
3111 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3112 		return -EOPNOTSUPP;
3113 	}
3114 
3115 	if (!dpaa2_eth_hash_enabled(priv)) {
3116 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3117 		return -EOPNOTSUPP;
3118 	}
3119 
3120 	/* If there is no support for masking in the classification table,
3121 	 * we don't set a default key, as it will depend on the rules
3122 	 * added by the user at runtime.
3123 	 */
3124 	if (!dpaa2_eth_fs_mask_enabled(priv))
3125 		goto out;
3126 
3127 	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
3128 	if (err)
3129 		return err;
3130 
3131 out:
3132 	priv->rx_cls_enabled = 1;
3133 
3134 	return 0;
3135 }
3136 
3137 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3138  * frame queues and channels
3139  */
3140 static int bind_dpni(struct dpaa2_eth_priv *priv)
3141 {
3142 	struct net_device *net_dev = priv->net_dev;
3143 	struct device *dev = net_dev->dev.parent;
3144 	struct dpni_pools_cfg pools_params;
3145 	struct dpni_error_cfg err_cfg;
3146 	int err = 0;
3147 	int i;
3148 
3149 	pools_params.num_dpbp = 1;
3150 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3151 	pools_params.pools[0].backup_pool = 0;
3152 	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3153 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3154 	if (err) {
3155 		dev_err(dev, "dpni_set_pools() failed\n");
3156 		return err;
3157 	}
3158 
3159 	/* have the interface implicitly distribute traffic based on
3160 	 * the default hash key
3161 	 */
3162 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3163 	if (err && err != -EOPNOTSUPP)
3164 		dev_err(dev, "Failed to configure hashing\n");
3165 
3166 	/* Configure the flow classification key; it includes all
3167 	 * supported header fields and cannot be modified at runtime
3168 	 */
3169 	err = dpaa2_eth_set_default_cls(priv);
3170 	if (err && err != -EOPNOTSUPP)
3171 		dev_err(dev, "Failed to configure Rx classification key\n");
3172 
3173 	/* Configure handling of error frames */
3174 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3175 	err_cfg.set_frame_annotation = 1;
3176 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3177 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3178 				       &err_cfg);
3179 	if (err) {
3180 		dev_err(dev, "dpni_set_errors_behavior failed\n");
3181 		return err;
3182 	}
3183 
3184 	/* Configure Rx and Tx conf queues to generate CDANs */
3185 	for (i = 0; i < priv->num_fqs; i++) {
3186 		switch (priv->fq[i].type) {
3187 		case DPAA2_RX_FQ:
3188 			err = setup_rx_flow(priv, &priv->fq[i]);
3189 			break;
3190 		case DPAA2_TX_CONF_FQ:
3191 			err = setup_tx_flow(priv, &priv->fq[i]);
3192 			break;
3193 		default:
3194 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3195 			return -EINVAL;
3196 		}
3197 		if (err)
3198 			return err;
3199 	}
3200 
3201 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3202 			    DPNI_QUEUE_TX, &priv->tx_qdid);
3203 	if (err) {
3204 		dev_err(dev, "dpni_get_qdid() failed\n");
3205 		return err;
3206 	}
3207 
3208 	return 0;
3209 }
3210 
3211 /* Allocate rings for storing incoming frame descriptors */
3212 static int alloc_rings(struct dpaa2_eth_priv *priv)
3213 {
3214 	struct net_device *net_dev = priv->net_dev;
3215 	struct device *dev = net_dev->dev.parent;
3216 	int i;
3217 
3218 	for (i = 0; i < priv->num_channels; i++) {
3219 		priv->channel[i]->store =
3220 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3221 		if (!priv->channel[i]->store) {
3222 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3223 			goto err_ring;
3224 		}
3225 	}
3226 
3227 	return 0;
3228 
3229 err_ring:
3230 	for (i = 0; i < priv->num_channels; i++) {
3231 		if (!priv->channel[i]->store)
3232 			break;
3233 		dpaa2_io_store_destroy(priv->channel[i]->store);
3234 	}
3235 
3236 	return -ENOMEM;
3237 }
3238 
3239 static void free_rings(struct dpaa2_eth_priv *priv)
3240 {
3241 	int i;
3242 
3243 	for (i = 0; i < priv->num_channels; i++)
3244 		dpaa2_io_store_destroy(priv->channel[i]->store);
3245 }
3246 
3247 static int set_mac_addr(struct dpaa2_eth_priv *priv)
3248 {
3249 	struct net_device *net_dev = priv->net_dev;
3250 	struct device *dev = net_dev->dev.parent;
3251 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3252 	int err;
3253 
3254 	/* Get firmware address, if any */
3255 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3256 	if (err) {
3257 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3258 		return err;
3259 	}
3260 
3261 	/* Get DPNI attributes address, if any */
3262 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3263 					dpni_mac_addr);
3264 	if (err) {
3265 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
3266 		return err;
3267 	}
3268 
3269 	/* First check if firmware has any address configured by bootloader */
3270 	if (!is_zero_ether_addr(mac_addr)) {
3271 		/* If the DPMAC addr != DPNI addr, update it */
3272 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3273 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3274 							priv->mc_token,
3275 							mac_addr);
3276 			if (err) {
3277 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3278 				return err;
3279 			}
3280 		}
3281 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3282 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
3283 		/* No MAC address configured, fill in net_dev->dev_addr
3284 		 * with a random one
3285 		 */
3286 		eth_hw_addr_random(net_dev);
3287 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3288 
3289 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3290 						net_dev->dev_addr);
3291 		if (err) {
3292 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3293 			return err;
3294 		}
3295 
3296 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3297 		 * practical purposes, this will be our "permanent" mac address,
3298 		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill in net_dev->perm_addr.
3300 		 */
3301 		net_dev->addr_assign_type = NET_ADDR_PERM;
3302 	} else {
3303 		/* NET_ADDR_PERM is default, all we have to do is
3304 		 * fill in the device addr.
3305 		 */
3306 		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3307 	}
3308 
3309 	return 0;
3310 }
3311 
3312 static int netdev_init(struct net_device *net_dev)
3313 {
3314 	struct device *dev = net_dev->dev.parent;
3315 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3316 	u32 options = priv->dpni_attrs.options;
3317 	u64 supported = 0, not_supported = 0;
3318 	u8 bcast_addr[ETH_ALEN];
3319 	u8 num_queues;
3320 	int err;
3321 
3322 	net_dev->netdev_ops = &dpaa2_eth_ops;
3323 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
3324 
3325 	err = set_mac_addr(priv);
3326 	if (err)
3327 		return err;
3328 
3329 	/* Explicitly add the broadcast address to the MAC filtering table */
3330 	eth_broadcast_addr(bcast_addr);
3331 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3332 	if (err) {
3333 		dev_err(dev, "dpni_add_mac_addr() failed\n");
3334 		return err;
3335 	}
3336 
3337 	/* Set MTU upper limit; lower limit is 68B (default value) */
3338 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3339 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3340 					DPAA2_ETH_MFL);
3341 	if (err) {
3342 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
3343 		return err;
3344 	}
3345 
3346 	/* Set actual number of queues in the net device */
3347 	num_queues = dpaa2_eth_queue_count(priv);
3348 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
3349 	if (err) {
3350 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
3351 		return err;
3352 	}
3353 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
3354 	if (err) {
3355 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
3356 		return err;
3357 	}
3358 
3359 	/* Capabilities listing */
3360 	supported |= IFF_LIVE_ADDR_CHANGE;
3361 
3362 	if (options & DPNI_OPT_NO_MAC_FILTER)
3363 		not_supported |= IFF_UNICAST_FLT;
3364 	else
3365 		supported |= IFF_UNICAST_FLT;
3366 
3367 	net_dev->priv_flags |= supported;
3368 	net_dev->priv_flags &= ~not_supported;
3369 
3370 	/* Features */
3371 	net_dev->features = NETIF_F_RXCSUM |
3372 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3373 			    NETIF_F_SG | NETIF_F_HIGHDMA |
3374 			    NETIF_F_LLTX;
3375 	net_dev->hw_features = net_dev->features;
3376 
3377 	return 0;
3378 }
3379 
3380 static int poll_link_state(void *arg)
3381 {
3382 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3383 	int err;
3384 
3385 	while (!kthread_should_stop()) {
3386 		err = link_state_update(priv);
3387 		if (unlikely(err))
3388 			return err;
3389 
3390 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3391 	}
3392 
3393 	return 0;
3394 }
3395 
3396 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
3397 {
3398 	struct fsl_mc_device *dpni_dev, *dpmac_dev;
3399 	struct dpaa2_mac *mac;
3400 	int err;
3401 
3402 	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
3403 	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
3404 	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
3405 		return 0;
3406 
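	/* A DPMAC with a fixed link has no PHY for us to manage, so there is
	 * no need to connect to it
	 */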
3407 	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
3408 		return 0;
3409 
3410 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
3411 	if (!mac)
3412 		return -ENOMEM;
3413 
3414 	mac->mc_dev = dpmac_dev;
3415 	mac->mc_io = priv->mc_io;
3416 	mac->net_dev = priv->net_dev;
3417 
3418 	err = dpaa2_mac_connect(mac);
3419 	if (err) {
3420 		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
3421 		kfree(mac);
3422 		return err;
3423 	}
3424 	priv->mac = mac;
3425 
3426 	return 0;
3427 }
3428 
3429 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
3430 {
3431 	if (!priv->mac)
3432 		return;
3433 
3434 	dpaa2_mac_disconnect(priv->mac);
3435 	kfree(priv->mac);
3436 	priv->mac = NULL;
3437 }
3438 
3439 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3440 {
3441 	u32 status = ~0;
3442 	struct device *dev = (struct device *)arg;
3443 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3444 	struct net_device *net_dev = dev_get_drvdata(dev);
3445 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3446 	int err;
3447 
3448 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3449 				  DPNI_IRQ_INDEX, &status);
3450 	if (unlikely(err)) {
3451 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
3452 		return IRQ_HANDLED;
3453 	}
3454 
3455 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
3456 		link_state_update(netdev_priv(net_dev));
3457 
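	/* The object connected to this DPNI may have changed at runtime
	 * (e.g. a DPMAC was linked or unlinked), so refresh the MAC address,
	 * the cached Tx FQIDs and the MAC endpoint connection
	 */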
3458 	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
3459 		set_mac_addr(netdev_priv(net_dev));
3460 		update_tx_fqids(priv);
3461 
3462 		rtnl_lock();
3463 		if (priv->mac)
3464 			dpaa2_eth_disconnect_mac(priv);
3465 		else
3466 			dpaa2_eth_connect_mac(priv);
3467 		rtnl_unlock();
3468 	}
3469 
3470 	return IRQ_HANDLED;
3471 }
3472 
3473 static int setup_irqs(struct fsl_mc_device *ls_dev)
3474 {
3475 	int err = 0;
3476 	struct fsl_mc_device_irq *irq;
3477 
3478 	err = fsl_mc_allocate_irqs(ls_dev);
3479 	if (err) {
3480 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3481 		return err;
3482 	}
3483 
3484 	irq = ls_dev->irqs[0];
3485 	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3486 					NULL, dpni_irq0_handler_thread,
3487 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
3488 					dev_name(&ls_dev->dev), &ls_dev->dev);
3489 	if (err < 0) {
3490 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
3491 		goto free_mc_irq;
3492 	}
3493 
3494 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3495 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
3496 				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
3497 	if (err < 0) {
3498 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
3499 		goto free_irq;
3500 	}
3501 
3502 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3503 				  DPNI_IRQ_INDEX, 1);
3504 	if (err < 0) {
3505 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
3506 		goto free_irq;
3507 	}
3508 
3509 	return 0;
3510 
3511 free_irq:
3512 	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3513 free_mc_irq:
3514 	fsl_mc_free_irqs(ls_dev);
3515 
3516 	return err;
3517 }
3518 
3519 static void add_ch_napi(struct dpaa2_eth_priv *priv)
3520 {
3521 	int i;
3522 	struct dpaa2_eth_channel *ch;
3523 
3524 	for (i = 0; i < priv->num_channels; i++) {
3525 		ch = priv->channel[i];
3526 		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3527 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3528 			       NAPI_POLL_WEIGHT);
3529 	}
3530 }
3531 
3532 static void del_ch_napi(struct dpaa2_eth_priv *priv)
3533 {
3534 	int i;
3535 	struct dpaa2_eth_channel *ch;
3536 
3537 	for (i = 0; i < priv->num_channels; i++) {
3538 		ch = priv->channel[i];
3539 		netif_napi_del(&ch->napi);
3540 	}
3541 }
3542 
3543 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3544 {
3545 	struct device *dev;
3546 	struct net_device *net_dev = NULL;
3547 	struct dpaa2_eth_priv *priv = NULL;
3548 	int err = 0;
3549 
3550 	dev = &dpni_dev->dev;
3551 
3552 	/* Net device */
3553 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
3554 	if (!net_dev) {
3555 		dev_err(dev, "alloc_etherdev_mq() failed\n");
3556 		return -ENOMEM;
3557 	}
3558 
3559 	SET_NETDEV_DEV(net_dev, dev);
3560 	dev_set_drvdata(dev, net_dev);
3561 
3562 	priv = netdev_priv(net_dev);
3563 	priv->net_dev = net_dev;
3564 
3565 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
3566 
3567 	/* Obtain a MC portal */
3568 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3569 				     &priv->mc_io);
3570 	if (err) {
3571 		if (err == -ENXIO)
3572 			err = -EPROBE_DEFER;
3573 		else
3574 			dev_err(dev, "MC portal allocation failed\n");
3575 		goto err_portal_alloc;
3576 	}
3577 
3578 	/* MC objects initialization and configuration */
3579 	err = setup_dpni(dpni_dev);
3580 	if (err)
3581 		goto err_dpni_setup;
3582 
3583 	err = setup_dpio(priv);
3584 	if (err)
3585 		goto err_dpio_setup;
3586 
3587 	setup_fqs(priv);
3588 
3589 	err = setup_dpbp(priv);
3590 	if (err)
3591 		goto err_dpbp_setup;
3592 
3593 	err = bind_dpni(priv);
3594 	if (err)
3595 		goto err_bind;
3596 
3597 	/* Add a NAPI context for each channel */
3598 	add_ch_napi(priv);
3599 
3600 	/* Percpu statistics */
3601 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
3602 	if (!priv->percpu_stats) {
3603 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
3604 		err = -ENOMEM;
3605 		goto err_alloc_percpu_stats;
3606 	}
3607 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
3608 	if (!priv->percpu_extras) {
3609 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
3610 		err = -ENOMEM;
3611 		goto err_alloc_percpu_extras;
3612 	}
3613 
3614 	err = netdev_init(net_dev);
3615 	if (err)
3616 		goto err_netdev_init;
3617 
3618 	/* Configure checksum offload based on current interface flags */
3619 	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
3620 	if (err)
3621 		goto err_csum;
3622 
3623 	err = set_tx_csum(priv, !!(net_dev->features &
3624 				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
3625 	if (err)
3626 		goto err_csum;
3627 
3628 	err = alloc_rings(priv);
3629 	if (err)
3630 		goto err_alloc_rings;
3631 
3632 	err = setup_irqs(dpni_dev);
3633 	if (err) {
3634 		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
3635 		priv->poll_thread = kthread_run(poll_link_state, priv,
3636 						"%s_poll_link", net_dev->name);
3637 		if (IS_ERR(priv->poll_thread)) {
3638 			dev_err(dev, "Error starting polling thread\n");
3639 			goto err_poll_thread;
3640 		}
3641 		priv->do_link_poll = true;
3642 	}
3643 
3644 	err = dpaa2_eth_connect_mac(priv);
3645 	if (err)
3646 		goto err_connect_mac;
3647 
3648 	err = register_netdev(net_dev);
3649 	if (err < 0) {
3650 		dev_err(dev, "register_netdev() failed\n");
3651 		goto err_netdev_reg;
3652 	}
3653 
3654 #ifdef CONFIG_DEBUG_FS
3655 	dpaa2_dbg_add(priv);
3656 #endif
3657 
3658 	dev_info(dev, "Probed interface %s\n", net_dev->name);
3659 	return 0;
3660 
3661 err_netdev_reg:
3662 	dpaa2_eth_disconnect_mac(priv);
3663 err_connect_mac:
3664 	if (priv->do_link_poll)
3665 		kthread_stop(priv->poll_thread);
3666 	else
3667 		fsl_mc_free_irqs(dpni_dev);
3668 err_poll_thread:
3669 	free_rings(priv);
3670 err_alloc_rings:
3671 err_csum:
3672 err_netdev_init:
3673 	free_percpu(priv->percpu_extras);
3674 err_alloc_percpu_extras:
3675 	free_percpu(priv->percpu_stats);
3676 err_alloc_percpu_stats:
3677 	del_ch_napi(priv);
3678 err_bind:
3679 	free_dpbp(priv);
3680 err_dpbp_setup:
3681 	free_dpio(priv);
3682 err_dpio_setup:
3683 	free_dpni(priv);
3684 err_dpni_setup:
3685 	fsl_mc_portal_free(priv->mc_io);
3686 err_portal_alloc:
3687 	dev_set_drvdata(dev, NULL);
3688 	free_netdev(net_dev);
3689 
3690 	return err;
3691 }
3692 
3693 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
3694 {
3695 	struct device *dev;
3696 	struct net_device *net_dev;
3697 	struct dpaa2_eth_priv *priv;
3698 
3699 	dev = &ls_dev->dev;
3700 	net_dev = dev_get_drvdata(dev);
3701 	priv = netdev_priv(net_dev);
3702 
3703 #ifdef CONFIG_DEBUG_FS
3704 	dpaa2_dbg_remove(priv);
3705 #endif
3706 	rtnl_lock();
3707 	dpaa2_eth_disconnect_mac(priv);
3708 	rtnl_unlock();
3709 
3710 	unregister_netdev(net_dev);
3711 
3712 	if (priv->do_link_poll)
3713 		kthread_stop(priv->poll_thread);
3714 	else
3715 		fsl_mc_free_irqs(ls_dev);
3716 
3717 	free_rings(priv);
3718 	free_percpu(priv->percpu_stats);
3719 	free_percpu(priv->percpu_extras);
3720 
3721 	del_ch_napi(priv);
3722 	free_dpbp(priv);
3723 	free_dpio(priv);
3724 	free_dpni(priv);
3725 
3726 	fsl_mc_portal_free(priv->mc_io);
3727 
	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);
3731 
3732 	return 0;
3733 }
3734 
3735 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
3736 	{
3737 		.vendor = FSL_MC_VENDOR_FREESCALE,
3738 		.obj_type = "dpni",
3739 	},
3740 	{ .vendor = 0x0 }
3741 };
3742 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
3743 
3744 static struct fsl_mc_driver dpaa2_eth_driver = {
3745 	.driver = {
3746 		.name = KBUILD_MODNAME,
3747 		.owner = THIS_MODULE,
3748 	},
3749 	.probe = dpaa2_eth_probe,
3750 	.remove = dpaa2_eth_remove,
3751 	.match_id_table = dpaa2_eth_match_id_table
3752 };
3753 
3754 static int __init dpaa2_eth_driver_init(void)
3755 {
3756 	int err;
3757 
3758 	dpaa2_eth_dbg_init();
3759 	err = fsl_mc_driver_register(&dpaa2_eth_driver);
3760 	if (err) {
3761 		dpaa2_eth_dbg_exit();
3762 		return err;
3763 	}
3764 
3765 	return 0;
3766 }
3767 
3768 static void __exit dpaa2_eth_driver_exit(void)
3769 {
3770 	dpaa2_eth_dbg_exit();
3771 	fsl_mc_driver_unregister(&dpaa2_eth_driver);
3772 }
3773 
3774 module_init(dpaa2_eth_driver_init);
3775 module_exit(dpaa2_eth_driver_exit);
3776