1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2019 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <net/sock.h>
19 
20 #include "dpaa2-eth.h"
21 
22 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
23  * using trace events only need to #include <trace/events/sched.h>
24  */
25 #define CREATE_TRACE_POINTS
26 #include "dpaa2-eth-trace.h"
27 
28 MODULE_LICENSE("Dual BSD/GPL");
29 MODULE_AUTHOR("Freescale Semiconductor, Inc");
30 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
31 
32 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
33 				dma_addr_t iova_addr)
34 {
35 	phys_addr_t phys_addr;
36 
37 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
38 
39 	return phys_to_virt(phys_addr);
40 }
41 
42 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
43 			     u32 fd_status,
44 			     struct sk_buff *skb)
45 {
46 	skb_checksum_none_assert(skb);
47 
48 	/* HW checksum validation is disabled, nothing to do here */
49 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
50 		return;
51 
52 	/* Read checksum validation bits */
53 	if (!((fd_status & DPAA2_FAS_L3CV) &&
54 	      (fd_status & DPAA2_FAS_L4CV)))
55 		return;
56 
57 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
58 	skb->ip_summed = CHECKSUM_UNNECESSARY;
59 }
60 
61 /* Free a received FD.
62  * Not to be used for Tx conf FDs or on any other paths.
63  */
64 static void free_rx_fd(struct dpaa2_eth_priv *priv,
65 		       const struct dpaa2_fd *fd,
66 		       void *vaddr)
67 {
68 	struct device *dev = priv->net_dev->dev.parent;
69 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
70 	u8 fd_format = dpaa2_fd_get_format(fd);
71 	struct dpaa2_sg_entry *sgt;
72 	void *sg_vaddr;
73 	int i;
74 
75 	/* If single buffer frame, just free the data buffer */
76 	if (fd_format == dpaa2_fd_single)
77 		goto free_buf;
78 	else if (fd_format != dpaa2_fd_sg)
79 		/* We don't support any other format */
80 		return;
81 
82 	/* For S/G frames, we first need to free all SG entries
83 	 * except the first one, which was taken care of already
84 	 */
85 	sgt = vaddr + dpaa2_fd_get_offset(fd);
86 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
87 		addr = dpaa2_sg_get_addr(&sgt[i]);
88 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
89 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
90 			       DMA_BIDIRECTIONAL);
91 
92 		free_pages((unsigned long)sg_vaddr, 0);
93 		if (dpaa2_sg_is_final(&sgt[i]))
94 			break;
95 	}
96 
97 free_buf:
98 	free_pages((unsigned long)vaddr, 0);
99 }
100 
101 /* Build a linear skb based on a single-buffer frame descriptor */
102 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
103 					const struct dpaa2_fd *fd,
104 					void *fd_vaddr)
105 {
106 	struct sk_buff *skb = NULL;
107 	u16 fd_offset = dpaa2_fd_get_offset(fd);
108 	u32 fd_length = dpaa2_fd_get_len(fd);
109 
110 	ch->buf_count--;
111 
112 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
113 	if (unlikely(!skb))
114 		return NULL;
115 
116 	skb_reserve(skb, fd_offset);
117 	skb_put(skb, fd_length);
118 
119 	return skb;
120 }
121 
122 /* Build a non linear (fragmented) skb based on a S/G table */
123 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 				      struct dpaa2_eth_channel *ch,
125 				      struct dpaa2_sg_entry *sgt)
126 {
127 	struct sk_buff *skb = NULL;
128 	struct device *dev = priv->net_dev->dev.parent;
129 	void *sg_vaddr;
130 	dma_addr_t sg_addr;
131 	u16 sg_offset;
132 	u32 sg_length;
133 	struct page *page, *head_page;
134 	int page_offset;
135 	int i;
136 
137 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 		struct dpaa2_sg_entry *sge = &sgt[i];
139 
140 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
141 		 * but this is the only format we may receive from HW anyway
142 		 */
143 
144 		/* Get the address and length from the S/G entry */
145 		sg_addr = dpaa2_sg_get_addr(sge);
146 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 			       DMA_BIDIRECTIONAL);
149 
150 		sg_length = dpaa2_sg_get_len(sge);
151 
152 		if (i == 0) {
153 			/* We build the skb around the first data buffer */
154 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
155 			if (unlikely(!skb)) {
156 				/* Free the first SG entry now, since we already
157 				 * unmapped it and obtained the virtual address
158 				 */
159 				free_pages((unsigned long)sg_vaddr, 0);
160 
161 				/* We still need to subtract the buffers used
162 				 * by this FD from our software counter
163 				 */
164 				while (!dpaa2_sg_is_final(&sgt[i]) &&
165 				       i < DPAA2_ETH_MAX_SG_ENTRIES)
166 					i++;
167 				break;
168 			}
169 
170 			sg_offset = dpaa2_sg_get_offset(sge);
171 			skb_reserve(skb, sg_offset);
172 			skb_put(skb, sg_length);
173 		} else {
174 			/* Rest of the data buffers are stored as skb frags */
175 			page = virt_to_page(sg_vaddr);
176 			head_page = virt_to_head_page(sg_vaddr);
177 
178 			/* Offset in page (which may be compound).
179 			 * Data in subsequent SG entries is stored from the
180 			 * beginning of the buffer, so we don't need to add the
181 			 * sg_offset.
182 			 */
183 			page_offset = ((unsigned long)sg_vaddr &
184 				(PAGE_SIZE - 1)) +
185 				(page_address(page) - page_address(head_page));
186 
187 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 					sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 		}
190 
191 		if (dpaa2_sg_is_final(sge))
192 			break;
193 	}
194 
195 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196 
197 	/* Count all data buffers + SG table buffer */
198 	ch->buf_count -= i + 2;
199 
200 	return skb;
201 }
202 
203 /* Free buffers acquired from the buffer pool or which were meant to
204  * be released in the pool
205  */
206 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
207 {
208 	struct device *dev = priv->net_dev->dev.parent;
209 	void *vaddr;
210 	int i;
211 
212 	for (i = 0; i < count; i++) {
213 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
214 		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
215 			       DMA_BIDIRECTIONAL);
216 		free_pages((unsigned long)vaddr, 0);
217 	}
218 }
219 
220 static void xdp_release_buf(struct dpaa2_eth_priv *priv,
221 			    struct dpaa2_eth_channel *ch,
222 			    dma_addr_t addr)
223 {
224 	int retries = 0;
225 	int err;
226 
227 	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
228 	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
229 		return;
230 
231 	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
232 					       ch->xdp.drop_bufs,
233 					       ch->xdp.drop_cnt)) == -EBUSY) {
234 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
235 			break;
236 		cpu_relax();
237 	}
238 
239 	if (err) {
240 		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
241 		ch->buf_count -= ch->xdp.drop_cnt;
242 	}
243 
244 	ch->xdp.drop_cnt = 0;
245 }
246 
247 static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
248 		       void *buf_start, u16 queue_id)
249 {
250 	struct dpaa2_eth_fq *fq;
251 	struct dpaa2_faead *faead;
252 	u32 ctrl, frc;
253 	int i, err;
254 
255 	/* Mark the egress frame hardware annotation area as valid */
256 	frc = dpaa2_fd_get_frc(fd);
257 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
258 	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
259 
260 	/* Instruct hardware to release the FD buffer directly into
261 	 * the buffer pool once transmission is completed, instead of
262 	 * sending a Tx confirmation frame to us
263 	 */
264 	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
265 	faead = dpaa2_get_faead(buf_start, false);
266 	faead->ctrl = cpu_to_le32(ctrl);
267 	faead->conf_fqid = 0;
268 
269 	fq = &priv->fq[queue_id];
270 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
271 		err = priv->enqueue(priv, fq, fd, 0);
272 		if (err != -EBUSY)
273 			break;
274 	}
275 
276 	return err;
277 }
278 
279 static u32 run_xdp(struct dpaa2_eth_priv *priv,
280 		   struct dpaa2_eth_channel *ch,
281 		   struct dpaa2_eth_fq *rx_fq,
282 		   struct dpaa2_fd *fd, void *vaddr)
283 {
284 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
285 	struct rtnl_link_stats64 *percpu_stats;
286 	struct bpf_prog *xdp_prog;
287 	struct xdp_buff xdp;
288 	u32 xdp_act = XDP_PASS;
289 	int err;
290 
291 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
292 
293 	rcu_read_lock();
294 
295 	xdp_prog = READ_ONCE(ch->xdp.prog);
296 	if (!xdp_prog)
297 		goto out;
298 
299 	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
300 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
301 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
302 	xdp_set_data_meta_invalid(&xdp);
303 	xdp.rxq = &ch->xdp_rxq;
304 
305 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
306 
307 	/* xdp.data pointer may have changed */
308 	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
309 	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
310 
311 	switch (xdp_act) {
312 	case XDP_PASS:
313 		break;
314 	case XDP_TX:
315 		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
316 		if (err) {
317 			xdp_release_buf(priv, ch, addr);
318 			percpu_stats->tx_errors++;
319 			ch->stats.xdp_tx_err++;
320 		} else {
321 			percpu_stats->tx_packets++;
322 			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
323 			ch->stats.xdp_tx++;
324 		}
325 		break;
326 	default:
327 		bpf_warn_invalid_xdp_action(xdp_act);
328 		/* fall through */
329 	case XDP_ABORTED:
330 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
331 		/* fall through */
332 	case XDP_DROP:
333 		xdp_release_buf(priv, ch, addr);
334 		ch->stats.xdp_drop++;
335 		break;
336 	case XDP_REDIRECT:
337 		dma_unmap_page(priv->net_dev->dev.parent, addr,
338 			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
339 		ch->buf_count--;
340 		xdp.data_hard_start = vaddr;
341 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
342 		if (unlikely(err))
343 			ch->stats.xdp_drop++;
344 		else
345 			ch->stats.xdp_redirect++;
346 		break;
347 	}
348 
349 	ch->xdp.res |= xdp_act;
350 out:
351 	rcu_read_unlock();
352 	return xdp_act;
353 }
354 
355 /* Main Rx frame processing routine */
356 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
357 			 struct dpaa2_eth_channel *ch,
358 			 const struct dpaa2_fd *fd,
359 			 struct dpaa2_eth_fq *fq)
360 {
361 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
362 	u8 fd_format = dpaa2_fd_get_format(fd);
363 	void *vaddr;
364 	struct sk_buff *skb;
365 	struct rtnl_link_stats64 *percpu_stats;
366 	struct dpaa2_eth_drv_stats *percpu_extras;
367 	struct device *dev = priv->net_dev->dev.parent;
368 	struct dpaa2_fas *fas;
369 	void *buf_data;
370 	u32 status = 0;
371 	u32 xdp_act;
372 
373 	/* Tracing point */
374 	trace_dpaa2_rx_fd(priv->net_dev, fd);
375 
376 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
377 	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
378 				DMA_BIDIRECTIONAL);
379 
380 	fas = dpaa2_get_fas(vaddr, false);
381 	prefetch(fas);
382 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
383 	prefetch(buf_data);
384 
385 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
386 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
387 
388 	if (fd_format == dpaa2_fd_single) {
389 		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
390 		if (xdp_act != XDP_PASS) {
391 			percpu_stats->rx_packets++;
392 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
393 			return;
394 		}
395 
396 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
397 			       DMA_BIDIRECTIONAL);
398 		skb = build_linear_skb(ch, fd, vaddr);
399 	} else if (fd_format == dpaa2_fd_sg) {
400 		WARN_ON(priv->xdp_prog);
401 
402 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
403 			       DMA_BIDIRECTIONAL);
404 		skb = build_frag_skb(priv, ch, buf_data);
405 		free_pages((unsigned long)vaddr, 0);
406 		percpu_extras->rx_sg_frames++;
407 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
408 	} else {
409 		/* We don't support any other format */
410 		goto err_frame_format;
411 	}
412 
413 	if (unlikely(!skb))
414 		goto err_build_skb;
415 
416 	prefetch(skb->data);
417 
418 	/* Get the timestamp value */
419 	if (priv->rx_tstamp) {
420 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
421 		__le64 *ts = dpaa2_get_ts(vaddr, false);
422 		u64 ns;
423 
424 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
425 
426 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
427 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
428 	}
429 
430 	/* Check if we need to validate the L4 csum */
431 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
432 		status = le32_to_cpu(fas->status);
433 		validate_rx_csum(priv, status, skb);
434 	}
435 
436 	skb->protocol = eth_type_trans(skb, priv->net_dev);
437 	skb_record_rx_queue(skb, fq->flowid);
438 
439 	percpu_stats->rx_packets++;
440 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
441 
442 	list_add_tail(&skb->list, ch->rx_list);
443 
444 	return;
445 
446 err_build_skb:
447 	free_rx_fd(priv, fd, vaddr);
448 err_frame_format:
449 	percpu_stats->rx_dropped++;
450 }
451 
452 /* Consume all frames pull-dequeued into the store. This is the simplest way to
453  * make sure we don't accidentally issue another volatile dequeue which would
454  * overwrite (leak) frames already in the store.
455  *
456  * Observance of NAPI budget is not our concern, leaving that to the caller.
457  */
458 static int consume_frames(struct dpaa2_eth_channel *ch,
459 			  struct dpaa2_eth_fq **src)
460 {
461 	struct dpaa2_eth_priv *priv = ch->priv;
462 	struct dpaa2_eth_fq *fq = NULL;
463 	struct dpaa2_dq *dq;
464 	const struct dpaa2_fd *fd;
465 	int cleaned = 0, retries = 0;
466 	int is_last;
467 
468 	do {
469 		dq = dpaa2_io_store_next(ch->store, &is_last);
470 		if (unlikely(!dq)) {
471 			/* If we're here, we *must* have placed a
472 			 * volatile dequeue comnmand, so keep reading through
473 			 * the store until we get some sort of valid response
474 			 * token (either a valid frame or an "empty dequeue")
475 			 */
476 			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
477 				netdev_err_once(priv->net_dev,
478 						"Unable to read a valid dequeue response\n");
479 				return -ETIMEDOUT;
480 			}
481 			continue;
482 		}
483 
484 		fd = dpaa2_dq_fd(dq);
485 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
486 
487 		fq->consume(priv, ch, fd, fq);
488 		cleaned++;
489 		retries = 0;
490 	} while (!is_last);
491 
492 	if (!cleaned)
493 		return 0;
494 
495 	fq->stats.frames += cleaned;
496 
497 	/* A dequeue operation only pulls frames from a single queue
498 	 * into the store. Return the frame queue as an out param.
499 	 */
500 	if (src)
501 		*src = fq;
502 
503 	return cleaned;
504 }
505 
506 /* Configure the egress frame annotation for timestamp update */
507 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
508 {
509 	struct dpaa2_faead *faead;
510 	u32 ctrl, frc;
511 
512 	/* Mark the egress frame annotation area as valid */
513 	frc = dpaa2_fd_get_frc(fd);
514 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
515 
516 	/* Set hardware annotation size */
517 	ctrl = dpaa2_fd_get_ctrl(fd);
518 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
519 
520 	/* enable UPD (update prepanded data) bit in FAEAD field of
521 	 * hardware frame annotation area
522 	 */
523 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
524 	faead = dpaa2_get_faead(buf_start, true);
525 	faead->ctrl = cpu_to_le32(ctrl);
526 }
527 
528 /* Create a frame descriptor based on a fragmented skb */
529 static int build_sg_fd(struct dpaa2_eth_priv *priv,
530 		       struct sk_buff *skb,
531 		       struct dpaa2_fd *fd)
532 {
533 	struct device *dev = priv->net_dev->dev.parent;
534 	void *sgt_buf = NULL;
535 	dma_addr_t addr;
536 	int nr_frags = skb_shinfo(skb)->nr_frags;
537 	struct dpaa2_sg_entry *sgt;
538 	int i, err;
539 	int sgt_buf_size;
540 	struct scatterlist *scl, *crt_scl;
541 	int num_sg;
542 	int num_dma_bufs;
543 	struct dpaa2_eth_swa *swa;
544 
545 	/* Create and map scatterlist.
546 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
547 	 * to go beyond nr_frags+1.
548 	 * Note: We don't support chained scatterlists
549 	 */
550 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
551 		return -EINVAL;
552 
553 	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
554 	if (unlikely(!scl))
555 		return -ENOMEM;
556 
557 	sg_init_table(scl, nr_frags + 1);
558 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
559 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
560 	if (unlikely(!num_dma_bufs)) {
561 		err = -ENOMEM;
562 		goto dma_map_sg_failed;
563 	}
564 
565 	/* Prepare the HW SGT structure */
566 	sgt_buf_size = priv->tx_data_offset +
567 		       sizeof(struct dpaa2_sg_entry) *  num_dma_bufs;
568 	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
569 	if (unlikely(!sgt_buf)) {
570 		err = -ENOMEM;
571 		goto sgt_buf_alloc_failed;
572 	}
573 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
574 	memset(sgt_buf, 0, sgt_buf_size);
575 
576 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
577 
578 	/* Fill in the HW SGT structure.
579 	 *
580 	 * sgt_buf is zeroed out, so the following fields are implicit
581 	 * in all sgt entries:
582 	 *   - offset is 0
583 	 *   - format is 'dpaa2_sg_single'
584 	 */
585 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
586 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
587 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
588 	}
589 	dpaa2_sg_set_final(&sgt[i - 1], true);
590 
591 	/* Store the skb backpointer in the SGT buffer.
592 	 * Fit the scatterlist and the number of buffers alongside the
593 	 * skb backpointer in the software annotation area. We'll need
594 	 * all of them on Tx Conf.
595 	 */
596 	swa = (struct dpaa2_eth_swa *)sgt_buf;
597 	swa->type = DPAA2_ETH_SWA_SG;
598 	swa->sg.skb = skb;
599 	swa->sg.scl = scl;
600 	swa->sg.num_sg = num_sg;
601 	swa->sg.sgt_size = sgt_buf_size;
602 
603 	/* Separately map the SGT buffer */
604 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
605 	if (unlikely(dma_mapping_error(dev, addr))) {
606 		err = -ENOMEM;
607 		goto dma_map_single_failed;
608 	}
609 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
610 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
611 	dpaa2_fd_set_addr(fd, addr);
612 	dpaa2_fd_set_len(fd, skb->len);
613 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
614 
615 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
616 		enable_tx_tstamp(fd, sgt_buf);
617 
618 	return 0;
619 
620 dma_map_single_failed:
621 	skb_free_frag(sgt_buf);
622 sgt_buf_alloc_failed:
623 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
624 dma_map_sg_failed:
625 	kfree(scl);
626 	return err;
627 }
628 
629 /* Create a frame descriptor based on a linear skb */
630 static int build_single_fd(struct dpaa2_eth_priv *priv,
631 			   struct sk_buff *skb,
632 			   struct dpaa2_fd *fd)
633 {
634 	struct device *dev = priv->net_dev->dev.parent;
635 	u8 *buffer_start, *aligned_start;
636 	struct dpaa2_eth_swa *swa;
637 	dma_addr_t addr;
638 
639 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
640 
641 	/* If there's enough room to align the FD address, do it.
642 	 * It will help hardware optimize accesses.
643 	 */
644 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
645 				  DPAA2_ETH_TX_BUF_ALIGN);
646 	if (aligned_start >= skb->head)
647 		buffer_start = aligned_start;
648 
649 	/* Store a backpointer to the skb at the beginning of the buffer
650 	 * (in the private data area) such that we can release it
651 	 * on Tx confirm
652 	 */
653 	swa = (struct dpaa2_eth_swa *)buffer_start;
654 	swa->type = DPAA2_ETH_SWA_SINGLE;
655 	swa->single.skb = skb;
656 
657 	addr = dma_map_single(dev, buffer_start,
658 			      skb_tail_pointer(skb) - buffer_start,
659 			      DMA_BIDIRECTIONAL);
660 	if (unlikely(dma_mapping_error(dev, addr)))
661 		return -ENOMEM;
662 
663 	dpaa2_fd_set_addr(fd, addr);
664 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
665 	dpaa2_fd_set_len(fd, skb->len);
666 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
667 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
668 
669 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
670 		enable_tx_tstamp(fd, buffer_start);
671 
672 	return 0;
673 }
674 
675 /* FD freeing routine on the Tx path
676  *
677  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
678  * back-pointed to is also freed.
679  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
680  * dpaa2_eth_tx().
681  */
682 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
683 		       struct dpaa2_eth_fq *fq,
684 		       const struct dpaa2_fd *fd, bool in_napi)
685 {
686 	struct device *dev = priv->net_dev->dev.parent;
687 	dma_addr_t fd_addr;
688 	struct sk_buff *skb = NULL;
689 	unsigned char *buffer_start;
690 	struct dpaa2_eth_swa *swa;
691 	u8 fd_format = dpaa2_fd_get_format(fd);
692 	u32 fd_len = dpaa2_fd_get_len(fd);
693 
694 	fd_addr = dpaa2_fd_get_addr(fd);
695 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
696 	swa = (struct dpaa2_eth_swa *)buffer_start;
697 
698 	if (fd_format == dpaa2_fd_single) {
699 		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
700 			skb = swa->single.skb;
701 			/* Accessing the skb buffer is safe before dma unmap,
702 			 * because we didn't map the actual skb shell.
703 			 */
704 			dma_unmap_single(dev, fd_addr,
705 					 skb_tail_pointer(skb) - buffer_start,
706 					 DMA_BIDIRECTIONAL);
707 		} else {
708 			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
709 			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
710 					 DMA_BIDIRECTIONAL);
711 		}
712 	} else if (fd_format == dpaa2_fd_sg) {
713 		skb = swa->sg.skb;
714 
715 		/* Unmap the scatterlist */
716 		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
717 			     DMA_BIDIRECTIONAL);
718 		kfree(swa->sg.scl);
719 
720 		/* Unmap the SGT buffer */
721 		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
722 				 DMA_BIDIRECTIONAL);
723 	} else {
724 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
725 		return;
726 	}
727 
728 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
729 		fq->dq_frames++;
730 		fq->dq_bytes += fd_len;
731 	}
732 
733 	if (swa->type == DPAA2_ETH_SWA_XDP) {
734 		xdp_return_frame(swa->xdp.xdpf);
735 		return;
736 	}
737 
738 	/* Get the timestamp value */
739 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
740 		struct skb_shared_hwtstamps shhwtstamps;
741 		__le64 *ts = dpaa2_get_ts(buffer_start, true);
742 		u64 ns;
743 
744 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
745 
746 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
747 		shhwtstamps.hwtstamp = ns_to_ktime(ns);
748 		skb_tstamp_tx(skb, &shhwtstamps);
749 	}
750 
751 	/* Free SGT buffer allocated on tx */
752 	if (fd_format != dpaa2_fd_single)
753 		skb_free_frag(buffer_start);
754 
755 	/* Move on with skb release */
756 	napi_consume_skb(skb, in_napi);
757 }
758 
759 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
760 {
761 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
762 	struct dpaa2_fd fd;
763 	struct rtnl_link_stats64 *percpu_stats;
764 	struct dpaa2_eth_drv_stats *percpu_extras;
765 	struct dpaa2_eth_fq *fq;
766 	struct netdev_queue *nq;
767 	u16 queue_mapping;
768 	unsigned int needed_headroom;
769 	u32 fd_len;
770 	u8 prio = 0;
771 	int err, i;
772 
773 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
774 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
775 
776 	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
777 	if (skb_headroom(skb) < needed_headroom) {
778 		struct sk_buff *ns;
779 
780 		ns = skb_realloc_headroom(skb, needed_headroom);
781 		if (unlikely(!ns)) {
782 			percpu_stats->tx_dropped++;
783 			goto err_alloc_headroom;
784 		}
785 		percpu_extras->tx_reallocs++;
786 
787 		if (skb->sk)
788 			skb_set_owner_w(ns, skb->sk);
789 
790 		dev_kfree_skb(skb);
791 		skb = ns;
792 	}
793 
794 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
795 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
796 	 */
797 	skb = skb_unshare(skb, GFP_ATOMIC);
798 	if (unlikely(!skb)) {
799 		/* skb_unshare() has already freed the skb */
800 		percpu_stats->tx_dropped++;
801 		return NETDEV_TX_OK;
802 	}
803 
804 	/* Setup the FD fields */
805 	memset(&fd, 0, sizeof(fd));
806 
807 	if (skb_is_nonlinear(skb)) {
808 		err = build_sg_fd(priv, skb, &fd);
809 		percpu_extras->tx_sg_frames++;
810 		percpu_extras->tx_sg_bytes += skb->len;
811 	} else {
812 		err = build_single_fd(priv, skb, &fd);
813 	}
814 
815 	if (unlikely(err)) {
816 		percpu_stats->tx_dropped++;
817 		goto err_build_fd;
818 	}
819 
820 	/* Tracing point */
821 	trace_dpaa2_tx_fd(net_dev, &fd);
822 
823 	/* TxConf FQ selection relies on queue id from the stack.
824 	 * In case of a forwarded frame from another DPNI interface, we choose
825 	 * a queue affined to the same core that processed the Rx frame
826 	 */
827 	queue_mapping = skb_get_queue_mapping(skb);
828 
829 	if (net_dev->num_tc) {
830 		prio = netdev_txq_to_tc(net_dev, queue_mapping);
831 		/* Hardware interprets priority level 0 as being the highest,
832 		 * so we need to do a reverse mapping to the netdev tc index
833 		 */
834 		prio = net_dev->num_tc - prio - 1;
835 		/* We have only one FQ array entry for all Tx hardware queues
836 		 * with the same flow id (but different priority levels)
837 		 */
838 		queue_mapping %= dpaa2_eth_queue_count(priv);
839 	}
840 	fq = &priv->fq[queue_mapping];
841 
842 	fd_len = dpaa2_fd_get_len(&fd);
843 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
844 	netdev_tx_sent_queue(nq, fd_len);
845 
846 	/* Everything that happens after this enqueues might race with
847 	 * the Tx confirmation callback for this frame
848 	 */
849 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
850 		err = priv->enqueue(priv, fq, &fd, prio);
851 		if (err != -EBUSY)
852 			break;
853 	}
854 	percpu_extras->tx_portal_busy += i;
855 	if (unlikely(err < 0)) {
856 		percpu_stats->tx_errors++;
857 		/* Clean up everything, including freeing the skb */
858 		free_tx_fd(priv, fq, &fd, false);
859 		netdev_tx_completed_queue(nq, 1, fd_len);
860 	} else {
861 		percpu_stats->tx_packets++;
862 		percpu_stats->tx_bytes += fd_len;
863 	}
864 
865 	return NETDEV_TX_OK;
866 
867 err_build_fd:
868 err_alloc_headroom:
869 	dev_kfree_skb(skb);
870 
871 	return NETDEV_TX_OK;
872 }
873 
874 /* Tx confirmation frame processing routine */
875 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
876 			      struct dpaa2_eth_channel *ch __always_unused,
877 			      const struct dpaa2_fd *fd,
878 			      struct dpaa2_eth_fq *fq)
879 {
880 	struct rtnl_link_stats64 *percpu_stats;
881 	struct dpaa2_eth_drv_stats *percpu_extras;
882 	u32 fd_len = dpaa2_fd_get_len(fd);
883 	u32 fd_errors;
884 
885 	/* Tracing point */
886 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
887 
888 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
889 	percpu_extras->tx_conf_frames++;
890 	percpu_extras->tx_conf_bytes += fd_len;
891 
892 	/* Check frame errors in the FD field */
893 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
894 	free_tx_fd(priv, fq, fd, true);
895 
896 	if (likely(!fd_errors))
897 		return;
898 
899 	if (net_ratelimit())
900 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
901 			   fd_errors);
902 
903 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
904 	/* Tx-conf logically pertains to the egress path. */
905 	percpu_stats->tx_errors++;
906 }
907 
908 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
909 {
910 	int err;
911 
912 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
913 			       DPNI_OFF_RX_L3_CSUM, enable);
914 	if (err) {
915 		netdev_err(priv->net_dev,
916 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
917 		return err;
918 	}
919 
920 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
921 			       DPNI_OFF_RX_L4_CSUM, enable);
922 	if (err) {
923 		netdev_err(priv->net_dev,
924 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
925 		return err;
926 	}
927 
928 	return 0;
929 }
930 
931 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
932 {
933 	int err;
934 
935 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
936 			       DPNI_OFF_TX_L3_CSUM, enable);
937 	if (err) {
938 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
939 		return err;
940 	}
941 
942 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
943 			       DPNI_OFF_TX_L4_CSUM, enable);
944 	if (err) {
945 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
946 		return err;
947 	}
948 
949 	return 0;
950 }
951 
952 /* Perform a single release command to add buffers
953  * to the specified buffer pool
954  */
955 static int add_bufs(struct dpaa2_eth_priv *priv,
956 		    struct dpaa2_eth_channel *ch, u16 bpid)
957 {
958 	struct device *dev = priv->net_dev->dev.parent;
959 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
960 	struct page *page;
961 	dma_addr_t addr;
962 	int retries = 0;
963 	int i, err;
964 
965 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
966 		/* Allocate buffer visible to WRIOP + skb shared info +
967 		 * alignment padding
968 		 */
969 		/* allocate one page for each Rx buffer. WRIOP sees
970 		 * the entire page except for a tailroom reserved for
971 		 * skb shared info
972 		 */
973 		page = dev_alloc_pages(0);
974 		if (!page)
975 			goto err_alloc;
976 
977 		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
978 				    DMA_BIDIRECTIONAL);
979 		if (unlikely(dma_mapping_error(dev, addr)))
980 			goto err_map;
981 
982 		buf_array[i] = addr;
983 
984 		/* tracing point */
985 		trace_dpaa2_eth_buf_seed(priv->net_dev,
986 					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
987 					 addr, DPAA2_ETH_RX_BUF_SIZE,
988 					 bpid);
989 	}
990 
991 release_bufs:
992 	/* In case the portal is busy, retry until successful */
993 	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
994 					       buf_array, i)) == -EBUSY) {
995 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
996 			break;
997 		cpu_relax();
998 	}
999 
1000 	/* If release command failed, clean up and bail out;
1001 	 * not much else we can do about it
1002 	 */
1003 	if (err) {
1004 		free_bufs(priv, buf_array, i);
1005 		return 0;
1006 	}
1007 
1008 	return i;
1009 
1010 err_map:
1011 	__free_pages(page, 0);
1012 err_alloc:
1013 	/* If we managed to allocate at least some buffers,
1014 	 * release them to hardware
1015 	 */
1016 	if (i)
1017 		goto release_bufs;
1018 
1019 	return 0;
1020 }
1021 
1022 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1023 {
1024 	int i, j;
1025 	int new_count;
1026 
1027 	for (j = 0; j < priv->num_channels; j++) {
1028 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1029 		     i += DPAA2_ETH_BUFS_PER_CMD) {
1030 			new_count = add_bufs(priv, priv->channel[j], bpid);
1031 			priv->channel[j]->buf_count += new_count;
1032 
1033 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1034 				return -ENOMEM;
1035 			}
1036 		}
1037 	}
1038 
1039 	return 0;
1040 }
1041 
1042 /**
1043  * Drain the specified number of buffers from the DPNI's private buffer pool.
1044  * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
1045  */
1046 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1047 {
1048 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1049 	int retries = 0;
1050 	int ret;
1051 
1052 	do {
1053 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1054 					       buf_array, count);
1055 		if (ret < 0) {
1056 			if (ret == -EBUSY &&
1057 			    retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1058 				continue;
1059 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1060 			return;
1061 		}
1062 		free_bufs(priv, buf_array, ret);
1063 		retries = 0;
1064 	} while (ret);
1065 }
1066 
1067 static void drain_pool(struct dpaa2_eth_priv *priv)
1068 {
1069 	int i;
1070 
1071 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1072 	drain_bufs(priv, 1);
1073 
1074 	for (i = 0; i < priv->num_channels; i++)
1075 		priv->channel[i]->buf_count = 0;
1076 }
1077 
1078 /* Function is called from softirq context only, so we don't need to guard
1079  * the access to percpu count
1080  */
1081 static int refill_pool(struct dpaa2_eth_priv *priv,
1082 		       struct dpaa2_eth_channel *ch,
1083 		       u16 bpid)
1084 {
1085 	int new_count;
1086 
1087 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1088 		return 0;
1089 
1090 	do {
1091 		new_count = add_bufs(priv, ch, bpid);
1092 		if (unlikely(!new_count)) {
1093 			/* Out of memory; abort for now, we'll try later on */
1094 			break;
1095 		}
1096 		ch->buf_count += new_count;
1097 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1098 
1099 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1100 		return -ENOMEM;
1101 
1102 	return 0;
1103 }
1104 
1105 static int pull_channel(struct dpaa2_eth_channel *ch)
1106 {
1107 	int err;
1108 	int dequeues = -1;
1109 
1110 	/* Retry while portal is busy */
1111 	do {
1112 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1113 						    ch->store);
1114 		dequeues++;
1115 		cpu_relax();
1116 	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1117 
1118 	ch->stats.dequeue_portal_busy += dequeues;
1119 	if (unlikely(err))
1120 		ch->stats.pull_err++;
1121 
1122 	return err;
1123 }
1124 
1125 /* NAPI poll routine
1126  *
1127  * Frames are dequeued from the QMan channel associated with this NAPI context.
1128  * Rx, Tx confirmation and (if configured) Rx error frames all count
1129  * towards the NAPI budget.
1130  */
1131 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1132 {
1133 	struct dpaa2_eth_channel *ch;
1134 	struct dpaa2_eth_priv *priv;
1135 	int rx_cleaned = 0, txconf_cleaned = 0;
1136 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1137 	struct netdev_queue *nq;
1138 	int store_cleaned, work_done;
1139 	struct list_head rx_list;
1140 	int retries = 0;
1141 	int err;
1142 
1143 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
1144 	ch->xdp.res = 0;
1145 	priv = ch->priv;
1146 
1147 	INIT_LIST_HEAD(&rx_list);
1148 	ch->rx_list = &rx_list;
1149 
1150 	do {
1151 		err = pull_channel(ch);
1152 		if (unlikely(err))
1153 			break;
1154 
1155 		/* Refill pool if appropriate */
1156 		refill_pool(priv, ch, priv->bpid);
1157 
1158 		store_cleaned = consume_frames(ch, &fq);
1159 		if (store_cleaned <= 0)
1160 			break;
1161 		if (fq->type == DPAA2_RX_FQ) {
1162 			rx_cleaned += store_cleaned;
1163 		} else {
1164 			txconf_cleaned += store_cleaned;
1165 			/* We have a single Tx conf FQ on this channel */
1166 			txc_fq = fq;
1167 		}
1168 
1169 		/* If we either consumed the whole NAPI budget with Rx frames
1170 		 * or we reached the Tx confirmations threshold, we're done.
1171 		 */
1172 		if (rx_cleaned >= budget ||
1173 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1174 			work_done = budget;
1175 			goto out;
1176 		}
1177 	} while (store_cleaned);
1178 
1179 	/* We didn't consume the entire budget, so finish napi and
1180 	 * re-enable data availability notifications
1181 	 */
1182 	napi_complete_done(napi, rx_cleaned);
1183 	do {
1184 		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1185 		cpu_relax();
1186 	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
1187 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1188 		  ch->nctx.desired_cpu);
1189 
1190 	work_done = max(rx_cleaned, 1);
1191 
1192 out:
1193 	netif_receive_skb_list(ch->rx_list);
1194 
1195 	if (txc_fq && txc_fq->dq_frames) {
1196 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1197 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1198 					  txc_fq->dq_bytes);
1199 		txc_fq->dq_frames = 0;
1200 		txc_fq->dq_bytes = 0;
1201 	}
1202 
1203 	if (ch->xdp.res & XDP_REDIRECT)
1204 		xdp_do_flush_map();
1205 
1206 	return work_done;
1207 }
1208 
1209 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1210 {
1211 	struct dpaa2_eth_channel *ch;
1212 	int i;
1213 
1214 	for (i = 0; i < priv->num_channels; i++) {
1215 		ch = priv->channel[i];
1216 		napi_enable(&ch->napi);
1217 	}
1218 }
1219 
1220 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1221 {
1222 	struct dpaa2_eth_channel *ch;
1223 	int i;
1224 
1225 	for (i = 0; i < priv->num_channels; i++) {
1226 		ch = priv->channel[i];
1227 		napi_disable(&ch->napi);
1228 	}
1229 }
1230 
1231 static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
1232 {
1233 	struct dpni_taildrop td = {0};
1234 	int i, err;
1235 
1236 	if (priv->rx_td_enabled == enable)
1237 		return;
1238 
1239 	td.enable = enable;
1240 	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1241 
1242 	for (i = 0; i < priv->num_fqs; i++) {
1243 		if (priv->fq[i].type != DPAA2_RX_FQ)
1244 			continue;
1245 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1246 					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
1247 					priv->fq[i].flowid, &td);
1248 		if (err) {
1249 			netdev_err(priv->net_dev,
1250 				   "dpni_set_taildrop() failed\n");
1251 			break;
1252 		}
1253 	}
1254 
1255 	priv->rx_td_enabled = enable;
1256 }
1257 
1258 static int link_state_update(struct dpaa2_eth_priv *priv)
1259 {
1260 	struct dpni_link_state state = {0};
1261 	bool tx_pause;
1262 	int err;
1263 
1264 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1265 	if (unlikely(err)) {
1266 		netdev_err(priv->net_dev,
1267 			   "dpni_get_link_state() failed\n");
1268 		return err;
1269 	}
1270 
1271 	/* If Tx pause frame settings have changed, we need to update
1272 	 * Rx FQ taildrop configuration as well. We configure taildrop
1273 	 * only when pause frame generation is disabled.
1274 	 */
1275 	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
1276 		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
1277 	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
1278 
1279 	/* When we manage the MAC/PHY using phylink there is no need
1280 	 * to manually update the netif_carrier.
1281 	 */
1282 	if (priv->mac)
1283 		goto out;
1284 
1285 	/* Chech link state; speed / duplex changes are not treated yet */
1286 	if (priv->link_state.up == state.up)
1287 		goto out;
1288 
1289 	if (state.up) {
1290 		netif_carrier_on(priv->net_dev);
1291 		netif_tx_start_all_queues(priv->net_dev);
1292 	} else {
1293 		netif_tx_stop_all_queues(priv->net_dev);
1294 		netif_carrier_off(priv->net_dev);
1295 	}
1296 
1297 	netdev_info(priv->net_dev, "Link Event: state %s\n",
1298 		    state.up ? "up" : "down");
1299 
1300 out:
1301 	priv->link_state = state;
1302 
1303 	return 0;
1304 }
1305 
1306 static int dpaa2_eth_open(struct net_device *net_dev)
1307 {
1308 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1309 	int err;
1310 
1311 	err = seed_pool(priv, priv->bpid);
1312 	if (err) {
1313 		/* Not much to do; the buffer pool, though not filled up,
1314 		 * may still contain some buffers which would enable us
1315 		 * to limp on.
1316 		 */
1317 		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1318 			   priv->dpbp_dev->obj_desc.id, priv->bpid);
1319 	}
1320 
1321 	if (!priv->mac) {
1322 		/* We'll only start the txqs when the link is actually ready;
1323 		 * make sure we don't race against the link up notification,
1324 		 * which may come immediately after dpni_enable();
1325 		 */
1326 		netif_tx_stop_all_queues(net_dev);
1327 
1328 		/* Also, explicitly set carrier off, otherwise
1329 		 * netif_carrier_ok() will return true and cause 'ip link show'
1330 		 * to report the LOWER_UP flag, even though the link
1331 		 * notification wasn't even received.
1332 		 */
1333 		netif_carrier_off(net_dev);
1334 	}
1335 	enable_ch_napi(priv);
1336 
1337 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1338 	if (err < 0) {
1339 		netdev_err(net_dev, "dpni_enable() failed\n");
1340 		goto enable_err;
1341 	}
1342 
1343 	if (!priv->mac) {
1344 		/* If the DPMAC object has already processed the link up
1345 		 * interrupt, we have to learn the link state ourselves.
1346 		 */
1347 		err = link_state_update(priv);
1348 		if (err < 0) {
1349 			netdev_err(net_dev, "Can't update link state\n");
1350 			goto link_state_err;
1351 		}
1352 	} else {
1353 		phylink_start(priv->mac->phylink);
1354 	}
1355 
1356 	return 0;
1357 
1358 link_state_err:
1359 enable_err:
1360 	disable_ch_napi(priv);
1361 	drain_pool(priv);
1362 	return err;
1363 }
1364 
1365 /* Total number of in-flight frames on ingress queues */
1366 static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1367 {
1368 	struct dpaa2_eth_fq *fq;
1369 	u32 fcnt = 0, bcnt = 0, total = 0;
1370 	int i, err;
1371 
1372 	for (i = 0; i < priv->num_fqs; i++) {
1373 		fq = &priv->fq[i];
1374 		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1375 		if (err) {
1376 			netdev_warn(priv->net_dev, "query_fq_count failed");
1377 			break;
1378 		}
1379 		total += fcnt;
1380 	}
1381 
1382 	return total;
1383 }
1384 
1385 static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
1386 {
1387 	int retries = 10;
1388 	u32 pending;
1389 
1390 	do {
1391 		pending = ingress_fq_count(priv);
1392 		if (pending)
1393 			msleep(100);
1394 	} while (pending && --retries);
1395 }
1396 
1397 #define DPNI_TX_PENDING_VER_MAJOR	7
1398 #define DPNI_TX_PENDING_VER_MINOR	13
1399 static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
1400 {
1401 	union dpni_statistics stats;
1402 	int retries = 10;
1403 	int err;
1404 
1405 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
1406 				   DPNI_TX_PENDING_VER_MINOR) < 0)
1407 		goto out;
1408 
1409 	do {
1410 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
1411 					  &stats);
1412 		if (err)
1413 			goto out;
1414 		if (stats.page_6.tx_pending_frames == 0)
1415 			return;
1416 	} while (--retries);
1417 
1418 out:
1419 	msleep(500);
1420 }
1421 
1422 static int dpaa2_eth_stop(struct net_device *net_dev)
1423 {
1424 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1425 	int dpni_enabled = 0;
1426 	int retries = 10;
1427 
1428 	if (!priv->mac) {
1429 		netif_tx_stop_all_queues(net_dev);
1430 		netif_carrier_off(net_dev);
1431 	} else {
1432 		phylink_stop(priv->mac->phylink);
1433 	}
1434 
1435 	/* On dpni_disable(), the MC firmware will:
1436 	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1437 	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1438 	 * of all in flight Tx frames is finished (and corresponding Tx conf
1439 	 * frames are enqueued back to software)
1440 	 *
1441 	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1442 	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1443 	 * and Tx conf queues are consumed on NAPI poll.
1444 	 */
1445 	wait_for_egress_fq_empty(priv);
1446 
1447 	do {
1448 		dpni_disable(priv->mc_io, 0, priv->mc_token);
1449 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1450 		if (dpni_enabled)
1451 			/* Allow the hardware some slack */
1452 			msleep(100);
1453 	} while (dpni_enabled && --retries);
1454 	if (!retries) {
1455 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1456 		/* Must go on and disable NAPI nonetheless, so we don't crash at
1457 		 * the next "ifconfig up"
1458 		 */
1459 	}
1460 
1461 	wait_for_ingress_fq_empty(priv);
1462 	disable_ch_napi(priv);
1463 
1464 	/* Empty the buffer pool */
1465 	drain_pool(priv);
1466 
1467 	return 0;
1468 }
1469 
1470 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1471 {
1472 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1473 	struct device *dev = net_dev->dev.parent;
1474 	int err;
1475 
1476 	err = eth_mac_addr(net_dev, addr);
1477 	if (err < 0) {
1478 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1479 		return err;
1480 	}
1481 
1482 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1483 					net_dev->dev_addr);
1484 	if (err) {
1485 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1486 		return err;
1487 	}
1488 
1489 	return 0;
1490 }
1491 
1492 /** Fill in counters maintained by the GPP driver. These may be different from
1493  * the hardware counters obtained by ethtool.
1494  */
1495 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1496 				struct rtnl_link_stats64 *stats)
1497 {
1498 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1499 	struct rtnl_link_stats64 *percpu_stats;
1500 	u64 *cpustats;
1501 	u64 *netstats = (u64 *)stats;
1502 	int i, j;
1503 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1504 
1505 	for_each_possible_cpu(i) {
1506 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1507 		cpustats = (u64 *)percpu_stats;
1508 		for (j = 0; j < num; j++)
1509 			netstats[j] += cpustats[j];
1510 	}
1511 }
1512 
1513 /* Copy mac unicast addresses from @net_dev to @priv.
1514  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1515  */
1516 static void add_uc_hw_addr(const struct net_device *net_dev,
1517 			   struct dpaa2_eth_priv *priv)
1518 {
1519 	struct netdev_hw_addr *ha;
1520 	int err;
1521 
1522 	netdev_for_each_uc_addr(ha, net_dev) {
1523 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1524 					ha->addr);
1525 		if (err)
1526 			netdev_warn(priv->net_dev,
1527 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1528 				    ha->addr, err);
1529 	}
1530 }
1531 
1532 /* Copy mac multicast addresses from @net_dev to @priv
1533  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1534  */
1535 static void add_mc_hw_addr(const struct net_device *net_dev,
1536 			   struct dpaa2_eth_priv *priv)
1537 {
1538 	struct netdev_hw_addr *ha;
1539 	int err;
1540 
1541 	netdev_for_each_mc_addr(ha, net_dev) {
1542 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1543 					ha->addr);
1544 		if (err)
1545 			netdev_warn(priv->net_dev,
1546 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1547 				    ha->addr, err);
1548 	}
1549 }
1550 
1551 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1552 {
1553 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1554 	int uc_count = netdev_uc_count(net_dev);
1555 	int mc_count = netdev_mc_count(net_dev);
1556 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1557 	u32 options = priv->dpni_attrs.options;
1558 	u16 mc_token = priv->mc_token;
1559 	struct fsl_mc_io *mc_io = priv->mc_io;
1560 	int err;
1561 
1562 	/* Basic sanity checks; these probably indicate a misconfiguration */
1563 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1564 		netdev_info(net_dev,
1565 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1566 			    max_mac);
1567 
1568 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
1569 	if (uc_count > max_mac) {
1570 		netdev_info(net_dev,
1571 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1572 			    uc_count, max_mac);
1573 		goto force_promisc;
1574 	}
1575 	if (mc_count + uc_count > max_mac) {
1576 		netdev_info(net_dev,
1577 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1578 			    uc_count + mc_count, max_mac);
1579 		goto force_mc_promisc;
1580 	}
1581 
1582 	/* Adjust promisc settings due to flag combinations */
1583 	if (net_dev->flags & IFF_PROMISC)
1584 		goto force_promisc;
1585 	if (net_dev->flags & IFF_ALLMULTI) {
1586 		/* First, rebuild unicast filtering table. This should be done
1587 		 * in promisc mode, in order to avoid frame loss while we
1588 		 * progressively add entries to the table.
1589 		 * We don't know whether we had been in promisc already, and
1590 		 * making an MC call to find out is expensive; so set uc promisc
1591 		 * nonetheless.
1592 		 */
1593 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1594 		if (err)
1595 			netdev_warn(net_dev, "Can't set uc promisc\n");
1596 
1597 		/* Actual uc table reconstruction. */
1598 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1599 		if (err)
1600 			netdev_warn(net_dev, "Can't clear uc filters\n");
1601 		add_uc_hw_addr(net_dev, priv);
1602 
1603 		/* Finally, clear uc promisc and set mc promisc as requested. */
1604 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1605 		if (err)
1606 			netdev_warn(net_dev, "Can't clear uc promisc\n");
1607 		goto force_mc_promisc;
1608 	}
1609 
1610 	/* Neither unicast, nor multicast promisc will be on... eventually.
1611 	 * For now, rebuild mac filtering tables while forcing both of them on.
1612 	 */
1613 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1614 	if (err)
1615 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1616 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1617 	if (err)
1618 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1619 
1620 	/* Actual mac filtering tables reconstruction */
1621 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1622 	if (err)
1623 		netdev_warn(net_dev, "Can't clear mac filters\n");
1624 	add_mc_hw_addr(net_dev, priv);
1625 	add_uc_hw_addr(net_dev, priv);
1626 
1627 	/* Now we can clear both ucast and mcast promisc, without risking
1628 	 * to drop legitimate frames anymore.
1629 	 */
1630 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1631 	if (err)
1632 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
1633 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1634 	if (err)
1635 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
1636 
1637 	return;
1638 
1639 force_promisc:
1640 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1641 	if (err)
1642 		netdev_warn(net_dev, "Can't set ucast promisc\n");
1643 force_mc_promisc:
1644 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1645 	if (err)
1646 		netdev_warn(net_dev, "Can't set mcast promisc\n");
1647 }
1648 
1649 static int dpaa2_eth_set_features(struct net_device *net_dev,
1650 				  netdev_features_t features)
1651 {
1652 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1653 	netdev_features_t changed = features ^ net_dev->features;
1654 	bool enable;
1655 	int err;
1656 
1657 	if (changed & NETIF_F_RXCSUM) {
1658 		enable = !!(features & NETIF_F_RXCSUM);
1659 		err = set_rx_csum(priv, enable);
1660 		if (err)
1661 			return err;
1662 	}
1663 
1664 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1665 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1666 		err = set_tx_csum(priv, enable);
1667 		if (err)
1668 			return err;
1669 	}
1670 
1671 	return 0;
1672 }
1673 
1674 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1675 {
1676 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1677 	struct hwtstamp_config config;
1678 
1679 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1680 		return -EFAULT;
1681 
1682 	switch (config.tx_type) {
1683 	case HWTSTAMP_TX_OFF:
1684 		priv->tx_tstamp = false;
1685 		break;
1686 	case HWTSTAMP_TX_ON:
1687 		priv->tx_tstamp = true;
1688 		break;
1689 	default:
1690 		return -ERANGE;
1691 	}
1692 
1693 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1694 		priv->rx_tstamp = false;
1695 	} else {
1696 		priv->rx_tstamp = true;
1697 		/* TS is set for all frame types, not only those requested */
1698 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1699 	}
1700 
1701 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1702 			-EFAULT : 0;
1703 }
1704 
1705 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1706 {
1707 	if (cmd == SIOCSHWTSTAMP)
1708 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1709 
1710 	return -EINVAL;
1711 }
1712 
1713 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1714 {
1715 	int mfl, linear_mfl;
1716 
1717 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1718 	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1719 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1720 
1721 	if (mfl > linear_mfl) {
1722 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1723 			    linear_mfl - VLAN_ETH_HLEN);
1724 		return false;
1725 	}
1726 
1727 	return true;
1728 }
1729 
1730 static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1731 {
1732 	int mfl, err;
1733 
1734 	/* We enforce a maximum Rx frame length based on MTU only if we have
1735 	 * an XDP program attached (in order to avoid Rx S/G frames).
1736 	 * Otherwise, we accept all incoming frames as long as they are not
1737 	 * larger than maximum size supported in hardware
1738 	 */
1739 	if (has_xdp)
1740 		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1741 	else
1742 		mfl = DPAA2_ETH_MFL;
1743 
1744 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1745 	if (err) {
1746 		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1747 		return err;
1748 	}
1749 
1750 	return 0;
1751 }
1752 
1753 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1754 {
1755 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1756 	int err;
1757 
1758 	if (!priv->xdp_prog)
1759 		goto out;
1760 
1761 	if (!xdp_mtu_valid(priv, new_mtu))
1762 		return -EINVAL;
1763 
1764 	err = set_rx_mfl(priv, new_mtu, true);
1765 	if (err)
1766 		return err;
1767 
1768 out:
1769 	dev->mtu = new_mtu;
1770 	return 0;
1771 }
1772 
1773 static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1774 {
1775 	struct dpni_buffer_layout buf_layout = {0};
1776 	int err;
1777 
1778 	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1779 				     DPNI_QUEUE_RX, &buf_layout);
1780 	if (err) {
1781 		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
1782 		return err;
1783 	}
1784 
1785 	/* Reserve extra headroom for XDP header size changes */
1786 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
1787 				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
1788 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1789 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1790 				     DPNI_QUEUE_RX, &buf_layout);
1791 	if (err) {
1792 		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
1793 		return err;
1794 	}
1795 
1796 	return 0;
1797 }
1798 
1799 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
1800 {
1801 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1802 	struct dpaa2_eth_channel *ch;
1803 	struct bpf_prog *old;
1804 	bool up, need_update;
1805 	int i, err;
1806 
1807 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
1808 		return -EINVAL;
1809 
1810 	if (prog)
1811 		bpf_prog_add(prog, priv->num_channels);
1812 
1813 	up = netif_running(dev);
1814 	need_update = (!!priv->xdp_prog != !!prog);
1815 
1816 	if (up)
1817 		dpaa2_eth_stop(dev);
1818 
1819 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
1820 	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
1821 	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
1822 	 * so we are sure no old format buffers will be used from now on.
1823 	 */
1824 	if (need_update) {
1825 		err = set_rx_mfl(priv, dev->mtu, !!prog);
1826 		if (err)
1827 			goto out_err;
1828 		err = update_rx_buffer_headroom(priv, !!prog);
1829 		if (err)
1830 			goto out_err;
1831 	}
1832 
1833 	old = xchg(&priv->xdp_prog, prog);
1834 	if (old)
1835 		bpf_prog_put(old);
1836 
1837 	for (i = 0; i < priv->num_channels; i++) {
1838 		ch = priv->channel[i];
1839 		old = xchg(&ch->xdp.prog, prog);
1840 		if (old)
1841 			bpf_prog_put(old);
1842 	}
1843 
1844 	if (up) {
1845 		err = dpaa2_eth_open(dev);
1846 		if (err)
1847 			return err;
1848 	}
1849 
1850 	return 0;
1851 
1852 out_err:
1853 	if (prog)
1854 		bpf_prog_sub(prog, priv->num_channels);
1855 	if (up)
1856 		dpaa2_eth_open(dev);
1857 
1858 	return err;
1859 }
1860 
1861 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1862 {
1863 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1864 
1865 	switch (xdp->command) {
1866 	case XDP_SETUP_PROG:
1867 		return setup_xdp(dev, xdp->prog);
1868 	case XDP_QUERY_PROG:
1869 		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1870 		break;
1871 	default:
1872 		return -EINVAL;
1873 	}
1874 
1875 	return 0;
1876 }
1877 
1878 static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1879 				    struct xdp_frame *xdpf)
1880 {
1881 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1882 	struct device *dev = net_dev->dev.parent;
1883 	struct rtnl_link_stats64 *percpu_stats;
1884 	struct dpaa2_eth_drv_stats *percpu_extras;
1885 	unsigned int needed_headroom;
1886 	struct dpaa2_eth_swa *swa;
1887 	struct dpaa2_eth_fq *fq;
1888 	struct dpaa2_fd fd;
1889 	void *buffer_start, *aligned_start;
1890 	dma_addr_t addr;
1891 	int err, i;
1892 
1893 	/* We require a minimum headroom to be able to transmit the frame.
1894 	 * Otherwise return an error and let the original net_device handle it
1895 	 */
1896 	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
1897 	if (xdpf->headroom < needed_headroom)
1898 		return -EINVAL;
1899 
1900 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1901 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1902 
1903 	/* Setup the FD fields */
1904 	memset(&fd, 0, sizeof(fd));
1905 
1906 	/* Align FD address, if possible */
1907 	buffer_start = xdpf->data - needed_headroom;
1908 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1909 				  DPAA2_ETH_TX_BUF_ALIGN);
1910 	if (aligned_start >= xdpf->data - xdpf->headroom)
1911 		buffer_start = aligned_start;
1912 
1913 	swa = (struct dpaa2_eth_swa *)buffer_start;
1914 	/* fill in necessary fields here */
1915 	swa->type = DPAA2_ETH_SWA_XDP;
1916 	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
1917 	swa->xdp.xdpf = xdpf;
1918 
1919 	addr = dma_map_single(dev, buffer_start,
1920 			      swa->xdp.dma_size,
1921 			      DMA_BIDIRECTIONAL);
1922 	if (unlikely(dma_mapping_error(dev, addr))) {
1923 		percpu_stats->tx_dropped++;
1924 		return -ENOMEM;
1925 	}
1926 
1927 	dpaa2_fd_set_addr(&fd, addr);
1928 	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
1929 	dpaa2_fd_set_len(&fd, xdpf->len);
1930 	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1931 	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1932 
1933 	fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1934 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1935 		err = priv->enqueue(priv, fq, &fd, 0);
1936 		if (err != -EBUSY)
1937 			break;
1938 	}
1939 	percpu_extras->tx_portal_busy += i;
1940 	if (unlikely(err < 0)) {
1941 		percpu_stats->tx_errors++;
1942 		/* let the Rx device handle the cleanup */
1943 		return err;
1944 	}
1945 
1946 	percpu_stats->tx_packets++;
1947 	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1948 
1949 	return 0;
1950 }
1951 
1952 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
1953 			      struct xdp_frame **frames, u32 flags)
1954 {
1955 	int drops = 0;
1956 	int i, err;
1957 
1958 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1959 		return -EINVAL;
1960 
1961 	if (!netif_running(net_dev))
1962 		return -ENETDOWN;
1963 
1964 	for (i = 0; i < n; i++) {
1965 		struct xdp_frame *xdpf = frames[i];
1966 
1967 		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
1968 		if (err) {
1969 			xdp_return_frame_rx_napi(xdpf);
1970 			drops++;
1971 		}
1972 	}
1973 
1974 	return n - drops;
1975 }
1976 
1977 static int update_xps(struct dpaa2_eth_priv *priv)
1978 {
1979 	struct net_device *net_dev = priv->net_dev;
1980 	struct cpumask xps_mask;
1981 	struct dpaa2_eth_fq *fq;
1982 	int i, num_queues, netdev_queues;
1983 	int err = 0;
1984 
1985 	num_queues = dpaa2_eth_queue_count(priv);
1986 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
1987 
1988 	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
1989 	 * queues, so only process those
1990 	 */
1991 	for (i = 0; i < netdev_queues; i++) {
1992 		fq = &priv->fq[i % num_queues];
1993 
1994 		cpumask_clear(&xps_mask);
1995 		cpumask_set_cpu(fq->target_cpu, &xps_mask);
1996 
1997 		err = netif_set_xps_queue(net_dev, &xps_mask, i);
1998 		if (err) {
1999 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
2000 			break;
2001 		}
2002 	}
2003 
2004 	return err;
2005 }
2006 
2007 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2008 			      enum tc_setup_type type, void *type_data)
2009 {
2010 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2011 	struct tc_mqprio_qopt *mqprio = type_data;
2012 	u8 num_tc, num_queues;
2013 	int i;
2014 
2015 	if (type != TC_SETUP_QDISC_MQPRIO)
2016 		return -EINVAL;
2017 
2018 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2019 	num_queues = dpaa2_eth_queue_count(priv);
2020 	num_tc = mqprio->num_tc;
2021 
2022 	if (num_tc == net_dev->num_tc)
2023 		return 0;
2024 
2025 	if (num_tc  > dpaa2_eth_tc_count(priv)) {
2026 		netdev_err(net_dev, "Max %d traffic classes supported\n",
2027 			   dpaa2_eth_tc_count(priv));
2028 		return -EINVAL;
2029 	}
2030 
2031 	if (!num_tc) {
2032 		netdev_reset_tc(net_dev);
2033 		netif_set_real_num_tx_queues(net_dev, num_queues);
2034 		goto out;
2035 	}
2036 
2037 	netdev_set_num_tc(net_dev, num_tc);
2038 	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2039 
2040 	for (i = 0; i < num_tc; i++)
2041 		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2042 
2043 out:
2044 	update_xps(priv);
2045 
2046 	return 0;
2047 }
2048 
2049 static const struct net_device_ops dpaa2_eth_ops = {
2050 	.ndo_open = dpaa2_eth_open,
2051 	.ndo_start_xmit = dpaa2_eth_tx,
2052 	.ndo_stop = dpaa2_eth_stop,
2053 	.ndo_set_mac_address = dpaa2_eth_set_addr,
2054 	.ndo_get_stats64 = dpaa2_eth_get_stats,
2055 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2056 	.ndo_set_features = dpaa2_eth_set_features,
2057 	.ndo_do_ioctl = dpaa2_eth_ioctl,
2058 	.ndo_change_mtu = dpaa2_eth_change_mtu,
2059 	.ndo_bpf = dpaa2_eth_xdp,
2060 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2061 	.ndo_setup_tc = dpaa2_eth_setup_tc,
2062 };
2063 
2064 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2065 {
2066 	struct dpaa2_eth_channel *ch;
2067 
2068 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2069 
2070 	/* Update NAPI statistics */
2071 	ch->stats.cdan++;
2072 
2073 	napi_schedule_irqoff(&ch->napi);
2074 }
2075 
2076 /* Allocate and configure a DPCON object */
2077 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2078 {
2079 	struct fsl_mc_device *dpcon;
2080 	struct device *dev = priv->net_dev->dev.parent;
2081 	int err;
2082 
2083 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2084 				     FSL_MC_POOL_DPCON, &dpcon);
2085 	if (err) {
2086 		if (err == -ENXIO)
2087 			err = -EPROBE_DEFER;
2088 		else
2089 			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2090 		return ERR_PTR(err);
2091 	}
2092 
2093 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2094 	if (err) {
2095 		dev_err(dev, "dpcon_open() failed\n");
2096 		goto free;
2097 	}
2098 
2099 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2100 	if (err) {
2101 		dev_err(dev, "dpcon_reset() failed\n");
2102 		goto close;
2103 	}
2104 
2105 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2106 	if (err) {
2107 		dev_err(dev, "dpcon_enable() failed\n");
2108 		goto close;
2109 	}
2110 
2111 	return dpcon;
2112 
2113 close:
2114 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2115 free:
2116 	fsl_mc_object_free(dpcon);
2117 
2118 	return NULL;
2119 }
2120 
2121 static void free_dpcon(struct dpaa2_eth_priv *priv,
2122 		       struct fsl_mc_device *dpcon)
2123 {
2124 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2125 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2126 	fsl_mc_object_free(dpcon);
2127 }
2128 
2129 static struct dpaa2_eth_channel *
2130 alloc_channel(struct dpaa2_eth_priv *priv)
2131 {
2132 	struct dpaa2_eth_channel *channel;
2133 	struct dpcon_attr attr;
2134 	struct device *dev = priv->net_dev->dev.parent;
2135 	int err;
2136 
2137 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2138 	if (!channel)
2139 		return NULL;
2140 
2141 	channel->dpcon = setup_dpcon(priv);
2142 	if (IS_ERR_OR_NULL(channel->dpcon)) {
2143 		err = PTR_ERR_OR_ZERO(channel->dpcon);
2144 		goto err_setup;
2145 	}
2146 
2147 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2148 				   &attr);
2149 	if (err) {
2150 		dev_err(dev, "dpcon_get_attributes() failed\n");
2151 		goto err_get_attr;
2152 	}
2153 
2154 	channel->dpcon_id = attr.id;
2155 	channel->ch_id = attr.qbman_ch_id;
2156 	channel->priv = priv;
2157 
2158 	return channel;
2159 
2160 err_get_attr:
2161 	free_dpcon(priv, channel->dpcon);
2162 err_setup:
2163 	kfree(channel);
2164 	return ERR_PTR(err);
2165 }
2166 
2167 static void free_channel(struct dpaa2_eth_priv *priv,
2168 			 struct dpaa2_eth_channel *channel)
2169 {
2170 	free_dpcon(priv, channel->dpcon);
2171 	kfree(channel);
2172 }
2173 
2174 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2175  * and register data availability notifications
2176  */
2177 static int setup_dpio(struct dpaa2_eth_priv *priv)
2178 {
2179 	struct dpaa2_io_notification_ctx *nctx;
2180 	struct dpaa2_eth_channel *channel;
2181 	struct dpcon_notification_cfg dpcon_notif_cfg;
2182 	struct device *dev = priv->net_dev->dev.parent;
2183 	int i, err;
2184 
2185 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
2186 	 * many cores as possible, so we need one channel for each core
2187 	 * (unless there's fewer queues than cores, in which case the extra
2188 	 * channels would be wasted).
2189 	 * Allocate one channel per core and register it to the core's
2190 	 * affine DPIO. If not enough channels are available for all cores
2191 	 * or if some cores don't have an affine DPIO, there will be no
2192 	 * ingress frame processing on those cores.
2193 	 */
2194 	cpumask_clear(&priv->dpio_cpumask);
2195 	for_each_online_cpu(i) {
2196 		/* Try to allocate a channel */
2197 		channel = alloc_channel(priv);
2198 		if (IS_ERR_OR_NULL(channel)) {
2199 			err = PTR_ERR_OR_ZERO(channel);
2200 			if (err != -EPROBE_DEFER)
2201 				dev_info(dev,
2202 					 "No affine channel for cpu %d and above\n", i);
2203 			goto err_alloc_ch;
2204 		}
2205 
2206 		priv->channel[priv->num_channels] = channel;
2207 
2208 		nctx = &channel->nctx;
2209 		nctx->is_cdan = 1;
2210 		nctx->cb = cdan_cb;
2211 		nctx->id = channel->ch_id;
2212 		nctx->desired_cpu = i;
2213 
2214 		/* Register the new context */
2215 		channel->dpio = dpaa2_io_service_select(i);
2216 		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2217 		if (err) {
2218 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2219 			/* If no affine DPIO for this core, there's probably
2220 			 * none available for next cores either. Signal we want
2221 			 * to retry later, in case the DPIO devices weren't
2222 			 * probed yet.
2223 			 */
2224 			err = -EPROBE_DEFER;
2225 			goto err_service_reg;
2226 		}
2227 
2228 		/* Register DPCON notification with MC */
2229 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2230 		dpcon_notif_cfg.priority = 0;
2231 		dpcon_notif_cfg.user_ctx = nctx->qman64;
2232 		err = dpcon_set_notification(priv->mc_io, 0,
2233 					     channel->dpcon->mc_handle,
2234 					     &dpcon_notif_cfg);
2235 		if (err) {
2236 			dev_err(dev, "dpcon_set_notification failed()\n");
2237 			goto err_set_cdan;
2238 		}
2239 
2240 		/* If we managed to allocate a channel and also found an affine
2241 		 * DPIO for this core, add it to the final mask
2242 		 */
2243 		cpumask_set_cpu(i, &priv->dpio_cpumask);
2244 		priv->num_channels++;
2245 
2246 		/* Stop if we already have enough channels to accommodate all
2247 		 * RX and TX conf queues
2248 		 */
2249 		if (priv->num_channels == priv->dpni_attrs.num_queues)
2250 			break;
2251 	}
2252 
2253 	return 0;
2254 
2255 err_set_cdan:
2256 	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2257 err_service_reg:
2258 	free_channel(priv, channel);
2259 err_alloc_ch:
2260 	if (err == -EPROBE_DEFER) {
2261 		for (i = 0; i < priv->num_channels; i++) {
2262 			channel = priv->channel[i];
2263 			nctx = &channel->nctx;
2264 			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2265 			free_channel(priv, channel);
2266 		}
2267 		priv->num_channels = 0;
2268 		return err;
2269 	}
2270 
2271 	if (cpumask_empty(&priv->dpio_cpumask)) {
2272 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2273 		return -ENODEV;
2274 	}
2275 
2276 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2277 		 cpumask_pr_args(&priv->dpio_cpumask));
2278 
2279 	return 0;
2280 }
2281 
2282 static void free_dpio(struct dpaa2_eth_priv *priv)
2283 {
2284 	struct device *dev = priv->net_dev->dev.parent;
2285 	struct dpaa2_eth_channel *ch;
2286 	int i;
2287 
2288 	/* deregister CDAN notifications and free channels */
2289 	for (i = 0; i < priv->num_channels; i++) {
2290 		ch = priv->channel[i];
2291 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2292 		free_channel(priv, ch);
2293 	}
2294 }
2295 
2296 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2297 						    int cpu)
2298 {
2299 	struct device *dev = priv->net_dev->dev.parent;
2300 	int i;
2301 
2302 	for (i = 0; i < priv->num_channels; i++)
2303 		if (priv->channel[i]->nctx.desired_cpu == cpu)
2304 			return priv->channel[i];
2305 
2306 	/* We should never get here. Issue a warning and return
2307 	 * the first channel, because it's still better than nothing
2308 	 */
2309 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2310 
2311 	return priv->channel[0];
2312 }
2313 
2314 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2315 {
2316 	struct device *dev = priv->net_dev->dev.parent;
2317 	struct dpaa2_eth_fq *fq;
2318 	int rx_cpu, txc_cpu;
2319 	int i;
2320 
2321 	/* For each FQ, pick one channel/CPU to deliver frames to.
2322 	 * This may well change at runtime, either through irqbalance or
2323 	 * through direct user intervention.
2324 	 */
2325 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2326 
2327 	for (i = 0; i < priv->num_fqs; i++) {
2328 		fq = &priv->fq[i];
2329 		switch (fq->type) {
2330 		case DPAA2_RX_FQ:
2331 			fq->target_cpu = rx_cpu;
2332 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2333 			if (rx_cpu >= nr_cpu_ids)
2334 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
2335 			break;
2336 		case DPAA2_TX_CONF_FQ:
2337 			fq->target_cpu = txc_cpu;
2338 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2339 			if (txc_cpu >= nr_cpu_ids)
2340 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
2341 			break;
2342 		default:
2343 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2344 		}
2345 		fq->channel = get_affine_channel(priv, fq->target_cpu);
2346 	}
2347 
2348 	update_xps(priv);
2349 }
2350 
2351 static void setup_fqs(struct dpaa2_eth_priv *priv)
2352 {
2353 	int i;
2354 
2355 	/* We have one TxConf FQ per Tx flow.
2356 	 * The number of Tx and Rx queues is the same.
2357 	 * Tx queues come first in the fq array.
2358 	 */
2359 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2360 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2361 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2362 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2363 	}
2364 
2365 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2366 		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2367 		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2368 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2369 	}
2370 
2371 	/* For each FQ, decide on which core to process incoming frames */
2372 	set_fq_affinity(priv);
2373 }
2374 
2375 /* Allocate and configure one buffer pool for each interface */
2376 static int setup_dpbp(struct dpaa2_eth_priv *priv)
2377 {
2378 	int err;
2379 	struct fsl_mc_device *dpbp_dev;
2380 	struct device *dev = priv->net_dev->dev.parent;
2381 	struct dpbp_attr dpbp_attrs;
2382 
2383 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2384 				     &dpbp_dev);
2385 	if (err) {
2386 		if (err == -ENXIO)
2387 			err = -EPROBE_DEFER;
2388 		else
2389 			dev_err(dev, "DPBP device allocation failed\n");
2390 		return err;
2391 	}
2392 
2393 	priv->dpbp_dev = dpbp_dev;
2394 
2395 	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2396 			&dpbp_dev->mc_handle);
2397 	if (err) {
2398 		dev_err(dev, "dpbp_open() failed\n");
2399 		goto err_open;
2400 	}
2401 
2402 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2403 	if (err) {
2404 		dev_err(dev, "dpbp_reset() failed\n");
2405 		goto err_reset;
2406 	}
2407 
2408 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2409 	if (err) {
2410 		dev_err(dev, "dpbp_enable() failed\n");
2411 		goto err_enable;
2412 	}
2413 
2414 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2415 				  &dpbp_attrs);
2416 	if (err) {
2417 		dev_err(dev, "dpbp_get_attributes() failed\n");
2418 		goto err_get_attr;
2419 	}
2420 	priv->bpid = dpbp_attrs.bpid;
2421 
2422 	return 0;
2423 
2424 err_get_attr:
2425 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2426 err_enable:
2427 err_reset:
2428 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2429 err_open:
2430 	fsl_mc_object_free(dpbp_dev);
2431 
2432 	return err;
2433 }
2434 
2435 static void free_dpbp(struct dpaa2_eth_priv *priv)
2436 {
2437 	drain_pool(priv);
2438 	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2439 	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2440 	fsl_mc_object_free(priv->dpbp_dev);
2441 }
2442 
2443 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2444 {
2445 	struct device *dev = priv->net_dev->dev.parent;
2446 	struct dpni_buffer_layout buf_layout = {0};
2447 	u16 rx_buf_align;
2448 	int err;
2449 
2450 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
2451 	 * version, this number is not always provided correctly on rev1.
2452 	 * We need to check for both alternatives in this situation.
2453 	 */
2454 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2455 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2456 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2457 	else
2458 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2459 
2460 	/* tx buffer */
2461 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2462 	buf_layout.pass_timestamp = true;
2463 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2464 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2465 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2466 				     DPNI_QUEUE_TX, &buf_layout);
2467 	if (err) {
2468 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2469 		return err;
2470 	}
2471 
2472 	/* tx-confirm buffer */
2473 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2474 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2475 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2476 	if (err) {
2477 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2478 		return err;
2479 	}
2480 
2481 	/* Now that we've set our tx buffer layout, retrieve the minimum
2482 	 * required tx data offset.
2483 	 */
2484 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2485 				      &priv->tx_data_offset);
2486 	if (err) {
2487 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2488 		return err;
2489 	}
2490 
2491 	if ((priv->tx_data_offset % 64) != 0)
2492 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2493 			 priv->tx_data_offset);
2494 
2495 	/* rx buffer */
2496 	buf_layout.pass_frame_status = true;
2497 	buf_layout.pass_parser_result = true;
2498 	buf_layout.data_align = rx_buf_align;
2499 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2500 	buf_layout.private_data_size = 0;
2501 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2502 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2503 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2504 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2505 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2506 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2507 				     DPNI_QUEUE_RX, &buf_layout);
2508 	if (err) {
2509 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2510 		return err;
2511 	}
2512 
2513 	return 0;
2514 }
2515 
2516 #define DPNI_ENQUEUE_FQID_VER_MAJOR	7
2517 #define DPNI_ENQUEUE_FQID_VER_MINOR	9
2518 
2519 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2520 				       struct dpaa2_eth_fq *fq,
2521 				       struct dpaa2_fd *fd, u8 prio)
2522 {
2523 	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2524 					   priv->tx_qdid, prio,
2525 					   fq->tx_qdbin, fd);
2526 }
2527 
2528 static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
2529 				       struct dpaa2_eth_fq *fq,
2530 				       struct dpaa2_fd *fd, u8 prio)
2531 {
2532 	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
2533 					   fq->tx_fqid[prio], fd);
2534 }
2535 
2536 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2537 {
2538 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2539 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2540 		priv->enqueue = dpaa2_eth_enqueue_qd;
2541 	else
2542 		priv->enqueue = dpaa2_eth_enqueue_fq;
2543 }
2544 
2545 static int set_pause(struct dpaa2_eth_priv *priv)
2546 {
2547 	struct device *dev = priv->net_dev->dev.parent;
2548 	struct dpni_link_cfg link_cfg = {0};
2549 	int err;
2550 
2551 	/* Get the default link options so we don't override other flags */
2552 	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2553 	if (err) {
2554 		dev_err(dev, "dpni_get_link_cfg() failed\n");
2555 		return err;
2556 	}
2557 
2558 	/* By default, enable both Rx and Tx pause frames */
2559 	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
2560 	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2561 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2562 	if (err) {
2563 		dev_err(dev, "dpni_set_link_cfg() failed\n");
2564 		return err;
2565 	}
2566 
2567 	priv->link_state.options = link_cfg.options;
2568 
2569 	return 0;
2570 }
2571 
2572 static void update_tx_fqids(struct dpaa2_eth_priv *priv)
2573 {
2574 	struct dpni_queue_id qid = {0};
2575 	struct dpaa2_eth_fq *fq;
2576 	struct dpni_queue queue;
2577 	int i, j, err;
2578 
2579 	/* We only use Tx FQIDs for FQID-based enqueue, so check
2580 	 * if DPNI version supports it before updating FQIDs
2581 	 */
2582 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2583 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2584 		return;
2585 
2586 	for (i = 0; i < priv->num_fqs; i++) {
2587 		fq = &priv->fq[i];
2588 		if (fq->type != DPAA2_TX_CONF_FQ)
2589 			continue;
2590 		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
2591 			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2592 					     DPNI_QUEUE_TX, j, fq->flowid,
2593 					     &queue, &qid);
2594 			if (err)
2595 				goto out_err;
2596 
2597 			fq->tx_fqid[j] = qid.fqid;
2598 			if (fq->tx_fqid[j] == 0)
2599 				goto out_err;
2600 		}
2601 	}
2602 
2603 	priv->enqueue = dpaa2_eth_enqueue_fq;
2604 
2605 	return;
2606 
2607 out_err:
2608 	netdev_info(priv->net_dev,
2609 		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
2610 	priv->enqueue = dpaa2_eth_enqueue_qd;
2611 }
2612 
2613 /* Configure the DPNI object this interface is associated with */
2614 static int setup_dpni(struct fsl_mc_device *ls_dev)
2615 {
2616 	struct device *dev = &ls_dev->dev;
2617 	struct dpaa2_eth_priv *priv;
2618 	struct net_device *net_dev;
2619 	int err;
2620 
2621 	net_dev = dev_get_drvdata(dev);
2622 	priv = netdev_priv(net_dev);
2623 
2624 	/* get a handle for the DPNI object */
2625 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
2626 	if (err) {
2627 		dev_err(dev, "dpni_open() failed\n");
2628 		return err;
2629 	}
2630 
2631 	/* Check if we can work with this DPNI object */
2632 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
2633 				   &priv->dpni_ver_minor);
2634 	if (err) {
2635 		dev_err(dev, "dpni_get_api_version() failed\n");
2636 		goto close;
2637 	}
2638 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
2639 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
2640 			priv->dpni_ver_major, priv->dpni_ver_minor,
2641 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
2642 		err = -ENOTSUPP;
2643 		goto close;
2644 	}
2645 
2646 	ls_dev->mc_io = priv->mc_io;
2647 	ls_dev->mc_handle = priv->mc_token;
2648 
2649 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2650 	if (err) {
2651 		dev_err(dev, "dpni_reset() failed\n");
2652 		goto close;
2653 	}
2654 
2655 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
2656 				  &priv->dpni_attrs);
2657 	if (err) {
2658 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
2659 		goto close;
2660 	}
2661 
2662 	err = set_buffer_layout(priv);
2663 	if (err)
2664 		goto close;
2665 
2666 	set_enqueue_mode(priv);
2667 
2668 	/* Enable pause frame support */
2669 	if (dpaa2_eth_has_pause_support(priv)) {
2670 		err = set_pause(priv);
2671 		if (err)
2672 			goto close;
2673 	}
2674 
2675 	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
2676 				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
2677 	if (!priv->cls_rules)
2678 		goto close;
2679 
2680 	return 0;
2681 
2682 close:
2683 	dpni_close(priv->mc_io, 0, priv->mc_token);
2684 
2685 	return err;
2686 }
2687 
2688 static void free_dpni(struct dpaa2_eth_priv *priv)
2689 {
2690 	int err;
2691 
2692 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2693 	if (err)
2694 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
2695 			    err);
2696 
2697 	dpni_close(priv->mc_io, 0, priv->mc_token);
2698 }
2699 
2700 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
2701 			 struct dpaa2_eth_fq *fq)
2702 {
2703 	struct device *dev = priv->net_dev->dev.parent;
2704 	struct dpni_queue queue;
2705 	struct dpni_queue_id qid;
2706 	int err;
2707 
2708 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2709 			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
2710 	if (err) {
2711 		dev_err(dev, "dpni_get_queue(RX) failed\n");
2712 		return err;
2713 	}
2714 
2715 	fq->fqid = qid.fqid;
2716 
2717 	queue.destination.id = fq->channel->dpcon_id;
2718 	queue.destination.type = DPNI_DEST_DPCON;
2719 	queue.destination.priority = 1;
2720 	queue.user_context = (u64)(uintptr_t)fq;
2721 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2722 			     DPNI_QUEUE_RX, 0, fq->flowid,
2723 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2724 			     &queue);
2725 	if (err) {
2726 		dev_err(dev, "dpni_set_queue(RX) failed\n");
2727 		return err;
2728 	}
2729 
2730 	/* xdp_rxq setup */
2731 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
2732 			       fq->flowid);
2733 	if (err) {
2734 		dev_err(dev, "xdp_rxq_info_reg failed\n");
2735 		return err;
2736 	}
2737 
2738 	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
2739 					 MEM_TYPE_PAGE_ORDER0, NULL);
2740 	if (err) {
2741 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
2742 		return err;
2743 	}
2744 
2745 	return 0;
2746 }
2747 
2748 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
2749 			 struct dpaa2_eth_fq *fq)
2750 {
2751 	struct device *dev = priv->net_dev->dev.parent;
2752 	struct dpni_queue queue;
2753 	struct dpni_queue_id qid;
2754 	int i, err;
2755 
2756 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2757 		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2758 				     DPNI_QUEUE_TX, i, fq->flowid,
2759 				     &queue, &qid);
2760 		if (err) {
2761 			dev_err(dev, "dpni_get_queue(TX) failed\n");
2762 			return err;
2763 		}
2764 		fq->tx_fqid[i] = qid.fqid;
2765 	}
2766 
2767 	/* All Tx queues belonging to the same flowid have the same qdbin */
2768 	fq->tx_qdbin = qid.qdbin;
2769 
2770 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2771 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2772 			     &queue, &qid);
2773 	if (err) {
2774 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2775 		return err;
2776 	}
2777 
2778 	fq->fqid = qid.fqid;
2779 
2780 	queue.destination.id = fq->channel->dpcon_id;
2781 	queue.destination.type = DPNI_DEST_DPCON;
2782 	queue.destination.priority = 0;
2783 	queue.user_context = (u64)(uintptr_t)fq;
2784 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2785 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2786 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2787 			     &queue);
2788 	if (err) {
2789 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2790 		return err;
2791 	}
2792 
2793 	return 0;
2794 }
2795 
2796 /* Supported header fields for Rx hash distribution key */
2797 static const struct dpaa2_eth_dist_fields dist_fields[] = {
2798 	{
2799 		/* L2 header */
2800 		.rxnfc_field = RXH_L2DA,
2801 		.cls_prot = NET_PROT_ETH,
2802 		.cls_field = NH_FLD_ETH_DA,
2803 		.id = DPAA2_ETH_DIST_ETHDST,
2804 		.size = 6,
2805 	}, {
2806 		.cls_prot = NET_PROT_ETH,
2807 		.cls_field = NH_FLD_ETH_SA,
2808 		.id = DPAA2_ETH_DIST_ETHSRC,
2809 		.size = 6,
2810 	}, {
2811 		/* This is the last ethertype field parsed:
2812 		 * depending on frame format, it can be the MAC ethertype
2813 		 * or the VLAN etype.
2814 		 */
2815 		.cls_prot = NET_PROT_ETH,
2816 		.cls_field = NH_FLD_ETH_TYPE,
2817 		.id = DPAA2_ETH_DIST_ETHTYPE,
2818 		.size = 2,
2819 	}, {
2820 		/* VLAN header */
2821 		.rxnfc_field = RXH_VLAN,
2822 		.cls_prot = NET_PROT_VLAN,
2823 		.cls_field = NH_FLD_VLAN_TCI,
2824 		.id = DPAA2_ETH_DIST_VLAN,
2825 		.size = 2,
2826 	}, {
2827 		/* IP header */
2828 		.rxnfc_field = RXH_IP_SRC,
2829 		.cls_prot = NET_PROT_IP,
2830 		.cls_field = NH_FLD_IP_SRC,
2831 		.id = DPAA2_ETH_DIST_IPSRC,
2832 		.size = 4,
2833 	}, {
2834 		.rxnfc_field = RXH_IP_DST,
2835 		.cls_prot = NET_PROT_IP,
2836 		.cls_field = NH_FLD_IP_DST,
2837 		.id = DPAA2_ETH_DIST_IPDST,
2838 		.size = 4,
2839 	}, {
2840 		.rxnfc_field = RXH_L3_PROTO,
2841 		.cls_prot = NET_PROT_IP,
2842 		.cls_field = NH_FLD_IP_PROTO,
2843 		.id = DPAA2_ETH_DIST_IPPROTO,
2844 		.size = 1,
2845 	}, {
2846 		/* Using UDP ports, this is functionally equivalent to raw
2847 		 * byte pairs from L4 header.
2848 		 */
2849 		.rxnfc_field = RXH_L4_B_0_1,
2850 		.cls_prot = NET_PROT_UDP,
2851 		.cls_field = NH_FLD_UDP_PORT_SRC,
2852 		.id = DPAA2_ETH_DIST_L4SRC,
2853 		.size = 2,
2854 	}, {
2855 		.rxnfc_field = RXH_L4_B_2_3,
2856 		.cls_prot = NET_PROT_UDP,
2857 		.cls_field = NH_FLD_UDP_PORT_DST,
2858 		.id = DPAA2_ETH_DIST_L4DST,
2859 		.size = 2,
2860 	},
2861 };
2862 
2863 /* Configure the Rx hash key using the legacy API */
2864 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2865 {
2866 	struct device *dev = priv->net_dev->dev.parent;
2867 	struct dpni_rx_tc_dist_cfg dist_cfg;
2868 	int err;
2869 
2870 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2871 
2872 	dist_cfg.key_cfg_iova = key;
2873 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2874 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2875 
2876 	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2877 	if (err)
2878 		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2879 
2880 	return err;
2881 }
2882 
2883 /* Configure the Rx hash key using the new API */
2884 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2885 {
2886 	struct device *dev = priv->net_dev->dev.parent;
2887 	struct dpni_rx_dist_cfg dist_cfg;
2888 	int err;
2889 
2890 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2891 
2892 	dist_cfg.key_cfg_iova = key;
2893 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2894 	dist_cfg.enable = 1;
2895 
2896 	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2897 	if (err)
2898 		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2899 
2900 	return err;
2901 }
2902 
2903 /* Configure the Rx flow classification key */
2904 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2905 {
2906 	struct device *dev = priv->net_dev->dev.parent;
2907 	struct dpni_rx_dist_cfg dist_cfg;
2908 	int err;
2909 
2910 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2911 
2912 	dist_cfg.key_cfg_iova = key;
2913 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2914 	dist_cfg.enable = 1;
2915 
2916 	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2917 	if (err)
2918 		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2919 
2920 	return err;
2921 }
2922 
2923 /* Size of the Rx flow classification key */
2924 int dpaa2_eth_cls_key_size(u64 fields)
2925 {
2926 	int i, size = 0;
2927 
2928 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2929 		if (!(fields & dist_fields[i].id))
2930 			continue;
2931 		size += dist_fields[i].size;
2932 	}
2933 
2934 	return size;
2935 }
2936 
2937 /* Offset of header field in Rx classification key */
2938 int dpaa2_eth_cls_fld_off(int prot, int field)
2939 {
2940 	int i, off = 0;
2941 
2942 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2943 		if (dist_fields[i].cls_prot == prot &&
2944 		    dist_fields[i].cls_field == field)
2945 			return off;
2946 		off += dist_fields[i].size;
2947 	}
2948 
2949 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2950 	return 0;
2951 }
2952 
2953 /* Prune unused fields from the classification rule.
2954  * Used when masking is not supported
2955  */
2956 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
2957 {
2958 	int off = 0, new_off = 0;
2959 	int i, size;
2960 
2961 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2962 		size = dist_fields[i].size;
2963 		if (dist_fields[i].id & fields) {
2964 			memcpy(key_mem + new_off, key_mem + off, size);
2965 			new_off += size;
2966 		}
2967 		off += size;
2968 	}
2969 }
2970 
2971 /* Set Rx distribution (hash or flow classification) key
2972  * flags is a combination of RXH_ bits
2973  */
2974 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2975 				  enum dpaa2_eth_rx_dist type, u64 flags)
2976 {
2977 	struct device *dev = net_dev->dev.parent;
2978 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2979 	struct dpkg_profile_cfg cls_cfg;
2980 	u32 rx_hash_fields = 0;
2981 	dma_addr_t key_iova;
2982 	u8 *dma_mem;
2983 	int i;
2984 	int err = 0;
2985 
2986 	memset(&cls_cfg, 0, sizeof(cls_cfg));
2987 
2988 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2989 		struct dpkg_extract *key =
2990 			&cls_cfg.extracts[cls_cfg.num_extracts];
2991 
2992 		/* For both Rx hashing and classification keys
2993 		 * we set only the selected fields.
2994 		 */
2995 		if (!(flags & dist_fields[i].id))
2996 			continue;
2997 		if (type == DPAA2_ETH_RX_DIST_HASH)
2998 			rx_hash_fields |= dist_fields[i].rxnfc_field;
2999 
3000 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
3001 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
3002 			return -E2BIG;
3003 		}
3004 
3005 		key->type = DPKG_EXTRACT_FROM_HDR;
3006 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
3007 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
3008 		key->extract.from_hdr.field = dist_fields[i].cls_field;
3009 		cls_cfg.num_extracts++;
3010 	}
3011 
3012 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3013 	if (!dma_mem)
3014 		return -ENOMEM;
3015 
3016 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
3017 	if (err) {
3018 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
3019 		goto free_key;
3020 	}
3021 
3022 	/* Prepare for setting the rx dist */
3023 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
3024 				  DMA_TO_DEVICE);
3025 	if (dma_mapping_error(dev, key_iova)) {
3026 		dev_err(dev, "DMA mapping failed\n");
3027 		err = -ENOMEM;
3028 		goto free_key;
3029 	}
3030 
3031 	if (type == DPAA2_ETH_RX_DIST_HASH) {
3032 		if (dpaa2_eth_has_legacy_dist(priv))
3033 			err = config_legacy_hash_key(priv, key_iova);
3034 		else
3035 			err = config_hash_key(priv, key_iova);
3036 	} else {
3037 		err = config_cls_key(priv, key_iova);
3038 	}
3039 
3040 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3041 			 DMA_TO_DEVICE);
3042 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
3043 		priv->rx_hash_fields = rx_hash_fields;
3044 
3045 free_key:
3046 	kfree(dma_mem);
3047 	return err;
3048 }
3049 
3050 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
3051 {
3052 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3053 	u64 key = 0;
3054 	int i;
3055 
3056 	if (!dpaa2_eth_hash_enabled(priv))
3057 		return -EOPNOTSUPP;
3058 
3059 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
3060 		if (dist_fields[i].rxnfc_field & flags)
3061 			key |= dist_fields[i].id;
3062 
3063 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
3064 }
3065 
3066 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
3067 {
3068 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
3069 }
3070 
3071 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
3072 {
3073 	struct device *dev = priv->net_dev->dev.parent;
3074 	int err;
3075 
3076 	/* Check if we actually support Rx flow classification */
3077 	if (dpaa2_eth_has_legacy_dist(priv)) {
3078 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
3079 		return -EOPNOTSUPP;
3080 	}
3081 
3082 	if (!dpaa2_eth_fs_enabled(priv)) {
3083 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
3084 		return -EOPNOTSUPP;
3085 	}
3086 
3087 	if (!dpaa2_eth_hash_enabled(priv)) {
3088 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
3089 		return -EOPNOTSUPP;
3090 	}
3091 
3092 	/* If there is no support for masking in the classification table,
3093 	 * we don't set a default key, as it will depend on the rules
3094 	 * added by the user at runtime.
3095 	 */
3096 	if (!dpaa2_eth_fs_mask_enabled(priv))
3097 		goto out;
3098 
3099 	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
3100 	if (err)
3101 		return err;
3102 
3103 out:
3104 	priv->rx_cls_enabled = 1;
3105 
3106 	return 0;
3107 }
3108 
3109 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3110  * frame queues and channels
3111  */
3112 static int bind_dpni(struct dpaa2_eth_priv *priv)
3113 {
3114 	struct net_device *net_dev = priv->net_dev;
3115 	struct device *dev = net_dev->dev.parent;
3116 	struct dpni_pools_cfg pools_params;
3117 	struct dpni_error_cfg err_cfg;
3118 	int err = 0;
3119 	int i;
3120 
3121 	pools_params.num_dpbp = 1;
3122 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3123 	pools_params.pools[0].backup_pool = 0;
3124 	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3125 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3126 	if (err) {
3127 		dev_err(dev, "dpni_set_pools() failed\n");
3128 		return err;
3129 	}
3130 
3131 	/* have the interface implicitly distribute traffic based on
3132 	 * the default hash key
3133 	 */
3134 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3135 	if (err && err != -EOPNOTSUPP)
3136 		dev_err(dev, "Failed to configure hashing\n");
3137 
3138 	/* Configure the flow classification key; it includes all
3139 	 * supported header fields and cannot be modified at runtime
3140 	 */
3141 	err = dpaa2_eth_set_default_cls(priv);
3142 	if (err && err != -EOPNOTSUPP)
3143 		dev_err(dev, "Failed to configure Rx classification key\n");
3144 
3145 	/* Configure handling of error frames */
3146 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3147 	err_cfg.set_frame_annotation = 1;
3148 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3149 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3150 				       &err_cfg);
3151 	if (err) {
3152 		dev_err(dev, "dpni_set_errors_behavior failed\n");
3153 		return err;
3154 	}
3155 
3156 	/* Configure Rx and Tx conf queues to generate CDANs */
3157 	for (i = 0; i < priv->num_fqs; i++) {
3158 		switch (priv->fq[i].type) {
3159 		case DPAA2_RX_FQ:
3160 			err = setup_rx_flow(priv, &priv->fq[i]);
3161 			break;
3162 		case DPAA2_TX_CONF_FQ:
3163 			err = setup_tx_flow(priv, &priv->fq[i]);
3164 			break;
3165 		default:
3166 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3167 			return -EINVAL;
3168 		}
3169 		if (err)
3170 			return err;
3171 	}
3172 
3173 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3174 			    DPNI_QUEUE_TX, &priv->tx_qdid);
3175 	if (err) {
3176 		dev_err(dev, "dpni_get_qdid() failed\n");
3177 		return err;
3178 	}
3179 
3180 	return 0;
3181 }
3182 
3183 /* Allocate rings for storing incoming frame descriptors */
3184 static int alloc_rings(struct dpaa2_eth_priv *priv)
3185 {
3186 	struct net_device *net_dev = priv->net_dev;
3187 	struct device *dev = net_dev->dev.parent;
3188 	int i;
3189 
3190 	for (i = 0; i < priv->num_channels; i++) {
3191 		priv->channel[i]->store =
3192 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3193 		if (!priv->channel[i]->store) {
3194 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3195 			goto err_ring;
3196 		}
3197 	}
3198 
3199 	return 0;
3200 
3201 err_ring:
3202 	for (i = 0; i < priv->num_channels; i++) {
3203 		if (!priv->channel[i]->store)
3204 			break;
3205 		dpaa2_io_store_destroy(priv->channel[i]->store);
3206 	}
3207 
3208 	return -ENOMEM;
3209 }
3210 
3211 static void free_rings(struct dpaa2_eth_priv *priv)
3212 {
3213 	int i;
3214 
3215 	for (i = 0; i < priv->num_channels; i++)
3216 		dpaa2_io_store_destroy(priv->channel[i]->store);
3217 }
3218 
3219 static int set_mac_addr(struct dpaa2_eth_priv *priv)
3220 {
3221 	struct net_device *net_dev = priv->net_dev;
3222 	struct device *dev = net_dev->dev.parent;
3223 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3224 	int err;
3225 
3226 	/* Get firmware address, if any */
3227 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3228 	if (err) {
3229 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3230 		return err;
3231 	}
3232 
3233 	/* Get DPNI attributes address, if any */
3234 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3235 					dpni_mac_addr);
3236 	if (err) {
3237 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
3238 		return err;
3239 	}
3240 
3241 	/* First check if firmware has any address configured by bootloader */
3242 	if (!is_zero_ether_addr(mac_addr)) {
3243 		/* If the DPMAC addr != DPNI addr, update it */
3244 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3245 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3246 							priv->mc_token,
3247 							mac_addr);
3248 			if (err) {
3249 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3250 				return err;
3251 			}
3252 		}
3253 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3254 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
3255 		/* No MAC address configured, fill in net_dev->dev_addr
3256 		 * with a random one
3257 		 */
3258 		eth_hw_addr_random(net_dev);
3259 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3260 
3261 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3262 						net_dev->dev_addr);
3263 		if (err) {
3264 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3265 			return err;
3266 		}
3267 
3268 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3269 		 * practical purposes, this will be our "permanent" mac address,
3270 		 * at least until the next reboot. This move will also permit
3271 		 * register_netdevice() to properly fill up net_dev->perm_addr.
3272 		 */
3273 		net_dev->addr_assign_type = NET_ADDR_PERM;
3274 	} else {
3275 		/* NET_ADDR_PERM is default, all we have to do is
3276 		 * fill in the device addr.
3277 		 */
3278 		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3279 	}
3280 
3281 	return 0;
3282 }
3283 
3284 static int netdev_init(struct net_device *net_dev)
3285 {
3286 	struct device *dev = net_dev->dev.parent;
3287 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3288 	u32 options = priv->dpni_attrs.options;
3289 	u64 supported = 0, not_supported = 0;
3290 	u8 bcast_addr[ETH_ALEN];
3291 	u8 num_queues;
3292 	int err;
3293 
3294 	net_dev->netdev_ops = &dpaa2_eth_ops;
3295 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
3296 
3297 	err = set_mac_addr(priv);
3298 	if (err)
3299 		return err;
3300 
3301 	/* Explicitly add the broadcast address to the MAC filtering table */
3302 	eth_broadcast_addr(bcast_addr);
3303 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3304 	if (err) {
3305 		dev_err(dev, "dpni_add_mac_addr() failed\n");
3306 		return err;
3307 	}
3308 
3309 	/* Set MTU upper limit; lower limit is 68B (default value) */
3310 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3311 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3312 					DPAA2_ETH_MFL);
3313 	if (err) {
3314 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
3315 		return err;
3316 	}
3317 
3318 	/* Set actual number of queues in the net device */
3319 	num_queues = dpaa2_eth_queue_count(priv);
3320 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
3321 	if (err) {
3322 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
3323 		return err;
3324 	}
3325 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
3326 	if (err) {
3327 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
3328 		return err;
3329 	}
3330 
3331 	/* Capabilities listing */
3332 	supported |= IFF_LIVE_ADDR_CHANGE;
3333 
3334 	if (options & DPNI_OPT_NO_MAC_FILTER)
3335 		not_supported |= IFF_UNICAST_FLT;
3336 	else
3337 		supported |= IFF_UNICAST_FLT;
3338 
3339 	net_dev->priv_flags |= supported;
3340 	net_dev->priv_flags &= ~not_supported;
3341 
3342 	/* Features */
3343 	net_dev->features = NETIF_F_RXCSUM |
3344 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3345 			    NETIF_F_SG | NETIF_F_HIGHDMA |
3346 			    NETIF_F_LLTX;
3347 	net_dev->hw_features = net_dev->features;
3348 
3349 	return 0;
3350 }
3351 
3352 static int poll_link_state(void *arg)
3353 {
3354 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3355 	int err;
3356 
3357 	while (!kthread_should_stop()) {
3358 		err = link_state_update(priv);
3359 		if (unlikely(err))
3360 			return err;
3361 
3362 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3363 	}
3364 
3365 	return 0;
3366 }
3367 
3368 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
3369 {
3370 	struct fsl_mc_device *dpni_dev, *dpmac_dev;
3371 	struct dpaa2_mac *mac;
3372 	int err;
3373 
3374 	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
3375 	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
3376 	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
3377 		return 0;
3378 
3379 	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
3380 		return 0;
3381 
3382 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
3383 	if (!mac)
3384 		return -ENOMEM;
3385 
3386 	mac->mc_dev = dpmac_dev;
3387 	mac->mc_io = priv->mc_io;
3388 	mac->net_dev = priv->net_dev;
3389 
3390 	err = dpaa2_mac_connect(mac);
3391 	if (err) {
3392 		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
3393 		kfree(mac);
3394 		return err;
3395 	}
3396 	priv->mac = mac;
3397 
3398 	return 0;
3399 }
3400 
3401 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
3402 {
3403 	if (!priv->mac)
3404 		return;
3405 
3406 	dpaa2_mac_disconnect(priv->mac);
3407 	kfree(priv->mac);
3408 	priv->mac = NULL;
3409 }
3410 
3411 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3412 {
3413 	u32 status = ~0;
3414 	struct device *dev = (struct device *)arg;
3415 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3416 	struct net_device *net_dev = dev_get_drvdata(dev);
3417 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3418 	int err;
3419 
3420 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3421 				  DPNI_IRQ_INDEX, &status);
3422 	if (unlikely(err)) {
3423 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
3424 		return IRQ_HANDLED;
3425 	}
3426 
3427 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
3428 		link_state_update(netdev_priv(net_dev));
3429 
3430 	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
3431 		set_mac_addr(netdev_priv(net_dev));
3432 		update_tx_fqids(priv);
3433 
3434 		rtnl_lock();
3435 		if (priv->mac)
3436 			dpaa2_eth_disconnect_mac(priv);
3437 		else
3438 			dpaa2_eth_connect_mac(priv);
3439 		rtnl_unlock();
3440 	}
3441 
3442 	return IRQ_HANDLED;
3443 }
3444 
3445 static int setup_irqs(struct fsl_mc_device *ls_dev)
3446 {
3447 	int err = 0;
3448 	struct fsl_mc_device_irq *irq;
3449 
3450 	err = fsl_mc_allocate_irqs(ls_dev);
3451 	if (err) {
3452 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3453 		return err;
3454 	}
3455 
3456 	irq = ls_dev->irqs[0];
3457 	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3458 					NULL, dpni_irq0_handler_thread,
3459 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
3460 					dev_name(&ls_dev->dev), &ls_dev->dev);
3461 	if (err < 0) {
3462 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
3463 		goto free_mc_irq;
3464 	}
3465 
3466 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3467 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
3468 				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
3469 	if (err < 0) {
3470 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
3471 		goto free_irq;
3472 	}
3473 
3474 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3475 				  DPNI_IRQ_INDEX, 1);
3476 	if (err < 0) {
3477 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
3478 		goto free_irq;
3479 	}
3480 
3481 	return 0;
3482 
3483 free_irq:
3484 	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3485 free_mc_irq:
3486 	fsl_mc_free_irqs(ls_dev);
3487 
3488 	return err;
3489 }
3490 
3491 static void add_ch_napi(struct dpaa2_eth_priv *priv)
3492 {
3493 	int i;
3494 	struct dpaa2_eth_channel *ch;
3495 
3496 	for (i = 0; i < priv->num_channels; i++) {
3497 		ch = priv->channel[i];
3498 		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3499 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3500 			       NAPI_POLL_WEIGHT);
3501 	}
3502 }
3503 
3504 static void del_ch_napi(struct dpaa2_eth_priv *priv)
3505 {
3506 	int i;
3507 	struct dpaa2_eth_channel *ch;
3508 
3509 	for (i = 0; i < priv->num_channels; i++) {
3510 		ch = priv->channel[i];
3511 		netif_napi_del(&ch->napi);
3512 	}
3513 }
3514 
3515 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3516 {
3517 	struct device *dev;
3518 	struct net_device *net_dev = NULL;
3519 	struct dpaa2_eth_priv *priv = NULL;
3520 	int err = 0;
3521 
3522 	dev = &dpni_dev->dev;
3523 
3524 	/* Net device */
3525 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
3526 	if (!net_dev) {
3527 		dev_err(dev, "alloc_etherdev_mq() failed\n");
3528 		return -ENOMEM;
3529 	}
3530 
3531 	SET_NETDEV_DEV(net_dev, dev);
3532 	dev_set_drvdata(dev, net_dev);
3533 
3534 	priv = netdev_priv(net_dev);
3535 	priv->net_dev = net_dev;
3536 
3537 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
3538 
3539 	/* Obtain a MC portal */
3540 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3541 				     &priv->mc_io);
3542 	if (err) {
3543 		if (err == -ENXIO)
3544 			err = -EPROBE_DEFER;
3545 		else
3546 			dev_err(dev, "MC portal allocation failed\n");
3547 		goto err_portal_alloc;
3548 	}
3549 
3550 	/* MC objects initialization and configuration */
3551 	err = setup_dpni(dpni_dev);
3552 	if (err)
3553 		goto err_dpni_setup;
3554 
3555 	err = setup_dpio(priv);
3556 	if (err)
3557 		goto err_dpio_setup;
3558 
3559 	setup_fqs(priv);
3560 
3561 	err = setup_dpbp(priv);
3562 	if (err)
3563 		goto err_dpbp_setup;
3564 
3565 	err = bind_dpni(priv);
3566 	if (err)
3567 		goto err_bind;
3568 
3569 	/* Add a NAPI context for each channel */
3570 	add_ch_napi(priv);
3571 
3572 	/* Percpu statistics */
3573 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
3574 	if (!priv->percpu_stats) {
3575 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
3576 		err = -ENOMEM;
3577 		goto err_alloc_percpu_stats;
3578 	}
3579 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
3580 	if (!priv->percpu_extras) {
3581 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
3582 		err = -ENOMEM;
3583 		goto err_alloc_percpu_extras;
3584 	}
3585 
3586 	err = netdev_init(net_dev);
3587 	if (err)
3588 		goto err_netdev_init;
3589 
3590 	/* Configure checksum offload based on current interface flags */
3591 	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
3592 	if (err)
3593 		goto err_csum;
3594 
3595 	err = set_tx_csum(priv, !!(net_dev->features &
3596 				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
3597 	if (err)
3598 		goto err_csum;
3599 
3600 	err = alloc_rings(priv);
3601 	if (err)
3602 		goto err_alloc_rings;
3603 
3604 	err = setup_irqs(dpni_dev);
3605 	if (err) {
3606 		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
3607 		priv->poll_thread = kthread_run(poll_link_state, priv,
3608 						"%s_poll_link", net_dev->name);
3609 		if (IS_ERR(priv->poll_thread)) {
3610 			dev_err(dev, "Error starting polling thread\n");
3611 			goto err_poll_thread;
3612 		}
3613 		priv->do_link_poll = true;
3614 	}
3615 
3616 	err = dpaa2_eth_connect_mac(priv);
3617 	if (err)
3618 		goto err_connect_mac;
3619 
3620 	err = register_netdev(net_dev);
3621 	if (err < 0) {
3622 		dev_err(dev, "register_netdev() failed\n");
3623 		goto err_netdev_reg;
3624 	}
3625 
3626 #ifdef CONFIG_DEBUG_FS
3627 	dpaa2_dbg_add(priv);
3628 #endif
3629 
3630 	dev_info(dev, "Probed interface %s\n", net_dev->name);
3631 	return 0;
3632 
3633 err_netdev_reg:
3634 	dpaa2_eth_disconnect_mac(priv);
3635 err_connect_mac:
3636 	if (priv->do_link_poll)
3637 		kthread_stop(priv->poll_thread);
3638 	else
3639 		fsl_mc_free_irqs(dpni_dev);
3640 err_poll_thread:
3641 	free_rings(priv);
3642 err_alloc_rings:
3643 err_csum:
3644 err_netdev_init:
3645 	free_percpu(priv->percpu_extras);
3646 err_alloc_percpu_extras:
3647 	free_percpu(priv->percpu_stats);
3648 err_alloc_percpu_stats:
3649 	del_ch_napi(priv);
3650 err_bind:
3651 	free_dpbp(priv);
3652 err_dpbp_setup:
3653 	free_dpio(priv);
3654 err_dpio_setup:
3655 	free_dpni(priv);
3656 err_dpni_setup:
3657 	fsl_mc_portal_free(priv->mc_io);
3658 err_portal_alloc:
3659 	dev_set_drvdata(dev, NULL);
3660 	free_netdev(net_dev);
3661 
3662 	return err;
3663 }
3664 
3665 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
3666 {
3667 	struct device *dev;
3668 	struct net_device *net_dev;
3669 	struct dpaa2_eth_priv *priv;
3670 
3671 	dev = &ls_dev->dev;
3672 	net_dev = dev_get_drvdata(dev);
3673 	priv = netdev_priv(net_dev);
3674 
3675 #ifdef CONFIG_DEBUG_FS
3676 	dpaa2_dbg_remove(priv);
3677 #endif
3678 	rtnl_lock();
3679 	dpaa2_eth_disconnect_mac(priv);
3680 	rtnl_unlock();
3681 
3682 	unregister_netdev(net_dev);
3683 
3684 	if (priv->do_link_poll)
3685 		kthread_stop(priv->poll_thread);
3686 	else
3687 		fsl_mc_free_irqs(ls_dev);
3688 
3689 	free_rings(priv);
3690 	free_percpu(priv->percpu_stats);
3691 	free_percpu(priv->percpu_extras);
3692 
3693 	del_ch_napi(priv);
3694 	free_dpbp(priv);
3695 	free_dpio(priv);
3696 	free_dpni(priv);
3697 
3698 	fsl_mc_portal_free(priv->mc_io);
3699 
3700 	free_netdev(net_dev);
3701 
3702 	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
3703 
3704 	return 0;
3705 }
3706 
3707 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
3708 	{
3709 		.vendor = FSL_MC_VENDOR_FREESCALE,
3710 		.obj_type = "dpni",
3711 	},
3712 	{ .vendor = 0x0 }
3713 };
3714 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
3715 
3716 static struct fsl_mc_driver dpaa2_eth_driver = {
3717 	.driver = {
3718 		.name = KBUILD_MODNAME,
3719 		.owner = THIS_MODULE,
3720 	},
3721 	.probe = dpaa2_eth_probe,
3722 	.remove = dpaa2_eth_remove,
3723 	.match_id_table = dpaa2_eth_match_id_table
3724 };
3725 
3726 static int __init dpaa2_eth_driver_init(void)
3727 {
3728 	int err;
3729 
3730 	dpaa2_eth_dbg_init();
3731 	err = fsl_mc_driver_register(&dpaa2_eth_driver);
3732 	if (err) {
3733 		dpaa2_eth_dbg_exit();
3734 		return err;
3735 	}
3736 
3737 	return 0;
3738 }
3739 
3740 static void __exit dpaa2_eth_driver_exit(void)
3741 {
3742 	dpaa2_eth_dbg_exit();
3743 	fsl_mc_driver_unregister(&dpaa2_eth_driver);
3744 }
3745 
3746 module_init(dpaa2_eth_driver_init);
3747 module_exit(dpaa2_eth_driver_exit);
3748