1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2022 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/fsl/mc.h>
15 #include <linux/bpf.h>
16 #include <linux/bpf_trace.h>
17 #include <linux/fsl/ptp_qoriq.h>
18 #include <linux/ptp_classify.h>
19 #include <net/pkt_cls.h>
20 #include <net/sock.h>
21 #include <net/tso.h>
22 #include <net/xdp_sock_drv.h>
23 
24 #include "dpaa2-eth.h"
25 
26 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
27  * using trace events only need to #include <trace/events/sched.h>
28  */
29 #define CREATE_TRACE_POINTS
30 #include "dpaa2-eth-trace.h"
31 
32 MODULE_LICENSE("Dual BSD/GPL");
33 MODULE_AUTHOR("Freescale Semiconductor, Inc");
34 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
35 
36 struct ptp_qoriq *dpaa2_ptp;
37 EXPORT_SYMBOL(dpaa2_ptp);
38 
39 static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
40 {
41 	priv->features = 0;
42 
43 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
44 				   DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
45 		priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
46 }
47 
48 static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
49 					      u32 offset, u8 udp)
50 {
51 	struct dpni_single_step_cfg cfg;
52 
53 	cfg.en = 1;
54 	cfg.ch_update = udp;
55 	cfg.offset = offset;
56 	cfg.peer_delay = 0;
57 
58 	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
59 		WARN_ONCE(1, "Failed to set single step register");
60 }
61 
62 static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
63 					    u32 offset, u8 udp)
64 {
65 	u32 val = 0;
66 
67 	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
68 	       DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
69 
70 	if (udp)
71 		val |= DPAA2_PTP_SINGLE_STEP_CH;
72 
73 	if (priv->onestep_reg_base)
74 		writel(val, priv->onestep_reg_base);
75 }
76 
77 static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
78 {
79 	struct device *dev = priv->net_dev->dev.parent;
80 	struct dpni_single_step_cfg ptp_cfg;
81 
82 	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
83 
84 	if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
85 		return;
86 
87 	if (dpni_get_single_step_cfg(priv->mc_io, 0,
88 				     priv->mc_token, &ptp_cfg)) {
89 		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
90 		return;
91 	}
92 
93 	if (!ptp_cfg.ptp_onestep_reg_base) {
94 		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
95 		return;
96 	}
97 
98 	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
99 					 sizeof(u32));
100 	if (!priv->onestep_reg_base) {
101 		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
102 		return;
103 	}
104 
105 	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
106 }
107 
108 void *dpaa2_iova_to_virt(struct iommu_domain *domain,
109 			 dma_addr_t iova_addr)
110 {
111 	phys_addr_t phys_addr;
112 
113 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
114 
115 	return phys_to_virt(phys_addr);
116 }
117 
118 static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
119 				       u32 fd_status,
120 				       struct sk_buff *skb)
121 {
122 	skb_checksum_none_assert(skb);
123 
124 	/* HW checksum validation is disabled, nothing to do here */
125 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
126 		return;
127 
128 	/* Read checksum validation bits */
129 	if (!((fd_status & DPAA2_FAS_L3CV) &&
130 	      (fd_status & DPAA2_FAS_L4CV)))
131 		return;
132 
133 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
134 	skb->ip_summed = CHECKSUM_UNNECESSARY;
135 }
136 
137 /* Free a received FD.
138  * Not to be used for Tx conf FDs or on any other paths.
139  */
140 static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
141 				 const struct dpaa2_fd *fd,
142 				 void *vaddr)
143 {
144 	struct device *dev = priv->net_dev->dev.parent;
145 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
146 	u8 fd_format = dpaa2_fd_get_format(fd);
147 	struct dpaa2_sg_entry *sgt;
148 	void *sg_vaddr;
149 	int i;
150 
151 	/* If single buffer frame, just free the data buffer */
152 	if (fd_format == dpaa2_fd_single)
153 		goto free_buf;
154 	else if (fd_format != dpaa2_fd_sg)
155 		/* We don't support any other format */
156 		return;
157 
158 	/* For S/G frames, we first need to free all SG entries
159 	 * except the first one, which was taken care of already
160 	 */
161 	sgt = vaddr + dpaa2_fd_get_offset(fd);
162 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
163 		addr = dpaa2_sg_get_addr(&sgt[i]);
164 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
165 		dma_unmap_page(dev, addr, priv->rx_buf_size,
166 			       DMA_BIDIRECTIONAL);
167 
168 		free_pages((unsigned long)sg_vaddr, 0);
169 		if (dpaa2_sg_is_final(&sgt[i]))
170 			break;
171 	}
172 
173 free_buf:
174 	free_pages((unsigned long)vaddr, 0);
175 }
176 
177 /* Build a linear skb based on a single-buffer frame descriptor */
178 static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
179 						  const struct dpaa2_fd *fd,
180 						  void *fd_vaddr)
181 {
182 	struct sk_buff *skb = NULL;
183 	u16 fd_offset = dpaa2_fd_get_offset(fd);
184 	u32 fd_length = dpaa2_fd_get_len(fd);
185 
186 	ch->buf_count--;
187 
188 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
189 	if (unlikely(!skb))
190 		return NULL;
191 
192 	skb_reserve(skb, fd_offset);
193 	skb_put(skb, fd_length);
194 
195 	return skb;
196 }
197 
198 /* Build a non linear (fragmented) skb based on a S/G table */
199 static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
200 						struct dpaa2_eth_channel *ch,
201 						struct dpaa2_sg_entry *sgt)
202 {
203 	struct sk_buff *skb = NULL;
204 	struct device *dev = priv->net_dev->dev.parent;
205 	void *sg_vaddr;
206 	dma_addr_t sg_addr;
207 	u16 sg_offset;
208 	u32 sg_length;
209 	struct page *page, *head_page;
210 	int page_offset;
211 	int i;
212 
213 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
214 		struct dpaa2_sg_entry *sge = &sgt[i];
215 
216 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
217 		 * but this is the only format we may receive from HW anyway
218 		 */
219 
220 		/* Get the address and length from the S/G entry */
221 		sg_addr = dpaa2_sg_get_addr(sge);
222 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
223 		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
224 			       DMA_BIDIRECTIONAL);
225 
226 		sg_length = dpaa2_sg_get_len(sge);
227 
228 		if (i == 0) {
229 			/* We build the skb around the first data buffer */
230 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
231 			if (unlikely(!skb)) {
232 				/* Free the first SG entry now, since we already
233 				 * unmapped it and obtained the virtual address
234 				 */
235 				free_pages((unsigned long)sg_vaddr, 0);
236 
237 				/* We still need to subtract the buffers used
238 				 * by this FD from our software counter
239 				 */
240 				while (!dpaa2_sg_is_final(&sgt[i]) &&
241 				       i < DPAA2_ETH_MAX_SG_ENTRIES)
242 					i++;
243 				break;
244 			}
245 
246 			sg_offset = dpaa2_sg_get_offset(sge);
247 			skb_reserve(skb, sg_offset);
248 			skb_put(skb, sg_length);
249 		} else {
250 			/* Rest of the data buffers are stored as skb frags */
251 			page = virt_to_page(sg_vaddr);
252 			head_page = virt_to_head_page(sg_vaddr);
253 
254 			/* Offset in page (which may be compound).
255 			 * Data in subsequent SG entries is stored from the
256 			 * beginning of the buffer, so we don't need to add the
257 			 * sg_offset.
258 			 */
259 			page_offset = ((unsigned long)sg_vaddr &
260 				(PAGE_SIZE - 1)) +
261 				(page_address(page) - page_address(head_page));
262 
263 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
264 					sg_length, priv->rx_buf_size);
265 		}
266 
267 		if (dpaa2_sg_is_final(sge))
268 			break;
269 	}
270 
271 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
272 
273 	/* Count all data buffers + SG table buffer */
274 	ch->buf_count -= i + 2;
275 
276 	return skb;
277 }
278 
279 /* Free buffers acquired from the buffer pool or which were meant to
280  * be released in the pool
281  */
282 static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
283 				int count, bool xsk_zc)
284 {
285 	struct device *dev = priv->net_dev->dev.parent;
286 	struct dpaa2_eth_swa *swa;
287 	struct xdp_buff *xdp_buff;
288 	void *vaddr;
289 	int i;
290 
291 	for (i = 0; i < count; i++) {
292 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
293 
294 		if (!xsk_zc) {
295 			dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
296 				       DMA_BIDIRECTIONAL);
297 			free_pages((unsigned long)vaddr, 0);
298 		} else {
299 			swa = (struct dpaa2_eth_swa *)
300 				(vaddr + DPAA2_ETH_RX_HWA_SIZE);
301 			xdp_buff = swa->xsk.xdp_buff;
302 			xsk_buff_free(xdp_buff);
303 		}
304 	}
305 }
306 
307 void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
308 			   struct dpaa2_eth_channel *ch,
309 			   dma_addr_t addr)
310 {
311 	int retries = 0;
312 	int err;
313 
314 	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
315 	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
316 		return;
317 
318 	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
319 					       ch->recycled_bufs,
320 					       ch->recycled_bufs_cnt)) == -EBUSY) {
321 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
322 			break;
323 		cpu_relax();
324 	}
325 
326 	if (err) {
327 		dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
328 				    ch->recycled_bufs_cnt, ch->xsk_zc);
329 		ch->buf_count -= ch->recycled_bufs_cnt;
330 	}
331 
332 	ch->recycled_bufs_cnt = 0;
333 }
334 
335 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
336 			       struct dpaa2_eth_fq *fq,
337 			       struct dpaa2_eth_xdp_fds *xdp_fds)
338 {
339 	int total_enqueued = 0, retries = 0, enqueued;
340 	struct dpaa2_eth_drv_stats *percpu_extras;
341 	int num_fds, err, max_retries;
342 	struct dpaa2_fd *fds;
343 
344 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
345 
346 	/* try to enqueue all the FDs until the max number of retries is hit */
347 	fds = xdp_fds->fds;
348 	num_fds = xdp_fds->num;
349 	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
350 	while (total_enqueued < num_fds && retries < max_retries) {
351 		err = priv->enqueue(priv, fq, &fds[total_enqueued],
352 				    0, num_fds - total_enqueued, &enqueued);
353 		if (err == -EBUSY) {
354 			percpu_extras->tx_portal_busy += ++retries;
355 			continue;
356 		}
357 		total_enqueued += enqueued;
358 	}
359 	xdp_fds->num = 0;
360 
361 	return total_enqueued;
362 }
363 
364 static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
365 				   struct dpaa2_eth_channel *ch,
366 				   struct dpaa2_eth_fq *fq)
367 {
368 	struct rtnl_link_stats64 *percpu_stats;
369 	struct dpaa2_fd *fds;
370 	int enqueued, i;
371 
372 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
373 
374 	// enqueue the array of XDP_TX frames
375 	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
376 
377 	/* update statistics */
378 	percpu_stats->tx_packets += enqueued;
379 	fds = fq->xdp_tx_fds.fds;
380 	for (i = 0; i < enqueued; i++) {
381 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
382 		ch->stats.xdp_tx++;
383 	}
384 	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
385 		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
386 		percpu_stats->tx_errors++;
387 		ch->stats.xdp_tx_err++;
388 	}
389 	fq->xdp_tx_fds.num = 0;
390 }
391 
392 void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
393 			   struct dpaa2_eth_channel *ch,
394 			   struct dpaa2_fd *fd,
395 			   void *buf_start, u16 queue_id)
396 {
397 	struct dpaa2_faead *faead;
398 	struct dpaa2_fd *dest_fd;
399 	struct dpaa2_eth_fq *fq;
400 	u32 ctrl, frc;
401 
402 	/* Mark the egress frame hardware annotation area as valid */
403 	frc = dpaa2_fd_get_frc(fd);
404 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
405 	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
406 
407 	/* Instruct hardware to release the FD buffer directly into
408 	 * the buffer pool once transmission is completed, instead of
409 	 * sending a Tx confirmation frame to us
410 	 */
411 	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
412 	faead = dpaa2_get_faead(buf_start, false);
413 	faead->ctrl = cpu_to_le32(ctrl);
414 	faead->conf_fqid = 0;
415 
416 	fq = &priv->fq[queue_id];
417 	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
418 	memcpy(dest_fd, fd, sizeof(*dest_fd));
419 
420 	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
421 		return;
422 
423 	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
424 }
425 
426 static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
427 			     struct dpaa2_eth_channel *ch,
428 			     struct dpaa2_eth_fq *rx_fq,
429 			     struct dpaa2_fd *fd, void *vaddr)
430 {
431 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
432 	struct bpf_prog *xdp_prog;
433 	struct xdp_buff xdp;
434 	u32 xdp_act = XDP_PASS;
435 	int err, offset;
436 
437 	xdp_prog = READ_ONCE(ch->xdp.prog);
438 	if (!xdp_prog)
439 		goto out;
440 
441 	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
442 	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
443 	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
444 			 dpaa2_fd_get_len(fd), false);
445 
446 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
447 
448 	/* xdp.data pointer may have changed */
449 	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
450 	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
451 
452 	switch (xdp_act) {
453 	case XDP_PASS:
454 		break;
455 	case XDP_TX:
456 		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
457 		break;
458 	default:
459 		bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
460 		fallthrough;
461 	case XDP_ABORTED:
462 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
463 		fallthrough;
464 	case XDP_DROP:
465 		dpaa2_eth_recycle_buf(priv, ch, addr);
466 		ch->stats.xdp_drop++;
467 		break;
468 	case XDP_REDIRECT:
469 		dma_unmap_page(priv->net_dev->dev.parent, addr,
470 			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
471 		ch->buf_count--;
472 
473 		/* Allow redirect use of full headroom */
474 		xdp.data_hard_start = vaddr;
475 		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
476 
477 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
478 		if (unlikely(err)) {
479 			addr = dma_map_page(priv->net_dev->dev.parent,
480 					    virt_to_page(vaddr), 0,
481 					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
482 			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
483 				free_pages((unsigned long)vaddr, 0);
484 			} else {
485 				ch->buf_count++;
486 				dpaa2_eth_recycle_buf(priv, ch, addr);
487 			}
488 			ch->stats.xdp_drop++;
489 		} else {
490 			ch->stats.xdp_redirect++;
491 		}
492 		break;
493 	}
494 
495 	ch->xdp.res |= xdp_act;
496 out:
497 	return xdp_act;
498 }
499 
500 struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
501 				    struct dpaa2_eth_channel *ch,
502 				    const struct dpaa2_fd *fd, u32 fd_length,
503 				    void *fd_vaddr)
504 {
505 	u16 fd_offset = dpaa2_fd_get_offset(fd);
506 	struct sk_buff *skb = NULL;
507 	unsigned int skb_len;
508 
509 	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
510 
511 	skb = napi_alloc_skb(&ch->napi, skb_len);
512 	if (!skb)
513 		return NULL;
514 
515 	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
516 	skb_put(skb, fd_length);
517 
518 	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
519 
520 	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
521 
522 	return skb;
523 }
524 
525 static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
526 					   const struct dpaa2_fd *fd,
527 					   void *fd_vaddr)
528 {
529 	struct dpaa2_eth_priv *priv = ch->priv;
530 	u32 fd_length = dpaa2_fd_get_len(fd);
531 
532 	if (fd_length > priv->rx_copybreak)
533 		return NULL;
534 
535 	return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
536 }
537 
538 void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
539 			   struct dpaa2_eth_channel *ch,
540 			   const struct dpaa2_fd *fd, void *vaddr,
541 			   struct dpaa2_eth_fq *fq,
542 			   struct rtnl_link_stats64 *percpu_stats,
543 			   struct sk_buff *skb)
544 {
545 	struct dpaa2_fas *fas;
546 	u32 status = 0;
547 
548 	fas = dpaa2_get_fas(vaddr, false);
549 	prefetch(fas);
550 	prefetch(skb->data);
551 
552 	/* Get the timestamp value */
553 	if (priv->rx_tstamp) {
554 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
555 		__le64 *ts = dpaa2_get_ts(vaddr, false);
556 		u64 ns;
557 
558 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
559 
560 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
561 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
562 	}
563 
564 	/* Check if we need to validate the L4 csum */
565 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
566 		status = le32_to_cpu(fas->status);
567 		dpaa2_eth_validate_rx_csum(priv, status, skb);
568 	}
569 
570 	skb->protocol = eth_type_trans(skb, priv->net_dev);
571 	skb_record_rx_queue(skb, fq->flowid);
572 
573 	percpu_stats->rx_packets++;
574 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
575 	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
576 
577 	list_add_tail(&skb->list, ch->rx_list);
578 }
579 
580 /* Main Rx frame processing routine */
581 void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
582 		  struct dpaa2_eth_channel *ch,
583 		  const struct dpaa2_fd *fd,
584 		  struct dpaa2_eth_fq *fq)
585 {
586 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
587 	u8 fd_format = dpaa2_fd_get_format(fd);
588 	void *vaddr;
589 	struct sk_buff *skb;
590 	struct rtnl_link_stats64 *percpu_stats;
591 	struct dpaa2_eth_drv_stats *percpu_extras;
592 	struct device *dev = priv->net_dev->dev.parent;
593 	void *buf_data;
594 	u32 xdp_act;
595 
596 	/* Tracing point */
597 	trace_dpaa2_rx_fd(priv->net_dev, fd);
598 
599 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
600 	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
601 				DMA_BIDIRECTIONAL);
602 
603 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
604 	prefetch(buf_data);
605 
606 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
607 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
608 
609 	if (fd_format == dpaa2_fd_single) {
610 		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
611 		if (xdp_act != XDP_PASS) {
612 			percpu_stats->rx_packets++;
613 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
614 			return;
615 		}
616 
617 		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
618 		if (!skb) {
619 			dma_unmap_page(dev, addr, priv->rx_buf_size,
620 				       DMA_BIDIRECTIONAL);
621 			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
622 		}
623 	} else if (fd_format == dpaa2_fd_sg) {
624 		WARN_ON(priv->xdp_prog);
625 
626 		dma_unmap_page(dev, addr, priv->rx_buf_size,
627 			       DMA_BIDIRECTIONAL);
628 		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
629 		free_pages((unsigned long)vaddr, 0);
630 		percpu_extras->rx_sg_frames++;
631 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
632 	} else {
633 		/* We don't support any other format */
634 		goto err_frame_format;
635 	}
636 
637 	if (unlikely(!skb))
638 		goto err_build_skb;
639 
640 	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
641 	return;
642 
643 err_build_skb:
644 	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
645 err_frame_format:
646 	percpu_stats->rx_dropped++;
647 }
648 
649 /* Processing of Rx frames received on the error FQ
650  * We check and print the error bits and then free the frame
651  */
652 static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
653 			     struct dpaa2_eth_channel *ch,
654 			     const struct dpaa2_fd *fd,
655 			     struct dpaa2_eth_fq *fq __always_unused)
656 {
657 	struct device *dev = priv->net_dev->dev.parent;
658 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
659 	u8 fd_format = dpaa2_fd_get_format(fd);
660 	struct rtnl_link_stats64 *percpu_stats;
661 	struct dpaa2_eth_trap_item *trap_item;
662 	struct dpaa2_fapr *fapr;
663 	struct sk_buff *skb;
664 	void *buf_data;
665 	void *vaddr;
666 
667 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
668 	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
669 				DMA_BIDIRECTIONAL);
670 
671 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
672 
673 	if (fd_format == dpaa2_fd_single) {
674 		dma_unmap_page(dev, addr, priv->rx_buf_size,
675 			       DMA_BIDIRECTIONAL);
676 		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
677 	} else if (fd_format == dpaa2_fd_sg) {
678 		dma_unmap_page(dev, addr, priv->rx_buf_size,
679 			       DMA_BIDIRECTIONAL);
680 		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
681 		free_pages((unsigned long)vaddr, 0);
682 	} else {
683 		/* We don't support any other format */
684 		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
685 		goto err_frame_format;
686 	}
687 
688 	fapr = dpaa2_get_fapr(vaddr, false);
689 	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
690 	if (trap_item)
691 		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
692 				    &priv->devlink_port, NULL);
693 	consume_skb(skb);
694 
695 err_frame_format:
696 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
697 	percpu_stats->rx_errors++;
698 	ch->buf_count--;
699 }
700 
701 /* Consume all frames pull-dequeued into the store. This is the simplest way to
702  * make sure we don't accidentally issue another volatile dequeue which would
703  * overwrite (leak) frames already in the store.
704  *
705  * Observance of NAPI budget is not our concern, leaving that to the caller.
706  */
707 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
708 				    struct dpaa2_eth_fq **src)
709 {
710 	struct dpaa2_eth_priv *priv = ch->priv;
711 	struct dpaa2_eth_fq *fq = NULL;
712 	struct dpaa2_dq *dq;
713 	const struct dpaa2_fd *fd;
714 	int cleaned = 0, retries = 0;
715 	int is_last;
716 
717 	do {
718 		dq = dpaa2_io_store_next(ch->store, &is_last);
719 		if (unlikely(!dq)) {
720 			/* If we're here, we *must* have placed a
721 			 * volatile dequeue comnmand, so keep reading through
722 			 * the store until we get some sort of valid response
723 			 * token (either a valid frame or an "empty dequeue")
724 			 */
725 			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
726 				netdev_err_once(priv->net_dev,
727 						"Unable to read a valid dequeue response\n");
728 				return -ETIMEDOUT;
729 			}
730 			continue;
731 		}
732 
733 		fd = dpaa2_dq_fd(dq);
734 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
735 
736 		fq->consume(priv, ch, fd, fq);
737 		cleaned++;
738 		retries = 0;
739 	} while (!is_last);
740 
741 	if (!cleaned)
742 		return 0;
743 
744 	fq->stats.frames += cleaned;
745 	ch->stats.frames += cleaned;
746 	ch->stats.frames_per_cdan += cleaned;
747 
748 	/* A dequeue operation only pulls frames from a single queue
749 	 * into the store. Return the frame queue as an out param.
750 	 */
751 	if (src)
752 		*src = fq;
753 
754 	return cleaned;
755 }
756 
757 static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
758 			       u8 *msgtype, u8 *twostep, u8 *udp,
759 			       u16 *correction_offset,
760 			       u16 *origintimestamp_offset)
761 {
762 	unsigned int ptp_class;
763 	struct ptp_header *hdr;
764 	unsigned int type;
765 	u8 *base;
766 
767 	ptp_class = ptp_classify_raw(skb);
768 	if (ptp_class == PTP_CLASS_NONE)
769 		return -EINVAL;
770 
771 	hdr = ptp_parse_header(skb, ptp_class);
772 	if (!hdr)
773 		return -EINVAL;
774 
775 	*msgtype = ptp_get_msgtype(hdr, ptp_class);
776 	*twostep = hdr->flag_field[0] & 0x2;
777 
778 	type = ptp_class & PTP_CLASS_PMASK;
779 	if (type == PTP_CLASS_IPV4 ||
780 	    type == PTP_CLASS_IPV6)
781 		*udp = 1;
782 	else
783 		*udp = 0;
784 
785 	base = skb_mac_header(skb);
786 	*correction_offset = (u8 *)&hdr->correction - base;
787 	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
788 
789 	return 0;
790 }
791 
792 /* Configure the egress frame annotation for timestamp update */
793 static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
794 				       struct dpaa2_fd *fd,
795 				       void *buf_start,
796 				       struct sk_buff *skb)
797 {
798 	struct ptp_tstamp origin_timestamp;
799 	u8 msgtype, twostep, udp;
800 	struct dpaa2_faead *faead;
801 	struct dpaa2_fas *fas;
802 	struct timespec64 ts;
803 	u16 offset1, offset2;
804 	u32 ctrl, frc;
805 	__le64 *ns;
806 	u8 *data;
807 
808 	/* Mark the egress frame annotation area as valid */
809 	frc = dpaa2_fd_get_frc(fd);
810 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
811 
812 	/* Set hardware annotation size */
813 	ctrl = dpaa2_fd_get_ctrl(fd);
814 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
815 
816 	/* enable UPD (update prepanded data) bit in FAEAD field of
817 	 * hardware frame annotation area
818 	 */
819 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
820 	faead = dpaa2_get_faead(buf_start, true);
821 	faead->ctrl = cpu_to_le32(ctrl);
822 
823 	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
824 		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
825 					&offset1, &offset2) ||
826 		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
827 			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
828 			return;
829 		}
830 
831 		/* Mark the frame annotation status as valid */
832 		frc = dpaa2_fd_get_frc(fd);
833 		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
834 
835 		/* Mark the PTP flag for one step timestamping */
836 		fas = dpaa2_get_fas(buf_start, true);
837 		fas->status = cpu_to_le32(DPAA2_FAS_PTP);
838 
839 		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
840 		ns = dpaa2_get_ts(buf_start, true);
841 		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
842 				  DPAA2_PTP_CLK_PERIOD_NS);
843 
844 		/* Update current time to PTP message originTimestamp field */
845 		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
846 		data = skb_mac_header(skb);
847 		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
848 		*(__be32 *)(data + offset2 + 2) =
849 			htonl(origin_timestamp.sec_lsb);
850 		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
851 
852 		if (priv->ptp_correction_off == offset1)
853 			return;
854 
855 		priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
856 		priv->ptp_correction_off = offset1;
857 
858 	}
859 }
860 
861 void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
862 {
863 	struct dpaa2_eth_sgt_cache *sgt_cache;
864 	void *sgt_buf = NULL;
865 	int sgt_buf_size;
866 
867 	sgt_cache = this_cpu_ptr(priv->sgt_cache);
868 	sgt_buf_size = priv->tx_data_offset +
869 		DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
870 
871 	if (sgt_cache->count == 0)
872 		sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
873 	else
874 		sgt_buf = sgt_cache->buf[--sgt_cache->count];
875 	if (!sgt_buf)
876 		return NULL;
877 
878 	memset(sgt_buf, 0, sgt_buf_size);
879 
880 	return sgt_buf;
881 }
882 
883 void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
884 {
885 	struct dpaa2_eth_sgt_cache *sgt_cache;
886 
887 	sgt_cache = this_cpu_ptr(priv->sgt_cache);
888 	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
889 		skb_free_frag(sgt_buf);
890 	else
891 		sgt_cache->buf[sgt_cache->count++] = sgt_buf;
892 }
893 
894 /* Create a frame descriptor based on a fragmented skb */
895 static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
896 				 struct sk_buff *skb,
897 				 struct dpaa2_fd *fd,
898 				 void **swa_addr)
899 {
900 	struct device *dev = priv->net_dev->dev.parent;
901 	void *sgt_buf = NULL;
902 	dma_addr_t addr;
903 	int nr_frags = skb_shinfo(skb)->nr_frags;
904 	struct dpaa2_sg_entry *sgt;
905 	int i, err;
906 	int sgt_buf_size;
907 	struct scatterlist *scl, *crt_scl;
908 	int num_sg;
909 	int num_dma_bufs;
910 	struct dpaa2_eth_swa *swa;
911 
912 	/* Create and map scatterlist.
913 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
914 	 * to go beyond nr_frags+1.
915 	 * Note: We don't support chained scatterlists
916 	 */
917 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
918 		return -EINVAL;
919 
920 	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
921 	if (unlikely(!scl))
922 		return -ENOMEM;
923 
924 	sg_init_table(scl, nr_frags + 1);
925 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
926 	if (unlikely(num_sg < 0)) {
927 		err = -ENOMEM;
928 		goto dma_map_sg_failed;
929 	}
930 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
931 	if (unlikely(!num_dma_bufs)) {
932 		err = -ENOMEM;
933 		goto dma_map_sg_failed;
934 	}
935 
936 	/* Prepare the HW SGT structure */
937 	sgt_buf_size = priv->tx_data_offset +
938 		       sizeof(struct dpaa2_sg_entry) *  num_dma_bufs;
939 	sgt_buf = dpaa2_eth_sgt_get(priv);
940 	if (unlikely(!sgt_buf)) {
941 		err = -ENOMEM;
942 		goto sgt_buf_alloc_failed;
943 	}
944 
945 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
946 
947 	/* Fill in the HW SGT structure.
948 	 *
949 	 * sgt_buf is zeroed out, so the following fields are implicit
950 	 * in all sgt entries:
951 	 *   - offset is 0
952 	 *   - format is 'dpaa2_sg_single'
953 	 */
954 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
955 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
956 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
957 	}
958 	dpaa2_sg_set_final(&sgt[i - 1], true);
959 
960 	/* Store the skb backpointer in the SGT buffer.
961 	 * Fit the scatterlist and the number of buffers alongside the
962 	 * skb backpointer in the software annotation area. We'll need
963 	 * all of them on Tx Conf.
964 	 */
965 	*swa_addr = (void *)sgt_buf;
966 	swa = (struct dpaa2_eth_swa *)sgt_buf;
967 	swa->type = DPAA2_ETH_SWA_SG;
968 	swa->sg.skb = skb;
969 	swa->sg.scl = scl;
970 	swa->sg.num_sg = num_sg;
971 	swa->sg.sgt_size = sgt_buf_size;
972 
973 	/* Separately map the SGT buffer */
974 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
975 	if (unlikely(dma_mapping_error(dev, addr))) {
976 		err = -ENOMEM;
977 		goto dma_map_single_failed;
978 	}
979 	memset(fd, 0, sizeof(struct dpaa2_fd));
980 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
981 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
982 	dpaa2_fd_set_addr(fd, addr);
983 	dpaa2_fd_set_len(fd, skb->len);
984 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
985 
986 	return 0;
987 
988 dma_map_single_failed:
989 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
990 sgt_buf_alloc_failed:
991 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
992 dma_map_sg_failed:
993 	kfree(scl);
994 	return err;
995 }
996 
997 /* Create a SG frame descriptor based on a linear skb.
998  *
999  * This function is used on the Tx path when the skb headroom is not large
1000  * enough for the HW requirements, thus instead of realloc-ing the skb we
1001  * create a SG frame descriptor with only one entry.
1002  */
1003 static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
1004 					    struct sk_buff *skb,
1005 					    struct dpaa2_fd *fd,
1006 					    void **swa_addr)
1007 {
1008 	struct device *dev = priv->net_dev->dev.parent;
1009 	struct dpaa2_sg_entry *sgt;
1010 	struct dpaa2_eth_swa *swa;
1011 	dma_addr_t addr, sgt_addr;
1012 	void *sgt_buf = NULL;
1013 	int sgt_buf_size;
1014 	int err;
1015 
1016 	/* Prepare the HW SGT structure */
1017 	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
1018 	sgt_buf = dpaa2_eth_sgt_get(priv);
1019 	if (unlikely(!sgt_buf))
1020 		return -ENOMEM;
1021 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1022 
1023 	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
1024 	if (unlikely(dma_mapping_error(dev, addr))) {
1025 		err = -ENOMEM;
1026 		goto data_map_failed;
1027 	}
1028 
1029 	/* Fill in the HW SGT structure */
1030 	dpaa2_sg_set_addr(sgt, addr);
1031 	dpaa2_sg_set_len(sgt, skb->len);
1032 	dpaa2_sg_set_final(sgt, true);
1033 
1034 	/* Store the skb backpointer in the SGT buffer */
1035 	*swa_addr = (void *)sgt_buf;
1036 	swa = (struct dpaa2_eth_swa *)sgt_buf;
1037 	swa->type = DPAA2_ETH_SWA_SINGLE;
1038 	swa->single.skb = skb;
1039 	swa->single.sgt_size = sgt_buf_size;
1040 
1041 	/* Separately map the SGT buffer */
1042 	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1043 	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1044 		err = -ENOMEM;
1045 		goto sgt_map_failed;
1046 	}
1047 
1048 	memset(fd, 0, sizeof(struct dpaa2_fd));
1049 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1050 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1051 	dpaa2_fd_set_addr(fd, sgt_addr);
1052 	dpaa2_fd_set_len(fd, skb->len);
1053 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1054 
1055 	return 0;
1056 
1057 sgt_map_failed:
1058 	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1059 data_map_failed:
1060 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
1061 
1062 	return err;
1063 }
1064 
1065 /* Create a frame descriptor based on a linear skb */
1066 static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
1067 				     struct sk_buff *skb,
1068 				     struct dpaa2_fd *fd,
1069 				     void **swa_addr)
1070 {
1071 	struct device *dev = priv->net_dev->dev.parent;
1072 	u8 *buffer_start, *aligned_start;
1073 	struct dpaa2_eth_swa *swa;
1074 	dma_addr_t addr;
1075 
1076 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
1077 
1078 	/* If there's enough room to align the FD address, do it.
1079 	 * It will help hardware optimize accesses.
1080 	 */
1081 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1082 				  DPAA2_ETH_TX_BUF_ALIGN);
1083 	if (aligned_start >= skb->head)
1084 		buffer_start = aligned_start;
1085 
1086 	/* Store a backpointer to the skb at the beginning of the buffer
1087 	 * (in the private data area) such that we can release it
1088 	 * on Tx confirm
1089 	 */
1090 	*swa_addr = (void *)buffer_start;
1091 	swa = (struct dpaa2_eth_swa *)buffer_start;
1092 	swa->type = DPAA2_ETH_SWA_SINGLE;
1093 	swa->single.skb = skb;
1094 
1095 	addr = dma_map_single(dev, buffer_start,
1096 			      skb_tail_pointer(skb) - buffer_start,
1097 			      DMA_BIDIRECTIONAL);
1098 	if (unlikely(dma_mapping_error(dev, addr)))
1099 		return -ENOMEM;
1100 
1101 	memset(fd, 0, sizeof(struct dpaa2_fd));
1102 	dpaa2_fd_set_addr(fd, addr);
1103 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1104 	dpaa2_fd_set_len(fd, skb->len);
1105 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
1106 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1107 
1108 	return 0;
1109 }
1110 
1111 /* FD freeing routine on the Tx path
1112  *
1113  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1114  * back-pointed to is also freed.
1115  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1116  * dpaa2_eth_tx().
1117  */
1118 void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
1119 			  struct dpaa2_eth_channel *ch,
1120 			  struct dpaa2_eth_fq *fq,
1121 			  const struct dpaa2_fd *fd, bool in_napi)
1122 {
1123 	struct device *dev = priv->net_dev->dev.parent;
1124 	dma_addr_t fd_addr, sg_addr;
1125 	struct sk_buff *skb = NULL;
1126 	unsigned char *buffer_start;
1127 	struct dpaa2_eth_swa *swa;
1128 	u8 fd_format = dpaa2_fd_get_format(fd);
1129 	u32 fd_len = dpaa2_fd_get_len(fd);
1130 	struct dpaa2_sg_entry *sgt;
1131 	int should_free_skb = 1;
1132 	void *tso_hdr;
1133 	int i;
1134 
1135 	fd_addr = dpaa2_fd_get_addr(fd);
1136 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1137 	swa = (struct dpaa2_eth_swa *)buffer_start;
1138 
1139 	if (fd_format == dpaa2_fd_single) {
1140 		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1141 			skb = swa->single.skb;
1142 			/* Accessing the skb buffer is safe before dma unmap,
1143 			 * because we didn't map the actual skb shell.
1144 			 */
1145 			dma_unmap_single(dev, fd_addr,
1146 					 skb_tail_pointer(skb) - buffer_start,
1147 					 DMA_BIDIRECTIONAL);
1148 		} else {
1149 			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1150 			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1151 					 DMA_BIDIRECTIONAL);
1152 		}
1153 	} else if (fd_format == dpaa2_fd_sg) {
1154 		if (swa->type == DPAA2_ETH_SWA_SG) {
1155 			skb = swa->sg.skb;
1156 
1157 			/* Unmap the scatterlist */
1158 			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1159 				     DMA_BIDIRECTIONAL);
1160 			kfree(swa->sg.scl);
1161 
1162 			/* Unmap the SGT buffer */
1163 			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1164 					 DMA_BIDIRECTIONAL);
1165 		} else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
1166 			skb = swa->tso.skb;
1167 
1168 			sgt = (struct dpaa2_sg_entry *)(buffer_start +
1169 							priv->tx_data_offset);
1170 
1171 			/* Unmap the SGT buffer */
1172 			dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
1173 					 DMA_BIDIRECTIONAL);
1174 
1175 			/* Unmap and free the header */
1176 			tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
1177 			dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1178 					 DMA_TO_DEVICE);
1179 			kfree(tso_hdr);
1180 
1181 			/* Unmap the other SG entries for the data */
1182 			for (i = 1; i < swa->tso.num_sg; i++)
1183 				dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1184 						 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1185 
1186 			if (!swa->tso.is_last_fd)
1187 				should_free_skb = 0;
1188 		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
1189 			/* Unmap the SGT Buffer */
1190 			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
1191 					 DMA_BIDIRECTIONAL);
1192 		} else {
1193 			skb = swa->single.skb;
1194 
1195 			/* Unmap the SGT Buffer */
1196 			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1197 					 DMA_BIDIRECTIONAL);
1198 
1199 			sgt = (struct dpaa2_sg_entry *)(buffer_start +
1200 							priv->tx_data_offset);
1201 			sg_addr = dpaa2_sg_get_addr(sgt);
1202 			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1203 		}
1204 	} else {
1205 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
1206 		return;
1207 	}
1208 
1209 	if (swa->type == DPAA2_ETH_SWA_XSK) {
1210 		ch->xsk_tx_pkts_sent++;
1211 		dpaa2_eth_sgt_recycle(priv, buffer_start);
1212 		return;
1213 	}
1214 
1215 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1216 		fq->dq_frames++;
1217 		fq->dq_bytes += fd_len;
1218 	}
1219 
1220 	if (swa->type == DPAA2_ETH_SWA_XDP) {
1221 		xdp_return_frame(swa->xdp.xdpf);
1222 		return;
1223 	}
1224 
1225 	/* Get the timestamp value */
1226 	if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
1227 		if (skb->cb[0] == TX_TSTAMP) {
1228 			struct skb_shared_hwtstamps shhwtstamps;
1229 			__le64 *ts = dpaa2_get_ts(buffer_start, true);
1230 			u64 ns;
1231 
1232 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1233 
1234 			ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1235 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
1236 			skb_tstamp_tx(skb, &shhwtstamps);
1237 		} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1238 			mutex_unlock(&priv->onestep_tstamp_lock);
1239 		}
1240 	}
1241 
1242 	/* Free SGT buffer allocated on tx */
1243 	if (fd_format != dpaa2_fd_single)
1244 		dpaa2_eth_sgt_recycle(priv, buffer_start);
1245 
1246 	/* Move on with skb release. If we are just confirming multiple FDs
1247 	 * from the same TSO skb then only the last one will need to free the
1248 	 * skb.
1249 	 */
1250 	if (should_free_skb)
1251 		napi_consume_skb(skb, in_napi);
1252 }
1253 
1254 static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
1255 				  struct sk_buff *skb, struct dpaa2_fd *fd,
1256 				  int *num_fds, u32 *total_fds_len)
1257 {
1258 	struct device *dev = priv->net_dev->dev.parent;
1259 	int hdr_len, total_len, data_left, fd_len;
1260 	int num_sge, err, i, sgt_buf_size;
1261 	struct dpaa2_fd *fd_start = fd;
1262 	struct dpaa2_sg_entry *sgt;
1263 	struct dpaa2_eth_swa *swa;
1264 	dma_addr_t sgt_addr, addr;
1265 	dma_addr_t tso_hdr_dma;
1266 	unsigned int index = 0;
1267 	struct tso_t tso;
1268 	char *tso_hdr;
1269 	void *sgt_buf;
1270 
1271 	/* Initialize the TSO handler, and prepare the first payload */
1272 	hdr_len = tso_start(skb, &tso);
1273 	*total_fds_len = 0;
1274 
1275 	total_len = skb->len - hdr_len;
1276 	while (total_len > 0) {
1277 		/* Prepare the HW SGT structure for this frame */
1278 		sgt_buf = dpaa2_eth_sgt_get(priv);
1279 		if (unlikely(!sgt_buf)) {
1280 			netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
1281 			err = -ENOMEM;
1282 			goto err_sgt_get;
1283 		}
1284 		sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1285 
1286 		/* Determine the data length of this frame */
1287 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1288 		total_len -= data_left;
1289 		fd_len = data_left + hdr_len;
1290 
1291 		/* Prepare packet headers: MAC + IP + TCP */
1292 		tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
1293 		if (!tso_hdr) {
1294 			err =  -ENOMEM;
1295 			goto err_alloc_tso_hdr;
1296 		}
1297 
1298 		tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
1299 		tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1300 		if (dma_mapping_error(dev, tso_hdr_dma)) {
1301 			netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
1302 			err = -ENOMEM;
1303 			goto err_map_tso_hdr;
1304 		}
1305 
1306 		/* Setup the SG entry for the header */
1307 		dpaa2_sg_set_addr(sgt, tso_hdr_dma);
1308 		dpaa2_sg_set_len(sgt, hdr_len);
1309 		dpaa2_sg_set_final(sgt, data_left <= 0);
1310 
1311 		/* Compose the SG entries for each fragment of data */
1312 		num_sge = 1;
1313 		while (data_left > 0) {
1314 			int size;
1315 
1316 			/* Move to the next SG entry */
1317 			sgt++;
1318 			size = min_t(int, tso.size, data_left);
1319 
1320 			addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1321 			if (dma_mapping_error(dev, addr)) {
1322 				netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
1323 				err = -ENOMEM;
1324 				goto err_map_data;
1325 			}
1326 			dpaa2_sg_set_addr(sgt, addr);
1327 			dpaa2_sg_set_len(sgt, size);
1328 			dpaa2_sg_set_final(sgt, size == data_left);
1329 
1330 			num_sge++;
1331 
1332 			/* Build the data for the __next__ fragment */
1333 			data_left -= size;
1334 			tso_build_data(skb, &tso, size);
1335 		}
1336 
1337 		/* Store the skb backpointer in the SGT buffer */
1338 		sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
1339 		swa = (struct dpaa2_eth_swa *)sgt_buf;
1340 		swa->type = DPAA2_ETH_SWA_SW_TSO;
1341 		swa->tso.skb = skb;
1342 		swa->tso.num_sg = num_sge;
1343 		swa->tso.sgt_size = sgt_buf_size;
1344 		swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
1345 
1346 		/* Separately map the SGT buffer */
1347 		sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1348 		if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1349 			netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
1350 			err = -ENOMEM;
1351 			goto err_map_sgt;
1352 		}
1353 
1354 		/* Setup the frame descriptor */
1355 		memset(fd, 0, sizeof(struct dpaa2_fd));
1356 		dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1357 		dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1358 		dpaa2_fd_set_addr(fd, sgt_addr);
1359 		dpaa2_fd_set_len(fd, fd_len);
1360 		dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1361 
1362 		*total_fds_len += fd_len;
1363 		/* Advance to the next frame descriptor */
1364 		fd++;
1365 		index++;
1366 	}
1367 
1368 	*num_fds = index;
1369 
1370 	return 0;
1371 
1372 err_map_sgt:
1373 err_map_data:
1374 	/* Unmap all the data S/G entries for the current FD */
1375 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1376 	for (i = 1; i < num_sge; i++)
1377 		dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1378 				 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1379 
1380 	/* Unmap the header entry */
1381 	dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1382 err_map_tso_hdr:
1383 	kfree(tso_hdr);
1384 err_alloc_tso_hdr:
1385 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
1386 err_sgt_get:
1387 	/* Free all the other FDs that were already fully created */
1388 	for (i = 0; i < index; i++)
1389 		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
1390 
1391 	return err;
1392 }
1393 
1394 static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1395 				  struct net_device *net_dev)
1396 {
1397 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1398 	int total_enqueued = 0, retries = 0, enqueued;
1399 	struct dpaa2_eth_drv_stats *percpu_extras;
1400 	struct rtnl_link_stats64 *percpu_stats;
1401 	unsigned int needed_headroom;
1402 	int num_fds = 1, max_retries;
1403 	struct dpaa2_eth_fq *fq;
1404 	struct netdev_queue *nq;
1405 	struct dpaa2_fd *fd;
1406 	u16 queue_mapping;
1407 	void *swa = NULL;
1408 	u8 prio = 0;
1409 	int err, i;
1410 	u32 fd_len;
1411 
1412 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1413 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1414 	fd = (this_cpu_ptr(priv->fd))->array;
1415 
1416 	needed_headroom = dpaa2_eth_needed_headroom(skb);
1417 
1418 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
1419 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
1420 	 */
1421 	skb = skb_unshare(skb, GFP_ATOMIC);
1422 	if (unlikely(!skb)) {
1423 		/* skb_unshare() has already freed the skb */
1424 		percpu_stats->tx_dropped++;
1425 		return NETDEV_TX_OK;
1426 	}
1427 
1428 	/* Setup the FD fields */
1429 
1430 	if (skb_is_gso(skb)) {
1431 		err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
1432 		percpu_extras->tx_sg_frames += num_fds;
1433 		percpu_extras->tx_sg_bytes += fd_len;
1434 		percpu_extras->tx_tso_frames += num_fds;
1435 		percpu_extras->tx_tso_bytes += fd_len;
1436 	} else if (skb_is_nonlinear(skb)) {
1437 		err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
1438 		percpu_extras->tx_sg_frames++;
1439 		percpu_extras->tx_sg_bytes += skb->len;
1440 		fd_len = dpaa2_fd_get_len(fd);
1441 	} else if (skb_headroom(skb) < needed_headroom) {
1442 		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
1443 		percpu_extras->tx_sg_frames++;
1444 		percpu_extras->tx_sg_bytes += skb->len;
1445 		percpu_extras->tx_converted_sg_frames++;
1446 		percpu_extras->tx_converted_sg_bytes += skb->len;
1447 		fd_len = dpaa2_fd_get_len(fd);
1448 	} else {
1449 		err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
1450 		fd_len = dpaa2_fd_get_len(fd);
1451 	}
1452 
1453 	if (unlikely(err)) {
1454 		percpu_stats->tx_dropped++;
1455 		goto err_build_fd;
1456 	}
1457 
1458 	if (swa && skb->cb[0])
1459 		dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
1460 
1461 	/* Tracing point */
1462 	for (i = 0; i < num_fds; i++)
1463 		trace_dpaa2_tx_fd(net_dev, &fd[i]);
1464 
1465 	/* TxConf FQ selection relies on queue id from the stack.
1466 	 * In case of a forwarded frame from another DPNI interface, we choose
1467 	 * a queue affined to the same core that processed the Rx frame
1468 	 */
1469 	queue_mapping = skb_get_queue_mapping(skb);
1470 
1471 	if (net_dev->num_tc) {
1472 		prio = netdev_txq_to_tc(net_dev, queue_mapping);
1473 		/* Hardware interprets priority level 0 as being the highest,
1474 		 * so we need to do a reverse mapping to the netdev tc index
1475 		 */
1476 		prio = net_dev->num_tc - prio - 1;
1477 		/* We have only one FQ array entry for all Tx hardware queues
1478 		 * with the same flow id (but different priority levels)
1479 		 */
1480 		queue_mapping %= dpaa2_eth_queue_count(priv);
1481 	}
1482 	fq = &priv->fq[queue_mapping];
1483 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
1484 	netdev_tx_sent_queue(nq, fd_len);
1485 
1486 	/* Everything that happens after this enqueues might race with
1487 	 * the Tx confirmation callback for this frame
1488 	 */
1489 	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
1490 	while (total_enqueued < num_fds && retries < max_retries) {
1491 		err = priv->enqueue(priv, fq, &fd[total_enqueued],
1492 				    prio, num_fds - total_enqueued, &enqueued);
1493 		if (err == -EBUSY) {
1494 			retries++;
1495 			continue;
1496 		}
1497 
1498 		total_enqueued += enqueued;
1499 	}
1500 	percpu_extras->tx_portal_busy += retries;
1501 
1502 	if (unlikely(err < 0)) {
1503 		percpu_stats->tx_errors++;
1504 		/* Clean up everything, including freeing the skb */
1505 		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
1506 		netdev_tx_completed_queue(nq, 1, fd_len);
1507 	} else {
1508 		percpu_stats->tx_packets += total_enqueued;
1509 		percpu_stats->tx_bytes += fd_len;
1510 	}
1511 
1512 	return NETDEV_TX_OK;
1513 
1514 err_build_fd:
1515 	dev_kfree_skb(skb);
1516 
1517 	return NETDEV_TX_OK;
1518 }
1519 
1520 static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1521 {
1522 	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1523 						   tx_onestep_tstamp);
1524 	struct sk_buff *skb;
1525 
1526 	while (true) {
1527 		skb = skb_dequeue(&priv->tx_skbs);
1528 		if (!skb)
1529 			return;
1530 
1531 		/* Lock just before TX one-step timestamping packet,
1532 		 * and release the lock in dpaa2_eth_free_tx_fd when
1533 		 * confirm the packet has been sent on hardware, or
1534 		 * when clean up during transmit failure.
1535 		 */
1536 		mutex_lock(&priv->onestep_tstamp_lock);
1537 		__dpaa2_eth_tx(skb, priv->net_dev);
1538 	}
1539 }
1540 
1541 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1542 {
1543 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1544 	u8 msgtype, twostep, udp;
1545 	u16 offset1, offset2;
1546 
1547 	/* Utilize skb->cb[0] for timestamping request per skb */
1548 	skb->cb[0] = 0;
1549 
1550 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1551 		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1552 			skb->cb[0] = TX_TSTAMP;
1553 		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1554 			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1555 	}
1556 
1557 	/* TX for one-step timestamping PTP Sync packet */
1558 	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1559 		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1560 					 &offset1, &offset2))
1561 			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1562 				skb_queue_tail(&priv->tx_skbs, skb);
1563 				queue_work(priv->dpaa2_ptp_wq,
1564 					   &priv->tx_onestep_tstamp);
1565 				return NETDEV_TX_OK;
1566 			}
1567 		/* Use two-step timestamping if not one-step timestamping
1568 		 * PTP Sync packet
1569 		 */
1570 		skb->cb[0] = TX_TSTAMP;
1571 	}
1572 
1573 	/* TX for other packets */
1574 	return __dpaa2_eth_tx(skb, net_dev);
1575 }
1576 
1577 /* Tx confirmation frame processing routine */
1578 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1579 			      struct dpaa2_eth_channel *ch,
1580 			      const struct dpaa2_fd *fd,
1581 			      struct dpaa2_eth_fq *fq)
1582 {
1583 	struct rtnl_link_stats64 *percpu_stats;
1584 	struct dpaa2_eth_drv_stats *percpu_extras;
1585 	u32 fd_len = dpaa2_fd_get_len(fd);
1586 	u32 fd_errors;
1587 
1588 	/* Tracing point */
1589 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1590 
1591 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1592 	percpu_extras->tx_conf_frames++;
1593 	percpu_extras->tx_conf_bytes += fd_len;
1594 	ch->stats.bytes_per_cdan += fd_len;
1595 
1596 	/* Check frame errors in the FD field */
1597 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1598 	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
1599 
1600 	if (likely(!fd_errors))
1601 		return;
1602 
1603 	if (net_ratelimit())
1604 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1605 			   fd_errors);
1606 
1607 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1608 	/* Tx-conf logically pertains to the egress path. */
1609 	percpu_stats->tx_errors++;
1610 }
1611 
1612 static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1613 					   bool enable)
1614 {
1615 	int err;
1616 
1617 	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1618 
1619 	if (err) {
1620 		netdev_err(priv->net_dev,
1621 			   "dpni_enable_vlan_filter failed\n");
1622 		return err;
1623 	}
1624 
1625 	return 0;
1626 }
1627 
1628 static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1629 {
1630 	int err;
1631 
1632 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1633 			       DPNI_OFF_RX_L3_CSUM, enable);
1634 	if (err) {
1635 		netdev_err(priv->net_dev,
1636 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
1637 		return err;
1638 	}
1639 
1640 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1641 			       DPNI_OFF_RX_L4_CSUM, enable);
1642 	if (err) {
1643 		netdev_err(priv->net_dev,
1644 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
1645 		return err;
1646 	}
1647 
1648 	return 0;
1649 }
1650 
1651 static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1652 {
1653 	int err;
1654 
1655 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1656 			       DPNI_OFF_TX_L3_CSUM, enable);
1657 	if (err) {
1658 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1659 		return err;
1660 	}
1661 
1662 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1663 			       DPNI_OFF_TX_L4_CSUM, enable);
1664 	if (err) {
1665 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1666 		return err;
1667 	}
1668 
1669 	return 0;
1670 }
1671 
1672 /* Perform a single release command to add buffers
1673  * to the specified buffer pool
1674  */
1675 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1676 			      struct dpaa2_eth_channel *ch)
1677 {
1678 	struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
1679 	struct device *dev = priv->net_dev->dev.parent;
1680 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1681 	struct dpaa2_eth_swa *swa;
1682 	struct page *page;
1683 	dma_addr_t addr;
1684 	int retries = 0;
1685 	int i = 0, err;
1686 	u32 batch;
1687 
1688 	/* Allocate buffers visible to WRIOP */
1689 	if (!ch->xsk_zc) {
1690 		for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1691 			/* Also allocate skb shared info and alignment padding.
1692 			 * There is one page for each Rx buffer. WRIOP sees
1693 			 * the entire page except for a tailroom reserved for
1694 			 * skb shared info
1695 			 */
1696 			page = dev_alloc_pages(0);
1697 			if (!page)
1698 				goto err_alloc;
1699 
1700 			addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1701 					    DMA_BIDIRECTIONAL);
1702 			if (unlikely(dma_mapping_error(dev, addr)))
1703 				goto err_map;
1704 
1705 			buf_array[i] = addr;
1706 
1707 			/* tracing point */
1708 			trace_dpaa2_eth_buf_seed(priv->net_dev,
1709 						 page_address(page),
1710 						 DPAA2_ETH_RX_BUF_RAW_SIZE,
1711 						 addr, priv->rx_buf_size,
1712 						 ch->bp->bpid);
1713 		}
1714 	} else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
1715 		/* Allocate XSK buffers for AF_XDP fast path in batches
1716 		 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
1717 		 * provide enough buffers at the moment
1718 		 */
1719 		batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
1720 					     DPAA2_ETH_BUFS_PER_CMD);
1721 		if (!batch)
1722 			goto err_alloc;
1723 
1724 		for (i = 0; i < batch; i++) {
1725 			swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
1726 						       DPAA2_ETH_RX_HWA_SIZE);
1727 			swa->xsk.xdp_buff = xdp_buffs[i];
1728 
1729 			addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
1730 			if (unlikely(dma_mapping_error(dev, addr)))
1731 				goto err_map;
1732 
1733 			buf_array[i] = addr;
1734 		}
1735 	}
1736 
1737 release_bufs:
1738 	/* In case the portal is busy, retry until successful */
1739 	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
1740 					       buf_array, i)) == -EBUSY) {
1741 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1742 			break;
1743 		cpu_relax();
1744 	}
1745 
1746 	/* If release command failed, clean up and bail out;
1747 	 * not much else we can do about it
1748 	 */
1749 	if (err) {
1750 		dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
1751 		return 0;
1752 	}
1753 
1754 	return i;
1755 
1756 err_map:
1757 	if (!ch->xsk_zc) {
1758 		__free_pages(page, 0);
1759 	} else {
1760 		for (; i < batch; i++)
1761 			xsk_buff_free(xdp_buffs[i]);
1762 	}
1763 err_alloc:
1764 	/* If we managed to allocate at least some buffers,
1765 	 * release them to hardware
1766 	 */
1767 	if (i)
1768 		goto release_bufs;
1769 
1770 	return 0;
1771 }
1772 
1773 static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
1774 			       struct dpaa2_eth_channel *ch)
1775 {
1776 	int i;
1777 	int new_count;
1778 
1779 	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
1780 		new_count = dpaa2_eth_add_bufs(priv, ch);
1781 		ch->buf_count += new_count;
1782 
1783 		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
1784 			return -ENOMEM;
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
1791 {
1792 	struct net_device *net_dev = priv->net_dev;
1793 	struct dpaa2_eth_channel *channel;
1794 	int i, err = 0;
1795 
1796 	for (i = 0; i < priv->num_channels; i++) {
1797 		channel = priv->channel[i];
1798 
1799 		err = dpaa2_eth_seed_pool(priv, channel);
1800 
1801 		/* Not much to do; the buffer pool, though not filled up,
1802 		 * may still contain some buffers which would enable us
1803 		 * to limp on.
1804 		 */
1805 		if (err)
1806 			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1807 				   channel->bp->dev->obj_desc.id,
1808 				   channel->bp->bpid);
1809 	}
1810 }
1811 
1812 /*
1813  * Drain the specified number of buffers from one of the DPNI's private buffer
1814  * pools.
1815  * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD
1816  */
1817 static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
1818 				 int count)
1819 {
1820 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1821 	bool xsk_zc = false;
1822 	int retries = 0;
1823 	int i, ret;
1824 
1825 	for (i = 0; i < priv->num_channels; i++)
1826 		if (priv->channel[i]->bp->bpid == bpid)
1827 			xsk_zc = priv->channel[i]->xsk_zc;
1828 
1829 	do {
1830 		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
1831 		if (ret < 0) {
1832 			if (ret == -EBUSY &&
1833 			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1834 				continue;
1835 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1836 			return;
1837 		}
1838 		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
1839 		retries = 0;
1840 	} while (ret);
1841 }
1842 
1843 static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
1844 {
1845 	int i;
1846 
1847 	/* Drain the buffer pool */
1848 	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
1849 	dpaa2_eth_drain_bufs(priv, bpid, 1);
1850 
1851 	/* Setup to zero the buffer count of all channels which were
1852 	 * using this buffer pool.
1853 	 */
1854 	for (i = 0; i < priv->num_channels; i++)
1855 		if (priv->channel[i]->bp->bpid == bpid)
1856 			priv->channel[i]->buf_count = 0;
1857 }
1858 
1859 static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
1860 {
1861 	int i;
1862 
1863 	for (i = 0; i < priv->num_bps; i++)
1864 		dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
1865 }
1866 
1867 /* Function is called from softirq context only, so we don't need to guard
1868  * the access to percpu count
1869  */
1870 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1871 				 struct dpaa2_eth_channel *ch)
1872 {
1873 	int new_count;
1874 
1875 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1876 		return 0;
1877 
1878 	do {
1879 		new_count = dpaa2_eth_add_bufs(priv, ch);
1880 		if (unlikely(!new_count)) {
1881 			/* Out of memory; abort for now, we'll try later on */
1882 			break;
1883 		}
1884 		ch->buf_count += new_count;
1885 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1886 
1887 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1888 		return -ENOMEM;
1889 
1890 	return 0;
1891 }
1892 
1893 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1894 {
1895 	struct dpaa2_eth_sgt_cache *sgt_cache;
1896 	u16 count;
1897 	int k, i;
1898 
1899 	for_each_possible_cpu(k) {
1900 		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1901 		count = sgt_cache->count;
1902 
1903 		for (i = 0; i < count; i++)
1904 			skb_free_frag(sgt_cache->buf[i]);
1905 		sgt_cache->count = 0;
1906 	}
1907 }
1908 
1909 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1910 {
1911 	int err;
1912 	int dequeues = -1;
1913 
1914 	/* Retry while portal is busy */
1915 	do {
1916 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1917 						    ch->store);
1918 		dequeues++;
1919 		cpu_relax();
1920 	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1921 
1922 	ch->stats.dequeue_portal_busy += dequeues;
1923 	if (unlikely(err))
1924 		ch->stats.pull_err++;
1925 
1926 	return err;
1927 }
1928 
1929 /* NAPI poll routine
1930  *
1931  * Frames are dequeued from the QMan channel associated with this NAPI context.
1932  * Rx, Tx confirmation and (if configured) Rx error frames all count
1933  * towards the NAPI budget.
1934  */
1935 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1936 {
1937 	struct dpaa2_eth_channel *ch;
1938 	struct dpaa2_eth_priv *priv;
1939 	int rx_cleaned = 0, txconf_cleaned = 0;
1940 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1941 	struct netdev_queue *nq;
1942 	int store_cleaned, work_done;
1943 	bool work_done_zc = false;
1944 	struct list_head rx_list;
1945 	int retries = 0;
1946 	u16 flowid;
1947 	int err;
1948 
1949 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
1950 	ch->xdp.res = 0;
1951 	priv = ch->priv;
1952 
1953 	INIT_LIST_HEAD(&rx_list);
1954 	ch->rx_list = &rx_list;
1955 
1956 	if (ch->xsk_zc) {
1957 		work_done_zc = dpaa2_xsk_tx(priv, ch);
1958 		/* If we reached the XSK Tx per NAPI threshold, we're done */
1959 		if (work_done_zc) {
1960 			work_done = budget;
1961 			goto out;
1962 		}
1963 	}
1964 
1965 	do {
1966 		err = dpaa2_eth_pull_channel(ch);
1967 		if (unlikely(err))
1968 			break;
1969 
1970 		/* Refill pool if appropriate */
1971 		dpaa2_eth_refill_pool(priv, ch);
1972 
1973 		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1974 		if (store_cleaned <= 0)
1975 			break;
1976 		if (fq->type == DPAA2_RX_FQ) {
1977 			rx_cleaned += store_cleaned;
1978 			flowid = fq->flowid;
1979 		} else {
1980 			txconf_cleaned += store_cleaned;
1981 			/* We have a single Tx conf FQ on this channel */
1982 			txc_fq = fq;
1983 		}
1984 
1985 		/* If we either consumed the whole NAPI budget with Rx frames
1986 		 * or we reached the Tx confirmations threshold, we're done.
1987 		 */
1988 		if (rx_cleaned >= budget ||
1989 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1990 			work_done = budget;
1991 			goto out;
1992 		}
1993 	} while (store_cleaned);
1994 
1995 	/* Update NET DIM with the values for this CDAN */
1996 	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
1997 				ch->stats.bytes_per_cdan);
1998 	ch->stats.frames_per_cdan = 0;
1999 	ch->stats.bytes_per_cdan = 0;
2000 
2001 	/* We didn't consume the entire budget, so finish napi and
2002 	 * re-enable data availability notifications
2003 	 */
2004 	napi_complete_done(napi, rx_cleaned);
2005 	do {
2006 		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
2007 		cpu_relax();
2008 	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
2009 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
2010 		  ch->nctx.desired_cpu);
2011 
2012 	work_done = max(rx_cleaned, 1);
2013 
2014 out:
2015 	netif_receive_skb_list(ch->rx_list);
2016 
2017 	if (ch->xsk_tx_pkts_sent) {
2018 		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
2019 		ch->xsk_tx_pkts_sent = 0;
2020 	}
2021 
2022 	if (txc_fq && txc_fq->dq_frames) {
2023 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
2024 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
2025 					  txc_fq->dq_bytes);
2026 		txc_fq->dq_frames = 0;
2027 		txc_fq->dq_bytes = 0;
2028 	}
2029 
2030 	if (ch->xdp.res & XDP_REDIRECT)
2031 		xdp_do_flush_map();
2032 	else if (rx_cleaned && ch->xdp.res & XDP_TX)
2033 		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
2034 
2035 	return work_done;
2036 }
2037 
2038 static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
2039 {
2040 	struct dpaa2_eth_channel *ch;
2041 	int i;
2042 
2043 	for (i = 0; i < priv->num_channels; i++) {
2044 		ch = priv->channel[i];
2045 		napi_enable(&ch->napi);
2046 	}
2047 }
2048 
2049 static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
2050 {
2051 	struct dpaa2_eth_channel *ch;
2052 	int i;
2053 
2054 	for (i = 0; i < priv->num_channels; i++) {
2055 		ch = priv->channel[i];
2056 		napi_disable(&ch->napi);
2057 	}
2058 }
2059 
2060 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
2061 			       bool tx_pause, bool pfc)
2062 {
2063 	struct dpni_taildrop td = {0};
2064 	struct dpaa2_eth_fq *fq;
2065 	int i, err;
2066 
2067 	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
2068 	 * flow control is disabled (as it might interfere with either the
2069 	 * buffer pool depletion trigger for pause frames or with the group
2070 	 * congestion trigger for PFC frames)
2071 	 */
2072 	td.enable = !tx_pause;
2073 	if (priv->rx_fqtd_enabled == td.enable)
2074 		goto set_cgtd;
2075 
2076 	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
2077 	td.units = DPNI_CONGESTION_UNIT_BYTES;
2078 
2079 	for (i = 0; i < priv->num_fqs; i++) {
2080 		fq = &priv->fq[i];
2081 		if (fq->type != DPAA2_RX_FQ)
2082 			continue;
2083 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2084 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
2085 					fq->tc, fq->flowid, &td);
2086 		if (err) {
2087 			netdev_err(priv->net_dev,
2088 				   "dpni_set_taildrop(FQ) failed\n");
2089 			return;
2090 		}
2091 	}
2092 
2093 	priv->rx_fqtd_enabled = td.enable;
2094 
2095 set_cgtd:
2096 	/* Congestion group taildrop: threshold is in frames, per group
2097 	 * of FQs belonging to the same traffic class
2098 	 * Enabled if general Tx pause disabled or if PFCs are enabled
2099 	 * (congestion group threhsold for PFC generation is lower than the
2100 	 * CG taildrop threshold, so it won't interfere with it; we also
2101 	 * want frames in non-PFC enabled traffic classes to be kept in check)
2102 	 */
2103 	td.enable = !tx_pause || pfc;
2104 	if (priv->rx_cgtd_enabled == td.enable)
2105 		return;
2106 
2107 	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
2108 	td.units = DPNI_CONGESTION_UNIT_FRAMES;
2109 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2110 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2111 					DPNI_CP_GROUP, DPNI_QUEUE_RX,
2112 					i, 0, &td);
2113 		if (err) {
2114 			netdev_err(priv->net_dev,
2115 				   "dpni_set_taildrop(CG) failed\n");
2116 			return;
2117 		}
2118 	}
2119 
2120 	priv->rx_cgtd_enabled = td.enable;
2121 }
2122 
2123 static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
2124 {
2125 	struct dpni_link_state state = {0};
2126 	bool tx_pause;
2127 	int err;
2128 
2129 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2130 	if (unlikely(err)) {
2131 		netdev_err(priv->net_dev,
2132 			   "dpni_get_link_state() failed\n");
2133 		return err;
2134 	}
2135 
2136 	/* If Tx pause frame settings have changed, we need to update
2137 	 * Rx FQ taildrop configuration as well. We configure taildrop
2138 	 * only when pause frame generation is disabled.
2139 	 */
2140 	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
2141 	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
2142 
2143 	/* When we manage the MAC/PHY using phylink there is no need
2144 	 * to manually update the netif_carrier.
2145 	 */
2146 	if (dpaa2_eth_is_type_phy(priv))
2147 		goto out;
2148 
2149 	/* Chech link state; speed / duplex changes are not treated yet */
2150 	if (priv->link_state.up == state.up)
2151 		goto out;
2152 
2153 	if (state.up) {
2154 		netif_carrier_on(priv->net_dev);
2155 		netif_tx_start_all_queues(priv->net_dev);
2156 	} else {
2157 		netif_tx_stop_all_queues(priv->net_dev);
2158 		netif_carrier_off(priv->net_dev);
2159 	}
2160 
2161 	netdev_info(priv->net_dev, "Link Event: state %s\n",
2162 		    state.up ? "up" : "down");
2163 
2164 out:
2165 	priv->link_state = state;
2166 
2167 	return 0;
2168 }
2169 
2170 static int dpaa2_eth_open(struct net_device *net_dev)
2171 {
2172 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2173 	int err;
2174 
2175 	dpaa2_eth_seed_pools(priv);
2176 
2177 	if (!dpaa2_eth_is_type_phy(priv)) {
2178 		/* We'll only start the txqs when the link is actually ready;
2179 		 * make sure we don't race against the link up notification,
2180 		 * which may come immediately after dpni_enable();
2181 		 */
2182 		netif_tx_stop_all_queues(net_dev);
2183 
2184 		/* Also, explicitly set carrier off, otherwise
2185 		 * netif_carrier_ok() will return true and cause 'ip link show'
2186 		 * to report the LOWER_UP flag, even though the link
2187 		 * notification wasn't even received.
2188 		 */
2189 		netif_carrier_off(net_dev);
2190 	}
2191 	dpaa2_eth_enable_ch_napi(priv);
2192 
2193 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2194 	if (err < 0) {
2195 		netdev_err(net_dev, "dpni_enable() failed\n");
2196 		goto enable_err;
2197 	}
2198 
2199 	if (dpaa2_eth_is_type_phy(priv)) {
2200 		dpaa2_mac_start(priv->mac);
2201 		phylink_start(priv->mac->phylink);
2202 	}
2203 
2204 	return 0;
2205 
2206 enable_err:
2207 	dpaa2_eth_disable_ch_napi(priv);
2208 	dpaa2_eth_drain_pools(priv);
2209 	return err;
2210 }
2211 
2212 /* Total number of in-flight frames on ingress queues */
2213 static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
2214 {
2215 	struct dpaa2_eth_fq *fq;
2216 	u32 fcnt = 0, bcnt = 0, total = 0;
2217 	int i, err;
2218 
2219 	for (i = 0; i < priv->num_fqs; i++) {
2220 		fq = &priv->fq[i];
2221 		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2222 		if (err) {
2223 			netdev_warn(priv->net_dev, "query_fq_count failed");
2224 			break;
2225 		}
2226 		total += fcnt;
2227 	}
2228 
2229 	return total;
2230 }
2231 
2232 static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
2233 {
2234 	int retries = 10;
2235 	u32 pending;
2236 
2237 	do {
2238 		pending = dpaa2_eth_ingress_fq_count(priv);
2239 		if (pending)
2240 			msleep(100);
2241 	} while (pending && --retries);
2242 }
2243 
2244 #define DPNI_TX_PENDING_VER_MAJOR	7
2245 #define DPNI_TX_PENDING_VER_MINOR	13
2246 static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
2247 {
2248 	union dpni_statistics stats;
2249 	int retries = 10;
2250 	int err;
2251 
2252 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
2253 				   DPNI_TX_PENDING_VER_MINOR) < 0)
2254 		goto out;
2255 
2256 	do {
2257 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
2258 					  &stats);
2259 		if (err)
2260 			goto out;
2261 		if (stats.page_6.tx_pending_frames == 0)
2262 			return;
2263 	} while (--retries);
2264 
2265 out:
2266 	msleep(500);
2267 }
2268 
2269 static int dpaa2_eth_stop(struct net_device *net_dev)
2270 {
2271 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2272 	int dpni_enabled = 0;
2273 	int retries = 10;
2274 
2275 	if (dpaa2_eth_is_type_phy(priv)) {
2276 		phylink_stop(priv->mac->phylink);
2277 		dpaa2_mac_stop(priv->mac);
2278 	} else {
2279 		netif_tx_stop_all_queues(net_dev);
2280 		netif_carrier_off(net_dev);
2281 	}
2282 
2283 	/* On dpni_disable(), the MC firmware will:
2284 	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
2285 	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
2286 	 * of all in flight Tx frames is finished (and corresponding Tx conf
2287 	 * frames are enqueued back to software)
2288 	 *
2289 	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
2290 	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
2291 	 * and Tx conf queues are consumed on NAPI poll.
2292 	 */
2293 	dpaa2_eth_wait_for_egress_fq_empty(priv);
2294 
2295 	do {
2296 		dpni_disable(priv->mc_io, 0, priv->mc_token);
2297 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2298 		if (dpni_enabled)
2299 			/* Allow the hardware some slack */
2300 			msleep(100);
2301 	} while (dpni_enabled && --retries);
2302 	if (!retries) {
2303 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2304 		/* Must go on and disable NAPI nonetheless, so we don't crash at
2305 		 * the next "ifconfig up"
2306 		 */
2307 	}
2308 
2309 	dpaa2_eth_wait_for_ingress_fq_empty(priv);
2310 	dpaa2_eth_disable_ch_napi(priv);
2311 
2312 	/* Empty the buffer pool */
2313 	dpaa2_eth_drain_pools(priv);
2314 
2315 	/* Empty the Scatter-Gather Buffer cache */
2316 	dpaa2_eth_sgt_cache_drain(priv);
2317 
2318 	return 0;
2319 }
2320 
2321 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2322 {
2323 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2324 	struct device *dev = net_dev->dev.parent;
2325 	int err;
2326 
2327 	err = eth_mac_addr(net_dev, addr);
2328 	if (err < 0) {
2329 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2330 		return err;
2331 	}
2332 
2333 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2334 					net_dev->dev_addr);
2335 	if (err) {
2336 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2337 		return err;
2338 	}
2339 
2340 	return 0;
2341 }
2342 
2343 /** Fill in counters maintained by the GPP driver. These may be different from
2344  * the hardware counters obtained by ethtool.
2345  */
2346 static void dpaa2_eth_get_stats(struct net_device *net_dev,
2347 				struct rtnl_link_stats64 *stats)
2348 {
2349 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2350 	struct rtnl_link_stats64 *percpu_stats;
2351 	u64 *cpustats;
2352 	u64 *netstats = (u64 *)stats;
2353 	int i, j;
2354 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2355 
2356 	for_each_possible_cpu(i) {
2357 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2358 		cpustats = (u64 *)percpu_stats;
2359 		for (j = 0; j < num; j++)
2360 			netstats[j] += cpustats[j];
2361 	}
2362 }
2363 
2364 /* Copy mac unicast addresses from @net_dev to @priv.
2365  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2366  */
2367 static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2368 				     struct dpaa2_eth_priv *priv)
2369 {
2370 	struct netdev_hw_addr *ha;
2371 	int err;
2372 
2373 	netdev_for_each_uc_addr(ha, net_dev) {
2374 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2375 					ha->addr);
2376 		if (err)
2377 			netdev_warn(priv->net_dev,
2378 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2379 				    ha->addr, err);
2380 	}
2381 }
2382 
2383 /* Copy mac multicast addresses from @net_dev to @priv
2384  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2385  */
2386 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2387 				     struct dpaa2_eth_priv *priv)
2388 {
2389 	struct netdev_hw_addr *ha;
2390 	int err;
2391 
2392 	netdev_for_each_mc_addr(ha, net_dev) {
2393 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2394 					ha->addr);
2395 		if (err)
2396 			netdev_warn(priv->net_dev,
2397 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2398 				    ha->addr, err);
2399 	}
2400 }
2401 
2402 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2403 				__be16 vlan_proto, u16 vid)
2404 {
2405 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2406 	int err;
2407 
2408 	err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2409 			       vid, 0, 0, 0);
2410 
2411 	if (err) {
2412 		netdev_warn(priv->net_dev,
2413 			    "Could not add the vlan id %u\n",
2414 			    vid);
2415 		return err;
2416 	}
2417 
2418 	return 0;
2419 }
2420 
2421 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2422 				 __be16 vlan_proto, u16 vid)
2423 {
2424 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2425 	int err;
2426 
2427 	err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2428 
2429 	if (err) {
2430 		netdev_warn(priv->net_dev,
2431 			    "Could not remove the vlan id %u\n",
2432 			    vid);
2433 		return err;
2434 	}
2435 
2436 	return 0;
2437 }
2438 
2439 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2440 {
2441 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2442 	int uc_count = netdev_uc_count(net_dev);
2443 	int mc_count = netdev_mc_count(net_dev);
2444 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2445 	u32 options = priv->dpni_attrs.options;
2446 	u16 mc_token = priv->mc_token;
2447 	struct fsl_mc_io *mc_io = priv->mc_io;
2448 	int err;
2449 
2450 	/* Basic sanity checks; these probably indicate a misconfiguration */
2451 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2452 		netdev_info(net_dev,
2453 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2454 			    max_mac);
2455 
2456 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
2457 	if (uc_count > max_mac) {
2458 		netdev_info(net_dev,
2459 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2460 			    uc_count, max_mac);
2461 		goto force_promisc;
2462 	}
2463 	if (mc_count + uc_count > max_mac) {
2464 		netdev_info(net_dev,
2465 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2466 			    uc_count + mc_count, max_mac);
2467 		goto force_mc_promisc;
2468 	}
2469 
2470 	/* Adjust promisc settings due to flag combinations */
2471 	if (net_dev->flags & IFF_PROMISC)
2472 		goto force_promisc;
2473 	if (net_dev->flags & IFF_ALLMULTI) {
2474 		/* First, rebuild unicast filtering table. This should be done
2475 		 * in promisc mode, in order to avoid frame loss while we
2476 		 * progressively add entries to the table.
2477 		 * We don't know whether we had been in promisc already, and
2478 		 * making an MC call to find out is expensive; so set uc promisc
2479 		 * nonetheless.
2480 		 */
2481 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2482 		if (err)
2483 			netdev_warn(net_dev, "Can't set uc promisc\n");
2484 
2485 		/* Actual uc table reconstruction. */
2486 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2487 		if (err)
2488 			netdev_warn(net_dev, "Can't clear uc filters\n");
2489 		dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2490 
2491 		/* Finally, clear uc promisc and set mc promisc as requested. */
2492 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2493 		if (err)
2494 			netdev_warn(net_dev, "Can't clear uc promisc\n");
2495 		goto force_mc_promisc;
2496 	}
2497 
2498 	/* Neither unicast, nor multicast promisc will be on... eventually.
2499 	 * For now, rebuild mac filtering tables while forcing both of them on.
2500 	 */
2501 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2502 	if (err)
2503 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2504 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2505 	if (err)
2506 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2507 
2508 	/* Actual mac filtering tables reconstruction */
2509 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2510 	if (err)
2511 		netdev_warn(net_dev, "Can't clear mac filters\n");
2512 	dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2513 	dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2514 
2515 	/* Now we can clear both ucast and mcast promisc, without risking
2516 	 * to drop legitimate frames anymore.
2517 	 */
2518 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2519 	if (err)
2520 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
2521 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2522 	if (err)
2523 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
2524 
2525 	return;
2526 
2527 force_promisc:
2528 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2529 	if (err)
2530 		netdev_warn(net_dev, "Can't set ucast promisc\n");
2531 force_mc_promisc:
2532 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2533 	if (err)
2534 		netdev_warn(net_dev, "Can't set mcast promisc\n");
2535 }
2536 
2537 static int dpaa2_eth_set_features(struct net_device *net_dev,
2538 				  netdev_features_t features)
2539 {
2540 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2541 	netdev_features_t changed = features ^ net_dev->features;
2542 	bool enable;
2543 	int err;
2544 
2545 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2546 		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2547 		err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2548 		if (err)
2549 			return err;
2550 	}
2551 
2552 	if (changed & NETIF_F_RXCSUM) {
2553 		enable = !!(features & NETIF_F_RXCSUM);
2554 		err = dpaa2_eth_set_rx_csum(priv, enable);
2555 		if (err)
2556 			return err;
2557 	}
2558 
2559 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2560 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2561 		err = dpaa2_eth_set_tx_csum(priv, enable);
2562 		if (err)
2563 			return err;
2564 	}
2565 
2566 	return 0;
2567 }
2568 
2569 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2570 {
2571 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2572 	struct hwtstamp_config config;
2573 
2574 	if (!dpaa2_ptp)
2575 		return -EINVAL;
2576 
2577 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2578 		return -EFAULT;
2579 
2580 	switch (config.tx_type) {
2581 	case HWTSTAMP_TX_OFF:
2582 	case HWTSTAMP_TX_ON:
2583 	case HWTSTAMP_TX_ONESTEP_SYNC:
2584 		priv->tx_tstamp_type = config.tx_type;
2585 		break;
2586 	default:
2587 		return -ERANGE;
2588 	}
2589 
2590 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2591 		priv->rx_tstamp = false;
2592 	} else {
2593 		priv->rx_tstamp = true;
2594 		/* TS is set for all frame types, not only those requested */
2595 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2596 	}
2597 
2598 	if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2599 		dpaa2_ptp_onestep_reg_update_method(priv);
2600 
2601 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2602 			-EFAULT : 0;
2603 }
2604 
2605 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2606 {
2607 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2608 
2609 	if (cmd == SIOCSHWTSTAMP)
2610 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2611 
2612 	if (dpaa2_eth_is_type_phy(priv))
2613 		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2614 
2615 	return -EOPNOTSUPP;
2616 }
2617 
2618 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2619 {
2620 	int mfl, linear_mfl;
2621 
2622 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2623 	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2624 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2625 
2626 	if (mfl > linear_mfl) {
2627 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2628 			    linear_mfl - VLAN_ETH_HLEN);
2629 		return false;
2630 	}
2631 
2632 	return true;
2633 }
2634 
2635 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2636 {
2637 	int mfl, err;
2638 
2639 	/* We enforce a maximum Rx frame length based on MTU only if we have
2640 	 * an XDP program attached (in order to avoid Rx S/G frames).
2641 	 * Otherwise, we accept all incoming frames as long as they are not
2642 	 * larger than maximum size supported in hardware
2643 	 */
2644 	if (has_xdp)
2645 		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2646 	else
2647 		mfl = DPAA2_ETH_MFL;
2648 
2649 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2650 	if (err) {
2651 		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2652 		return err;
2653 	}
2654 
2655 	return 0;
2656 }
2657 
2658 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2659 {
2660 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2661 	int err;
2662 
2663 	if (!priv->xdp_prog)
2664 		goto out;
2665 
2666 	if (!xdp_mtu_valid(priv, new_mtu))
2667 		return -EINVAL;
2668 
2669 	err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2670 	if (err)
2671 		return err;
2672 
2673 out:
2674 	dev->mtu = new_mtu;
2675 	return 0;
2676 }
2677 
2678 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2679 {
2680 	struct dpni_buffer_layout buf_layout = {0};
2681 	int err;
2682 
2683 	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2684 				     DPNI_QUEUE_RX, &buf_layout);
2685 	if (err) {
2686 		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2687 		return err;
2688 	}
2689 
2690 	/* Reserve extra headroom for XDP header size changes */
2691 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2692 				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
2693 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2694 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2695 				     DPNI_QUEUE_RX, &buf_layout);
2696 	if (err) {
2697 		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2698 		return err;
2699 	}
2700 
2701 	return 0;
2702 }
2703 
2704 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2705 {
2706 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2707 	struct dpaa2_eth_channel *ch;
2708 	struct bpf_prog *old;
2709 	bool up, need_update;
2710 	int i, err;
2711 
2712 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
2713 		return -EINVAL;
2714 
2715 	if (prog)
2716 		bpf_prog_add(prog, priv->num_channels);
2717 
2718 	up = netif_running(dev);
2719 	need_update = (!!priv->xdp_prog != !!prog);
2720 
2721 	if (up)
2722 		dev_close(dev);
2723 
2724 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2725 	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2726 	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2727 	 * so we are sure no old format buffers will be used from now on.
2728 	 */
2729 	if (need_update) {
2730 		err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2731 		if (err)
2732 			goto out_err;
2733 		err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2734 		if (err)
2735 			goto out_err;
2736 	}
2737 
2738 	old = xchg(&priv->xdp_prog, prog);
2739 	if (old)
2740 		bpf_prog_put(old);
2741 
2742 	for (i = 0; i < priv->num_channels; i++) {
2743 		ch = priv->channel[i];
2744 		old = xchg(&ch->xdp.prog, prog);
2745 		if (old)
2746 			bpf_prog_put(old);
2747 	}
2748 
2749 	if (up) {
2750 		err = dev_open(dev, NULL);
2751 		if (err)
2752 			return err;
2753 	}
2754 
2755 	return 0;
2756 
2757 out_err:
2758 	if (prog)
2759 		bpf_prog_sub(prog, priv->num_channels);
2760 	if (up)
2761 		dev_open(dev, NULL);
2762 
2763 	return err;
2764 }
2765 
2766 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2767 {
2768 	switch (xdp->command) {
2769 	case XDP_SETUP_PROG:
2770 		return dpaa2_eth_setup_xdp(dev, xdp->prog);
2771 	case XDP_SETUP_XSK_POOL:
2772 		return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
2773 	default:
2774 		return -EINVAL;
2775 	}
2776 
2777 	return 0;
2778 }
2779 
2780 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2781 				   struct xdp_frame *xdpf,
2782 				   struct dpaa2_fd *fd)
2783 {
2784 	struct device *dev = net_dev->dev.parent;
2785 	unsigned int needed_headroom;
2786 	struct dpaa2_eth_swa *swa;
2787 	void *buffer_start, *aligned_start;
2788 	dma_addr_t addr;
2789 
2790 	/* We require a minimum headroom to be able to transmit the frame.
2791 	 * Otherwise return an error and let the original net_device handle it
2792 	 */
2793 	needed_headroom = dpaa2_eth_needed_headroom(NULL);
2794 	if (xdpf->headroom < needed_headroom)
2795 		return -EINVAL;
2796 
2797 	/* Setup the FD fields */
2798 	memset(fd, 0, sizeof(*fd));
2799 
2800 	/* Align FD address, if possible */
2801 	buffer_start = xdpf->data - needed_headroom;
2802 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2803 				  DPAA2_ETH_TX_BUF_ALIGN);
2804 	if (aligned_start >= xdpf->data - xdpf->headroom)
2805 		buffer_start = aligned_start;
2806 
2807 	swa = (struct dpaa2_eth_swa *)buffer_start;
2808 	/* fill in necessary fields here */
2809 	swa->type = DPAA2_ETH_SWA_XDP;
2810 	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2811 	swa->xdp.xdpf = xdpf;
2812 
2813 	addr = dma_map_single(dev, buffer_start,
2814 			      swa->xdp.dma_size,
2815 			      DMA_BIDIRECTIONAL);
2816 	if (unlikely(dma_mapping_error(dev, addr)))
2817 		return -ENOMEM;
2818 
2819 	dpaa2_fd_set_addr(fd, addr);
2820 	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2821 	dpaa2_fd_set_len(fd, xdpf->len);
2822 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
2823 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2824 
2825 	return 0;
2826 }
2827 
2828 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2829 			      struct xdp_frame **frames, u32 flags)
2830 {
2831 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2832 	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2833 	struct rtnl_link_stats64 *percpu_stats;
2834 	struct dpaa2_eth_fq *fq;
2835 	struct dpaa2_fd *fds;
2836 	int enqueued, i, err;
2837 
2838 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2839 		return -EINVAL;
2840 
2841 	if (!netif_running(net_dev))
2842 		return -ENETDOWN;
2843 
2844 	fq = &priv->fq[smp_processor_id()];
2845 	xdp_redirect_fds = &fq->xdp_redirect_fds;
2846 	fds = xdp_redirect_fds->fds;
2847 
2848 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
2849 
2850 	/* create a FD for each xdp_frame in the list received */
2851 	for (i = 0; i < n; i++) {
2852 		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2853 		if (err)
2854 			break;
2855 	}
2856 	xdp_redirect_fds->num = i;
2857 
2858 	/* enqueue all the frame descriptors */
2859 	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2860 
2861 	/* update statistics */
2862 	percpu_stats->tx_packets += enqueued;
2863 	for (i = 0; i < enqueued; i++)
2864 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2865 
2866 	return enqueued;
2867 }
2868 
2869 static int update_xps(struct dpaa2_eth_priv *priv)
2870 {
2871 	struct net_device *net_dev = priv->net_dev;
2872 	struct cpumask xps_mask;
2873 	struct dpaa2_eth_fq *fq;
2874 	int i, num_queues, netdev_queues;
2875 	int err = 0;
2876 
2877 	num_queues = dpaa2_eth_queue_count(priv);
2878 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2879 
2880 	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2881 	 * queues, so only process those
2882 	 */
2883 	for (i = 0; i < netdev_queues; i++) {
2884 		fq = &priv->fq[i % num_queues];
2885 
2886 		cpumask_clear(&xps_mask);
2887 		cpumask_set_cpu(fq->target_cpu, &xps_mask);
2888 
2889 		err = netif_set_xps_queue(net_dev, &xps_mask, i);
2890 		if (err) {
2891 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
2892 			break;
2893 		}
2894 	}
2895 
2896 	return err;
2897 }
2898 
2899 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2900 				  struct tc_mqprio_qopt *mqprio)
2901 {
2902 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2903 	u8 num_tc, num_queues;
2904 	int i;
2905 
2906 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2907 	num_queues = dpaa2_eth_queue_count(priv);
2908 	num_tc = mqprio->num_tc;
2909 
2910 	if (num_tc == net_dev->num_tc)
2911 		return 0;
2912 
2913 	if (num_tc  > dpaa2_eth_tc_count(priv)) {
2914 		netdev_err(net_dev, "Max %d traffic classes supported\n",
2915 			   dpaa2_eth_tc_count(priv));
2916 		return -EOPNOTSUPP;
2917 	}
2918 
2919 	if (!num_tc) {
2920 		netdev_reset_tc(net_dev);
2921 		netif_set_real_num_tx_queues(net_dev, num_queues);
2922 		goto out;
2923 	}
2924 
2925 	netdev_set_num_tc(net_dev, num_tc);
2926 	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2927 
2928 	for (i = 0; i < num_tc; i++)
2929 		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2930 
2931 out:
2932 	update_xps(priv);
2933 
2934 	return 0;
2935 }
2936 
2937 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2938 
2939 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2940 {
2941 	struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2942 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2943 	struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2944 	struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2945 	int err;
2946 
2947 	if (p->command == TC_TBF_STATS)
2948 		return -EOPNOTSUPP;
2949 
2950 	/* Only per port Tx shaping */
2951 	if (p->parent != TC_H_ROOT)
2952 		return -EOPNOTSUPP;
2953 
2954 	if (p->command == TC_TBF_REPLACE) {
2955 		if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2956 			netdev_err(net_dev, "burst size cannot be greater than %d\n",
2957 				   DPAA2_ETH_MAX_BURST_SIZE);
2958 			return -EINVAL;
2959 		}
2960 
2961 		tx_cr_shaper.max_burst_size = cfg->max_size;
2962 		/* The TBF interface is in bytes/s, whereas DPAA2 expects the
2963 		 * rate in Mbits/s
2964 		 */
2965 		tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2966 	}
2967 
2968 	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2969 				  &tx_er_shaper, 0);
2970 	if (err) {
2971 		netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2972 		return err;
2973 	}
2974 
2975 	return 0;
2976 }
2977 
2978 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2979 			      enum tc_setup_type type, void *type_data)
2980 {
2981 	switch (type) {
2982 	case TC_SETUP_QDISC_MQPRIO:
2983 		return dpaa2_eth_setup_mqprio(net_dev, type_data);
2984 	case TC_SETUP_QDISC_TBF:
2985 		return dpaa2_eth_setup_tbf(net_dev, type_data);
2986 	default:
2987 		return -EOPNOTSUPP;
2988 	}
2989 }
2990 
2991 static const struct net_device_ops dpaa2_eth_ops = {
2992 	.ndo_open = dpaa2_eth_open,
2993 	.ndo_start_xmit = dpaa2_eth_tx,
2994 	.ndo_stop = dpaa2_eth_stop,
2995 	.ndo_set_mac_address = dpaa2_eth_set_addr,
2996 	.ndo_get_stats64 = dpaa2_eth_get_stats,
2997 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
2998 	.ndo_set_features = dpaa2_eth_set_features,
2999 	.ndo_eth_ioctl = dpaa2_eth_ioctl,
3000 	.ndo_change_mtu = dpaa2_eth_change_mtu,
3001 	.ndo_bpf = dpaa2_eth_xdp,
3002 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3003 	.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
3004 	.ndo_setup_tc = dpaa2_eth_setup_tc,
3005 	.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
3006 	.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
3007 };
3008 
3009 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3010 {
3011 	struct dpaa2_eth_channel *ch;
3012 
3013 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
3014 
3015 	/* Update NAPI statistics */
3016 	ch->stats.cdan++;
3017 
3018 	/* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
3019 	 * so that it can be rescheduled again.
3020 	 */
3021 	if (!napi_if_scheduled_mark_missed(&ch->napi))
3022 		napi_schedule(&ch->napi);
3023 }
3024 
3025 /* Allocate and configure a DPCON object */
3026 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
3027 {
3028 	struct fsl_mc_device *dpcon;
3029 	struct device *dev = priv->net_dev->dev.parent;
3030 	int err;
3031 
3032 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
3033 				     FSL_MC_POOL_DPCON, &dpcon);
3034 	if (err) {
3035 		if (err == -ENXIO)
3036 			err = -EPROBE_DEFER;
3037 		else
3038 			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
3039 		return ERR_PTR(err);
3040 	}
3041 
3042 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3043 	if (err) {
3044 		dev_err(dev, "dpcon_open() failed\n");
3045 		goto free;
3046 	}
3047 
3048 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3049 	if (err) {
3050 		dev_err(dev, "dpcon_reset() failed\n");
3051 		goto close;
3052 	}
3053 
3054 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3055 	if (err) {
3056 		dev_err(dev, "dpcon_enable() failed\n");
3057 		goto close;
3058 	}
3059 
3060 	return dpcon;
3061 
3062 close:
3063 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3064 free:
3065 	fsl_mc_object_free(dpcon);
3066 
3067 	return ERR_PTR(err);
3068 }
3069 
3070 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
3071 				 struct fsl_mc_device *dpcon)
3072 {
3073 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
3074 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3075 	fsl_mc_object_free(dpcon);
3076 }
3077 
3078 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
3079 {
3080 	struct dpaa2_eth_channel *channel;
3081 	struct dpcon_attr attr;
3082 	struct device *dev = priv->net_dev->dev.parent;
3083 	int err;
3084 
3085 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
3086 	if (!channel)
3087 		return NULL;
3088 
3089 	channel->dpcon = dpaa2_eth_setup_dpcon(priv);
3090 	if (IS_ERR(channel->dpcon)) {
3091 		err = PTR_ERR(channel->dpcon);
3092 		goto err_setup;
3093 	}
3094 
3095 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
3096 				   &attr);
3097 	if (err) {
3098 		dev_err(dev, "dpcon_get_attributes() failed\n");
3099 		goto err_get_attr;
3100 	}
3101 
3102 	channel->dpcon_id = attr.id;
3103 	channel->ch_id = attr.qbman_ch_id;
3104 	channel->priv = priv;
3105 
3106 	return channel;
3107 
3108 err_get_attr:
3109 	dpaa2_eth_free_dpcon(priv, channel->dpcon);
3110 err_setup:
3111 	kfree(channel);
3112 	return ERR_PTR(err);
3113 }
3114 
3115 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
3116 				   struct dpaa2_eth_channel *channel)
3117 {
3118 	dpaa2_eth_free_dpcon(priv, channel->dpcon);
3119 	kfree(channel);
3120 }
3121 
3122 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
3123  * and register data availability notifications
3124  */
3125 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3126 {
3127 	struct dpaa2_io_notification_ctx *nctx;
3128 	struct dpaa2_eth_channel *channel;
3129 	struct dpcon_notification_cfg dpcon_notif_cfg;
3130 	struct device *dev = priv->net_dev->dev.parent;
3131 	int i, err;
3132 
3133 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
3134 	 * many cores as possible, so we need one channel for each core
3135 	 * (unless there's fewer queues than cores, in which case the extra
3136 	 * channels would be wasted).
3137 	 * Allocate one channel per core and register it to the core's
3138 	 * affine DPIO. If not enough channels are available for all cores
3139 	 * or if some cores don't have an affine DPIO, there will be no
3140 	 * ingress frame processing on those cores.
3141 	 */
3142 	cpumask_clear(&priv->dpio_cpumask);
3143 	for_each_online_cpu(i) {
3144 		/* Try to allocate a channel */
3145 		channel = dpaa2_eth_alloc_channel(priv);
3146 		if (IS_ERR_OR_NULL(channel)) {
3147 			err = PTR_ERR_OR_ZERO(channel);
3148 			if (err != -EPROBE_DEFER)
3149 				dev_info(dev,
3150 					 "No affine channel for cpu %d and above\n", i);
3151 			goto err_alloc_ch;
3152 		}
3153 
3154 		priv->channel[priv->num_channels] = channel;
3155 
3156 		nctx = &channel->nctx;
3157 		nctx->is_cdan = 1;
3158 		nctx->cb = dpaa2_eth_cdan_cb;
3159 		nctx->id = channel->ch_id;
3160 		nctx->desired_cpu = i;
3161 
3162 		/* Register the new context */
3163 		channel->dpio = dpaa2_io_service_select(i);
3164 		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3165 		if (err) {
3166 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3167 			/* If no affine DPIO for this core, there's probably
3168 			 * none available for next cores either. Signal we want
3169 			 * to retry later, in case the DPIO devices weren't
3170 			 * probed yet.
3171 			 */
3172 			err = -EPROBE_DEFER;
3173 			goto err_service_reg;
3174 		}
3175 
3176 		/* Register DPCON notification with MC */
3177 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3178 		dpcon_notif_cfg.priority = 0;
3179 		dpcon_notif_cfg.user_ctx = nctx->qman64;
3180 		err = dpcon_set_notification(priv->mc_io, 0,
3181 					     channel->dpcon->mc_handle,
3182 					     &dpcon_notif_cfg);
3183 		if (err) {
3184 			dev_err(dev, "dpcon_set_notification failed()\n");
3185 			goto err_set_cdan;
3186 		}
3187 
3188 		/* If we managed to allocate a channel and also found an affine
3189 		 * DPIO for this core, add it to the final mask
3190 		 */
3191 		cpumask_set_cpu(i, &priv->dpio_cpumask);
3192 		priv->num_channels++;
3193 
3194 		/* Stop if we already have enough channels to accommodate all
3195 		 * RX and TX conf queues
3196 		 */
3197 		if (priv->num_channels == priv->dpni_attrs.num_queues)
3198 			break;
3199 	}
3200 
3201 	return 0;
3202 
3203 err_set_cdan:
3204 	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3205 err_service_reg:
3206 	dpaa2_eth_free_channel(priv, channel);
3207 err_alloc_ch:
3208 	if (err == -EPROBE_DEFER) {
3209 		for (i = 0; i < priv->num_channels; i++) {
3210 			channel = priv->channel[i];
3211 			nctx = &channel->nctx;
3212 			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3213 			dpaa2_eth_free_channel(priv, channel);
3214 		}
3215 		priv->num_channels = 0;
3216 		return err;
3217 	}
3218 
3219 	if (cpumask_empty(&priv->dpio_cpumask)) {
3220 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3221 		return -ENODEV;
3222 	}
3223 
3224 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3225 		 cpumask_pr_args(&priv->dpio_cpumask));
3226 
3227 	return 0;
3228 }
3229 
3230 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3231 {
3232 	struct device *dev = priv->net_dev->dev.parent;
3233 	struct dpaa2_eth_channel *ch;
3234 	int i;
3235 
3236 	/* deregister CDAN notifications and free channels */
3237 	for (i = 0; i < priv->num_channels; i++) {
3238 		ch = priv->channel[i];
3239 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3240 		dpaa2_eth_free_channel(priv, ch);
3241 	}
3242 }
3243 
3244 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3245 							      int cpu)
3246 {
3247 	struct device *dev = priv->net_dev->dev.parent;
3248 	int i;
3249 
3250 	for (i = 0; i < priv->num_channels; i++)
3251 		if (priv->channel[i]->nctx.desired_cpu == cpu)
3252 			return priv->channel[i];
3253 
3254 	/* We should never get here. Issue a warning and return
3255 	 * the first channel, because it's still better than nothing
3256 	 */
3257 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3258 
3259 	return priv->channel[0];
3260 }
3261 
3262 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3263 {
3264 	struct device *dev = priv->net_dev->dev.parent;
3265 	struct dpaa2_eth_fq *fq;
3266 	int rx_cpu, txc_cpu;
3267 	int i;
3268 
3269 	/* For each FQ, pick one channel/CPU to deliver frames to.
3270 	 * This may well change at runtime, either through irqbalance or
3271 	 * through direct user intervention.
3272 	 */
3273 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3274 
3275 	for (i = 0; i < priv->num_fqs; i++) {
3276 		fq = &priv->fq[i];
3277 		switch (fq->type) {
3278 		case DPAA2_RX_FQ:
3279 		case DPAA2_RX_ERR_FQ:
3280 			fq->target_cpu = rx_cpu;
3281 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3282 			if (rx_cpu >= nr_cpu_ids)
3283 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
3284 			break;
3285 		case DPAA2_TX_CONF_FQ:
3286 			fq->target_cpu = txc_cpu;
3287 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3288 			if (txc_cpu >= nr_cpu_ids)
3289 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
3290 			break;
3291 		default:
3292 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3293 		}
3294 		fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
3295 	}
3296 
3297 	update_xps(priv);
3298 }
3299 
3300 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3301 {
3302 	int i, j;
3303 
3304 	/* We have one TxConf FQ per Tx flow.
3305 	 * The number of Tx and Rx queues is the same.
3306 	 * Tx queues come first in the fq array.
3307 	 */
3308 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3309 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3310 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3311 		priv->fq[priv->num_fqs++].flowid = (u16)i;
3312 	}
3313 
3314 	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3315 		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3316 			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3317 			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3318 			priv->fq[priv->num_fqs].tc = (u8)j;
3319 			priv->fq[priv->num_fqs++].flowid = (u16)i;
3320 		}
3321 	}
3322 
3323 	/* We have exactly one Rx error queue per DPNI */
3324 	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3325 	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3326 
3327 	/* For each FQ, decide on which core to process incoming frames */
3328 	dpaa2_eth_set_fq_affinity(priv);
3329 }
3330 
3331 /* Allocate and configure a buffer pool */
3332 struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
3333 {
3334 	struct device *dev = priv->net_dev->dev.parent;
3335 	struct fsl_mc_device *dpbp_dev;
3336 	struct dpbp_attr dpbp_attrs;
3337 	struct dpaa2_eth_bp *bp;
3338 	int err;
3339 
3340 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3341 				     &dpbp_dev);
3342 	if (err) {
3343 		if (err == -ENXIO)
3344 			err = -EPROBE_DEFER;
3345 		else
3346 			dev_err(dev, "DPBP device allocation failed\n");
3347 		return ERR_PTR(err);
3348 	}
3349 
3350 	bp = kzalloc(sizeof(*bp), GFP_KERNEL);
3351 	if (!bp) {
3352 		err = -ENOMEM;
3353 		goto err_alloc;
3354 	}
3355 
3356 	err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
3357 			&dpbp_dev->mc_handle);
3358 	if (err) {
3359 		dev_err(dev, "dpbp_open() failed\n");
3360 		goto err_open;
3361 	}
3362 
3363 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3364 	if (err) {
3365 		dev_err(dev, "dpbp_reset() failed\n");
3366 		goto err_reset;
3367 	}
3368 
3369 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3370 	if (err) {
3371 		dev_err(dev, "dpbp_enable() failed\n");
3372 		goto err_enable;
3373 	}
3374 
3375 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3376 				  &dpbp_attrs);
3377 	if (err) {
3378 		dev_err(dev, "dpbp_get_attributes() failed\n");
3379 		goto err_get_attr;
3380 	}
3381 
3382 	bp->dev = dpbp_dev;
3383 	bp->bpid = dpbp_attrs.bpid;
3384 
3385 	return bp;
3386 
3387 err_get_attr:
3388 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3389 err_enable:
3390 err_reset:
3391 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3392 err_open:
3393 	kfree(bp);
3394 err_alloc:
3395 	fsl_mc_object_free(dpbp_dev);
3396 
3397 	return ERR_PTR(err);
3398 }
3399 
3400 static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
3401 {
3402 	struct dpaa2_eth_bp *bp;
3403 	int i;
3404 
3405 	bp = dpaa2_eth_allocate_dpbp(priv);
3406 	if (IS_ERR(bp))
3407 		return PTR_ERR(bp);
3408 
3409 	priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
3410 	priv->num_bps++;
3411 
3412 	for (i = 0; i < priv->num_channels; i++)
3413 		priv->channel[i]->bp = bp;
3414 
3415 	return 0;
3416 }
3417 
3418 void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
3419 {
3420 	int idx_bp;
3421 
3422 	/* Find the index at which this BP is stored */
3423 	for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
3424 		if (priv->bp[idx_bp] == bp)
3425 			break;
3426 
3427 	/* Drain the pool and disable the associated MC object */
3428 	dpaa2_eth_drain_pool(priv, bp->bpid);
3429 	dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
3430 	dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
3431 	fsl_mc_object_free(bp->dev);
3432 	kfree(bp);
3433 
3434 	/* Move the last in use DPBP over in this position */
3435 	priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
3436 	priv->num_bps--;
3437 }
3438 
3439 static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
3440 {
3441 	int i;
3442 
3443 	for (i = 0; i < priv->num_bps; i++)
3444 		dpaa2_eth_free_dpbp(priv, priv->bp[i]);
3445 }
3446 
3447 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3448 {
3449 	struct device *dev = priv->net_dev->dev.parent;
3450 	struct dpni_buffer_layout buf_layout = {0};
3451 	u16 rx_buf_align;
3452 	int err;
3453 
3454 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
3455 	 * version, this number is not always provided correctly on rev1.
3456 	 * We need to check for both alternatives in this situation.
3457 	 */
3458 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3459 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3460 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3461 	else
3462 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3463 
3464 	/* We need to ensure that the buffer size seen by WRIOP is a multiple
3465 	 * of 64 or 256 bytes depending on the WRIOP version.
3466 	 */
3467 	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3468 
3469 	/* tx buffer */
3470 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3471 	buf_layout.pass_timestamp = true;
3472 	buf_layout.pass_frame_status = true;
3473 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3474 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3475 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3476 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3477 				     DPNI_QUEUE_TX, &buf_layout);
3478 	if (err) {
3479 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3480 		return err;
3481 	}
3482 
3483 	/* tx-confirm buffer */
3484 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3485 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3486 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3487 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3488 	if (err) {
3489 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3490 		return err;
3491 	}
3492 
3493 	/* Now that we've set our tx buffer layout, retrieve the minimum
3494 	 * required tx data offset.
3495 	 */
3496 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3497 				      &priv->tx_data_offset);
3498 	if (err) {
3499 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3500 		return err;
3501 	}
3502 
3503 	if ((priv->tx_data_offset % 64) != 0)
3504 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3505 			 priv->tx_data_offset);
3506 
3507 	/* rx buffer */
3508 	buf_layout.pass_frame_status = true;
3509 	buf_layout.pass_parser_result = true;
3510 	buf_layout.data_align = rx_buf_align;
3511 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3512 	buf_layout.private_data_size = 0;
3513 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3514 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3515 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3516 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3517 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3518 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3519 				     DPNI_QUEUE_RX, &buf_layout);
3520 	if (err) {
3521 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3522 		return err;
3523 	}
3524 
3525 	return 0;
3526 }
3527 
3528 #define DPNI_ENQUEUE_FQID_VER_MAJOR	7
3529 #define DPNI_ENQUEUE_FQID_VER_MINOR	9
3530 
3531 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3532 				       struct dpaa2_eth_fq *fq,
3533 				       struct dpaa2_fd *fd, u8 prio,
3534 				       u32 num_frames __always_unused,
3535 				       int *frames_enqueued)
3536 {
3537 	int err;
3538 
3539 	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3540 					  priv->tx_qdid, prio,
3541 					  fq->tx_qdbin, fd);
3542 	if (!err && frames_enqueued)
3543 		*frames_enqueued = 1;
3544 	return err;
3545 }
3546 
3547 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3548 						struct dpaa2_eth_fq *fq,
3549 						struct dpaa2_fd *fd,
3550 						u8 prio, u32 num_frames,
3551 						int *frames_enqueued)
3552 {
3553 	int err;
3554 
3555 	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3556 						   fq->tx_fqid[prio],
3557 						   fd, num_frames);
3558 
3559 	if (err == 0)
3560 		return -EBUSY;
3561 
3562 	if (frames_enqueued)
3563 		*frames_enqueued = err;
3564 	return 0;
3565 }
3566 
3567 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3568 {
3569 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3570 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3571 		priv->enqueue = dpaa2_eth_enqueue_qd;
3572 	else
3573 		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3574 }
3575 
3576 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3577 {
3578 	struct device *dev = priv->net_dev->dev.parent;
3579 	struct dpni_link_cfg link_cfg = {0};
3580 	int err;
3581 
3582 	/* Get the default link options so we don't override other flags */
3583 	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3584 	if (err) {
3585 		dev_err(dev, "dpni_get_link_cfg() failed\n");
3586 		return err;
3587 	}
3588 
3589 	/* By default, enable both Rx and Tx pause frames */
3590 	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3591 	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3592 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3593 	if (err) {
3594 		dev_err(dev, "dpni_set_link_cfg() failed\n");
3595 		return err;
3596 	}
3597 
3598 	priv->link_state.options = link_cfg.options;
3599 
3600 	return 0;
3601 }
3602 
3603 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3604 {
3605 	struct dpni_queue_id qid = {0};
3606 	struct dpaa2_eth_fq *fq;
3607 	struct dpni_queue queue;
3608 	int i, j, err;
3609 
3610 	/* We only use Tx FQIDs for FQID-based enqueue, so check
3611 	 * if DPNI version supports it before updating FQIDs
3612 	 */
3613 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3614 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3615 		return;
3616 
3617 	for (i = 0; i < priv->num_fqs; i++) {
3618 		fq = &priv->fq[i];
3619 		if (fq->type != DPAA2_TX_CONF_FQ)
3620 			continue;
3621 		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3622 			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3623 					     DPNI_QUEUE_TX, j, fq->flowid,
3624 					     &queue, &qid);
3625 			if (err)
3626 				goto out_err;
3627 
3628 			fq->tx_fqid[j] = qid.fqid;
3629 			if (fq->tx_fqid[j] == 0)
3630 				goto out_err;
3631 		}
3632 	}
3633 
3634 	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3635 
3636 	return;
3637 
3638 out_err:
3639 	netdev_info(priv->net_dev,
3640 		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3641 	priv->enqueue = dpaa2_eth_enqueue_qd;
3642 }
3643 
3644 /* Configure ingress classification based on VLAN PCP */
3645 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3646 {
3647 	struct device *dev = priv->net_dev->dev.parent;
3648 	struct dpkg_profile_cfg kg_cfg = {0};
3649 	struct dpni_qos_tbl_cfg qos_cfg = {0};
3650 	struct dpni_rule_cfg key_params;
3651 	void *dma_mem, *key, *mask;
3652 	u8 key_size = 2;	/* VLAN TCI field */
3653 	int i, pcp, err;
3654 
3655 	/* VLAN-based classification only makes sense if we have multiple
3656 	 * traffic classes.
3657 	 * Also, we need to extract just the 3-bit PCP field from the VLAN
3658 	 * header and we can only do that by using a mask
3659 	 */
3660 	if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3661 		dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3662 		return -EOPNOTSUPP;
3663 	}
3664 
3665 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3666 	if (!dma_mem)
3667 		return -ENOMEM;
3668 
3669 	kg_cfg.num_extracts = 1;
3670 	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3671 	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3672 	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3673 	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3674 
3675 	err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3676 	if (err) {
3677 		dev_err(dev, "dpni_prepare_key_cfg failed\n");
3678 		goto out_free_tbl;
3679 	}
3680 
3681 	/* set QoS table */
3682 	qos_cfg.default_tc = 0;
3683 	qos_cfg.discard_on_miss = 0;
3684 	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3685 					      DPAA2_CLASSIFIER_DMA_SIZE,
3686 					      DMA_TO_DEVICE);
3687 	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3688 		dev_err(dev, "QoS table DMA mapping failed\n");
3689 		err = -ENOMEM;
3690 		goto out_free_tbl;
3691 	}
3692 
3693 	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3694 	if (err) {
3695 		dev_err(dev, "dpni_set_qos_table failed\n");
3696 		goto out_unmap_tbl;
3697 	}
3698 
3699 	/* Add QoS table entries */
3700 	key = kzalloc(key_size * 2, GFP_KERNEL);
3701 	if (!key) {
3702 		err = -ENOMEM;
3703 		goto out_unmap_tbl;
3704 	}
3705 	mask = key + key_size;
3706 	*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3707 
3708 	key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3709 					     DMA_TO_DEVICE);
3710 	if (dma_mapping_error(dev, key_params.key_iova)) {
3711 		dev_err(dev, "Qos table entry DMA mapping failed\n");
3712 		err = -ENOMEM;
3713 		goto out_free_key;
3714 	}
3715 
3716 	key_params.mask_iova = key_params.key_iova + key_size;
3717 	key_params.key_size = key_size;
3718 
3719 	/* We add rules for PCP-based distribution starting with highest
3720 	 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3721 	 * classes to accommodate all priority levels, the lowest ones end up
3722 	 * on TC 0 which was configured as default
3723 	 */
3724 	for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3725 		*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3726 		dma_sync_single_for_device(dev, key_params.key_iova,
3727 					   key_size * 2, DMA_TO_DEVICE);
3728 
3729 		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3730 					 &key_params, i, i);
3731 		if (err) {
3732 			dev_err(dev, "dpni_add_qos_entry failed\n");
3733 			dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3734 			goto out_unmap_key;
3735 		}
3736 	}
3737 
3738 	priv->vlan_cls_enabled = true;
3739 
3740 	/* Table and key memory is not persistent, clean everything up after
3741 	 * configuration is finished
3742 	 */
3743 out_unmap_key:
3744 	dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3745 out_free_key:
3746 	kfree(key);
3747 out_unmap_tbl:
3748 	dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3749 			 DMA_TO_DEVICE);
3750 out_free_tbl:
3751 	kfree(dma_mem);
3752 
3753 	return err;
3754 }
3755 
3756 /* Configure the DPNI object this interface is associated with */
3757 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3758 {
3759 	struct device *dev = &ls_dev->dev;
3760 	struct dpaa2_eth_priv *priv;
3761 	struct net_device *net_dev;
3762 	int err;
3763 
3764 	net_dev = dev_get_drvdata(dev);
3765 	priv = netdev_priv(net_dev);
3766 
3767 	/* get a handle for the DPNI object */
3768 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3769 	if (err) {
3770 		dev_err(dev, "dpni_open() failed\n");
3771 		return err;
3772 	}
3773 
3774 	/* Check if we can work with this DPNI object */
3775 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3776 				   &priv->dpni_ver_minor);
3777 	if (err) {
3778 		dev_err(dev, "dpni_get_api_version() failed\n");
3779 		goto close;
3780 	}
3781 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3782 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3783 			priv->dpni_ver_major, priv->dpni_ver_minor,
3784 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
3785 		err = -ENOTSUPP;
3786 		goto close;
3787 	}
3788 
3789 	ls_dev->mc_io = priv->mc_io;
3790 	ls_dev->mc_handle = priv->mc_token;
3791 
3792 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3793 	if (err) {
3794 		dev_err(dev, "dpni_reset() failed\n");
3795 		goto close;
3796 	}
3797 
3798 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3799 				  &priv->dpni_attrs);
3800 	if (err) {
3801 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3802 		goto close;
3803 	}
3804 
3805 	err = dpaa2_eth_set_buffer_layout(priv);
3806 	if (err)
3807 		goto close;
3808 
3809 	dpaa2_eth_set_enqueue_mode(priv);
3810 
3811 	/* Enable pause frame support */
3812 	if (dpaa2_eth_has_pause_support(priv)) {
3813 		err = dpaa2_eth_set_pause(priv);
3814 		if (err)
3815 			goto close;
3816 	}
3817 
3818 	err = dpaa2_eth_set_vlan_qos(priv);
3819 	if (err && err != -EOPNOTSUPP)
3820 		goto close;
3821 
3822 	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3823 				       sizeof(struct dpaa2_eth_cls_rule),
3824 				       GFP_KERNEL);
3825 	if (!priv->cls_rules) {
3826 		err = -ENOMEM;
3827 		goto close;
3828 	}
3829 
3830 	return 0;
3831 
3832 close:
3833 	dpni_close(priv->mc_io, 0, priv->mc_token);
3834 
3835 	return err;
3836 }
3837 
3838 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3839 {
3840 	int err;
3841 
3842 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3843 	if (err)
3844 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3845 			    err);
3846 
3847 	dpni_close(priv->mc_io, 0, priv->mc_token);
3848 }
3849 
3850 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3851 				   struct dpaa2_eth_fq *fq)
3852 {
3853 	struct device *dev = priv->net_dev->dev.parent;
3854 	struct dpni_queue queue;
3855 	struct dpni_queue_id qid;
3856 	int err;
3857 
3858 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3859 			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3860 	if (err) {
3861 		dev_err(dev, "dpni_get_queue(RX) failed\n");
3862 		return err;
3863 	}
3864 
3865 	fq->fqid = qid.fqid;
3866 
3867 	queue.destination.id = fq->channel->dpcon_id;
3868 	queue.destination.type = DPNI_DEST_DPCON;
3869 	queue.destination.priority = 1;
3870 	queue.user_context = (u64)(uintptr_t)fq;
3871 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3872 			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
3873 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3874 			     &queue);
3875 	if (err) {
3876 		dev_err(dev, "dpni_set_queue(RX) failed\n");
3877 		return err;
3878 	}
3879 
3880 	/* xdp_rxq setup */
3881 	/* only once for each channel */
3882 	if (fq->tc > 0)
3883 		return 0;
3884 
3885 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3886 			       fq->flowid, 0);
3887 	if (err) {
3888 		dev_err(dev, "xdp_rxq_info_reg failed\n");
3889 		return err;
3890 	}
3891 
3892 	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3893 					 MEM_TYPE_PAGE_ORDER0, NULL);
3894 	if (err) {
3895 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3896 		return err;
3897 	}
3898 
3899 	return 0;
3900 }
3901 
3902 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3903 				   struct dpaa2_eth_fq *fq)
3904 {
3905 	struct device *dev = priv->net_dev->dev.parent;
3906 	struct dpni_queue queue;
3907 	struct dpni_queue_id qid;
3908 	int i, err;
3909 
3910 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3911 		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3912 				     DPNI_QUEUE_TX, i, fq->flowid,
3913 				     &queue, &qid);
3914 		if (err) {
3915 			dev_err(dev, "dpni_get_queue(TX) failed\n");
3916 			return err;
3917 		}
3918 		fq->tx_fqid[i] = qid.fqid;
3919 	}
3920 
3921 	/* All Tx queues belonging to the same flowid have the same qdbin */
3922 	fq->tx_qdbin = qid.qdbin;
3923 
3924 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3925 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3926 			     &queue, &qid);
3927 	if (err) {
3928 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3929 		return err;
3930 	}
3931 
3932 	fq->fqid = qid.fqid;
3933 
3934 	queue.destination.id = fq->channel->dpcon_id;
3935 	queue.destination.type = DPNI_DEST_DPCON;
3936 	queue.destination.priority = 0;
3937 	queue.user_context = (u64)(uintptr_t)fq;
3938 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3939 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3940 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3941 			     &queue);
3942 	if (err) {
3943 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3944 		return err;
3945 	}
3946 
3947 	return 0;
3948 }
3949 
3950 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3951 			     struct dpaa2_eth_fq *fq)
3952 {
3953 	struct device *dev = priv->net_dev->dev.parent;
3954 	struct dpni_queue q = { { 0 } };
3955 	struct dpni_queue_id qid;
3956 	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3957 	int err;
3958 
3959 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3960 			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3961 	if (err) {
3962 		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3963 		return err;
3964 	}
3965 
3966 	fq->fqid = qid.fqid;
3967 
3968 	q.destination.id = fq->channel->dpcon_id;
3969 	q.destination.type = DPNI_DEST_DPCON;
3970 	q.destination.priority = 1;
3971 	q.user_context = (u64)(uintptr_t)fq;
3972 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3973 			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3974 	if (err) {
3975 		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3976 		return err;
3977 	}
3978 
3979 	return 0;
3980 }
3981 
3982 /* Supported header fields for Rx hash distribution key */
3983 static const struct dpaa2_eth_dist_fields dist_fields[] = {
3984 	{
3985 		/* L2 header */
3986 		.rxnfc_field = RXH_L2DA,
3987 		.cls_prot = NET_PROT_ETH,
3988 		.cls_field = NH_FLD_ETH_DA,
3989 		.id = DPAA2_ETH_DIST_ETHDST,
3990 		.size = 6,
3991 	}, {
3992 		.cls_prot = NET_PROT_ETH,
3993 		.cls_field = NH_FLD_ETH_SA,
3994 		.id = DPAA2_ETH_DIST_ETHSRC,
3995 		.size = 6,
3996 	}, {
3997 		/* This is the last ethertype field parsed:
3998 		 * depending on frame format, it can be the MAC ethertype
3999 		 * or the VLAN etype.
4000 		 */
4001 		.cls_prot = NET_PROT_ETH,
4002 		.cls_field = NH_FLD_ETH_TYPE,
4003 		.id = DPAA2_ETH_DIST_ETHTYPE,
4004 		.size = 2,
4005 	}, {
4006 		/* VLAN header */
4007 		.rxnfc_field = RXH_VLAN,
4008 		.cls_prot = NET_PROT_VLAN,
4009 		.cls_field = NH_FLD_VLAN_TCI,
4010 		.id = DPAA2_ETH_DIST_VLAN,
4011 		.size = 2,
4012 	}, {
4013 		/* IP header */
4014 		.rxnfc_field = RXH_IP_SRC,
4015 		.cls_prot = NET_PROT_IP,
4016 		.cls_field = NH_FLD_IP_SRC,
4017 		.id = DPAA2_ETH_DIST_IPSRC,
4018 		.size = 4,
4019 	}, {
4020 		.rxnfc_field = RXH_IP_DST,
4021 		.cls_prot = NET_PROT_IP,
4022 		.cls_field = NH_FLD_IP_DST,
4023 		.id = DPAA2_ETH_DIST_IPDST,
4024 		.size = 4,
4025 	}, {
4026 		.rxnfc_field = RXH_L3_PROTO,
4027 		.cls_prot = NET_PROT_IP,
4028 		.cls_field = NH_FLD_IP_PROTO,
4029 		.id = DPAA2_ETH_DIST_IPPROTO,
4030 		.size = 1,
4031 	}, {
4032 		/* Using UDP ports, this is functionally equivalent to raw
4033 		 * byte pairs from L4 header.
4034 		 */
4035 		.rxnfc_field = RXH_L4_B_0_1,
4036 		.cls_prot = NET_PROT_UDP,
4037 		.cls_field = NH_FLD_UDP_PORT_SRC,
4038 		.id = DPAA2_ETH_DIST_L4SRC,
4039 		.size = 2,
4040 	}, {
4041 		.rxnfc_field = RXH_L4_B_2_3,
4042 		.cls_prot = NET_PROT_UDP,
4043 		.cls_field = NH_FLD_UDP_PORT_DST,
4044 		.id = DPAA2_ETH_DIST_L4DST,
4045 		.size = 2,
4046 	},
4047 };
4048 
4049 /* Configure the Rx hash key using the legacy API */
4050 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4051 {
4052 	struct device *dev = priv->net_dev->dev.parent;
4053 	struct dpni_rx_tc_dist_cfg dist_cfg;
4054 	int i, err = 0;
4055 
4056 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4057 
4058 	dist_cfg.key_cfg_iova = key;
4059 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4060 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4061 
4062 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4063 		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4064 					  i, &dist_cfg);
4065 		if (err) {
4066 			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4067 			break;
4068 		}
4069 	}
4070 
4071 	return err;
4072 }
4073 
4074 /* Configure the Rx hash key using the new API */
4075 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4076 {
4077 	struct device *dev = priv->net_dev->dev.parent;
4078 	struct dpni_rx_dist_cfg dist_cfg;
4079 	int i, err = 0;
4080 
4081 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4082 
4083 	dist_cfg.key_cfg_iova = key;
4084 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4085 	dist_cfg.enable = 1;
4086 
4087 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4088 		dist_cfg.tc = i;
4089 		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4090 					    &dist_cfg);
4091 		if (err) {
4092 			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4093 			break;
4094 		}
4095 
4096 		/* If the flow steering / hashing key is shared between all
4097 		 * traffic classes, install it just once
4098 		 */
4099 		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4100 			break;
4101 	}
4102 
4103 	return err;
4104 }
4105 
4106 /* Configure the Rx flow classification key */
4107 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4108 {
4109 	struct device *dev = priv->net_dev->dev.parent;
4110 	struct dpni_rx_dist_cfg dist_cfg;
4111 	int i, err = 0;
4112 
4113 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4114 
4115 	dist_cfg.key_cfg_iova = key;
4116 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4117 	dist_cfg.enable = 1;
4118 
4119 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4120 		dist_cfg.tc = i;
4121 		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4122 					  &dist_cfg);
4123 		if (err) {
4124 			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4125 			break;
4126 		}
4127 
4128 		/* If the flow steering / hashing key is shared between all
4129 		 * traffic classes, install it just once
4130 		 */
4131 		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4132 			break;
4133 	}
4134 
4135 	return err;
4136 }
4137 
4138 /* Size of the Rx flow classification key */
4139 int dpaa2_eth_cls_key_size(u64 fields)
4140 {
4141 	int i, size = 0;
4142 
4143 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4144 		if (!(fields & dist_fields[i].id))
4145 			continue;
4146 		size += dist_fields[i].size;
4147 	}
4148 
4149 	return size;
4150 }
4151 
4152 /* Offset of header field in Rx classification key */
4153 int dpaa2_eth_cls_fld_off(int prot, int field)
4154 {
4155 	int i, off = 0;
4156 
4157 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4158 		if (dist_fields[i].cls_prot == prot &&
4159 		    dist_fields[i].cls_field == field)
4160 			return off;
4161 		off += dist_fields[i].size;
4162 	}
4163 
4164 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4165 	return 0;
4166 }
4167 
4168 /* Prune unused fields from the classification rule.
4169  * Used when masking is not supported
4170  */
4171 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4172 {
4173 	int off = 0, new_off = 0;
4174 	int i, size;
4175 
4176 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4177 		size = dist_fields[i].size;
4178 		if (dist_fields[i].id & fields) {
4179 			memcpy(key_mem + new_off, key_mem + off, size);
4180 			new_off += size;
4181 		}
4182 		off += size;
4183 	}
4184 }
4185 
4186 /* Set Rx distribution (hash or flow classification) key
4187  * flags is a combination of RXH_ bits
4188  */
4189 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4190 				  enum dpaa2_eth_rx_dist type, u64 flags)
4191 {
4192 	struct device *dev = net_dev->dev.parent;
4193 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4194 	struct dpkg_profile_cfg cls_cfg;
4195 	u32 rx_hash_fields = 0;
4196 	dma_addr_t key_iova;
4197 	u8 *dma_mem;
4198 	int i;
4199 	int err = 0;
4200 
4201 	memset(&cls_cfg, 0, sizeof(cls_cfg));
4202 
4203 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4204 		struct dpkg_extract *key =
4205 			&cls_cfg.extracts[cls_cfg.num_extracts];
4206 
4207 		/* For both Rx hashing and classification keys
4208 		 * we set only the selected fields.
4209 		 */
4210 		if (!(flags & dist_fields[i].id))
4211 			continue;
4212 		if (type == DPAA2_ETH_RX_DIST_HASH)
4213 			rx_hash_fields |= dist_fields[i].rxnfc_field;
4214 
4215 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4216 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
4217 			return -E2BIG;
4218 		}
4219 
4220 		key->type = DPKG_EXTRACT_FROM_HDR;
4221 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4222 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
4223 		key->extract.from_hdr.field = dist_fields[i].cls_field;
4224 		cls_cfg.num_extracts++;
4225 	}
4226 
4227 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4228 	if (!dma_mem)
4229 		return -ENOMEM;
4230 
4231 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4232 	if (err) {
4233 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4234 		goto free_key;
4235 	}
4236 
4237 	/* Prepare for setting the rx dist */
4238 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4239 				  DMA_TO_DEVICE);
4240 	if (dma_mapping_error(dev, key_iova)) {
4241 		dev_err(dev, "DMA mapping failed\n");
4242 		err = -ENOMEM;
4243 		goto free_key;
4244 	}
4245 
4246 	if (type == DPAA2_ETH_RX_DIST_HASH) {
4247 		if (dpaa2_eth_has_legacy_dist(priv))
4248 			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4249 		else
4250 			err = dpaa2_eth_config_hash_key(priv, key_iova);
4251 	} else {
4252 		err = dpaa2_eth_config_cls_key(priv, key_iova);
4253 	}
4254 
4255 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4256 			 DMA_TO_DEVICE);
4257 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4258 		priv->rx_hash_fields = rx_hash_fields;
4259 
4260 free_key:
4261 	kfree(dma_mem);
4262 	return err;
4263 }
4264 
4265 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4266 {
4267 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4268 	u64 key = 0;
4269 	int i;
4270 
4271 	if (!dpaa2_eth_hash_enabled(priv))
4272 		return -EOPNOTSUPP;
4273 
4274 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4275 		if (dist_fields[i].rxnfc_field & flags)
4276 			key |= dist_fields[i].id;
4277 
4278 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4279 }
4280 
4281 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4282 {
4283 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4284 }
4285 
4286 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4287 {
4288 	struct device *dev = priv->net_dev->dev.parent;
4289 	int err;
4290 
4291 	/* Check if we actually support Rx flow classification */
4292 	if (dpaa2_eth_has_legacy_dist(priv)) {
4293 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
4294 		return -EOPNOTSUPP;
4295 	}
4296 
4297 	if (!dpaa2_eth_fs_enabled(priv)) {
4298 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4299 		return -EOPNOTSUPP;
4300 	}
4301 
4302 	if (!dpaa2_eth_hash_enabled(priv)) {
4303 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4304 		return -EOPNOTSUPP;
4305 	}
4306 
4307 	/* If there is no support for masking in the classification table,
4308 	 * we don't set a default key, as it will depend on the rules
4309 	 * added by the user at runtime.
4310 	 */
4311 	if (!dpaa2_eth_fs_mask_enabled(priv))
4312 		goto out;
4313 
4314 	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4315 	if (err)
4316 		return err;
4317 
4318 out:
4319 	priv->rx_cls_enabled = 1;
4320 
4321 	return 0;
4322 }
4323 
4324 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4325  * frame queues and channels
4326  */
4327 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4328 {
4329 	struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
4330 	struct net_device *net_dev = priv->net_dev;
4331 	struct dpni_pools_cfg pools_params = { 0 };
4332 	struct device *dev = net_dev->dev.parent;
4333 	struct dpni_error_cfg err_cfg;
4334 	int err = 0;
4335 	int i;
4336 
4337 	pools_params.num_dpbp = 1;
4338 	pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4339 	pools_params.pools[0].backup_pool = 0;
4340 	pools_params.pools[0].buffer_size = priv->rx_buf_size;
4341 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4342 	if (err) {
4343 		dev_err(dev, "dpni_set_pools() failed\n");
4344 		return err;
4345 	}
4346 
4347 	/* have the interface implicitly distribute traffic based on
4348 	 * the default hash key
4349 	 */
4350 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4351 	if (err && err != -EOPNOTSUPP)
4352 		dev_err(dev, "Failed to configure hashing\n");
4353 
4354 	/* Configure the flow classification key; it includes all
4355 	 * supported header fields and cannot be modified at runtime
4356 	 */
4357 	err = dpaa2_eth_set_default_cls(priv);
4358 	if (err && err != -EOPNOTSUPP)
4359 		dev_err(dev, "Failed to configure Rx classification key\n");
4360 
4361 	/* Configure handling of error frames */
4362 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4363 	err_cfg.set_frame_annotation = 1;
4364 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4365 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4366 				       &err_cfg);
4367 	if (err) {
4368 		dev_err(dev, "dpni_set_errors_behavior failed\n");
4369 		return err;
4370 	}
4371 
4372 	/* Configure Rx and Tx conf queues to generate CDANs */
4373 	for (i = 0; i < priv->num_fqs; i++) {
4374 		switch (priv->fq[i].type) {
4375 		case DPAA2_RX_FQ:
4376 			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4377 			break;
4378 		case DPAA2_TX_CONF_FQ:
4379 			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4380 			break;
4381 		case DPAA2_RX_ERR_FQ:
4382 			err = setup_rx_err_flow(priv, &priv->fq[i]);
4383 			break;
4384 		default:
4385 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4386 			return -EINVAL;
4387 		}
4388 		if (err)
4389 			return err;
4390 	}
4391 
4392 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4393 			    DPNI_QUEUE_TX, &priv->tx_qdid);
4394 	if (err) {
4395 		dev_err(dev, "dpni_get_qdid() failed\n");
4396 		return err;
4397 	}
4398 
4399 	return 0;
4400 }
4401 
4402 /* Allocate rings for storing incoming frame descriptors */
4403 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4404 {
4405 	struct net_device *net_dev = priv->net_dev;
4406 	struct device *dev = net_dev->dev.parent;
4407 	int i;
4408 
4409 	for (i = 0; i < priv->num_channels; i++) {
4410 		priv->channel[i]->store =
4411 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4412 		if (!priv->channel[i]->store) {
4413 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4414 			goto err_ring;
4415 		}
4416 	}
4417 
4418 	return 0;
4419 
4420 err_ring:
4421 	for (i = 0; i < priv->num_channels; i++) {
4422 		if (!priv->channel[i]->store)
4423 			break;
4424 		dpaa2_io_store_destroy(priv->channel[i]->store);
4425 	}
4426 
4427 	return -ENOMEM;
4428 }
4429 
4430 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4431 {
4432 	int i;
4433 
4434 	for (i = 0; i < priv->num_channels; i++)
4435 		dpaa2_io_store_destroy(priv->channel[i]->store);
4436 }
4437 
4438 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4439 {
4440 	struct net_device *net_dev = priv->net_dev;
4441 	struct device *dev = net_dev->dev.parent;
4442 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4443 	int err;
4444 
4445 	/* Get firmware address, if any */
4446 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4447 	if (err) {
4448 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4449 		return err;
4450 	}
4451 
4452 	/* Get DPNI attributes address, if any */
4453 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4454 					dpni_mac_addr);
4455 	if (err) {
4456 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4457 		return err;
4458 	}
4459 
4460 	/* First check if firmware has any address configured by bootloader */
4461 	if (!is_zero_ether_addr(mac_addr)) {
4462 		/* If the DPMAC addr != DPNI addr, update it */
4463 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4464 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4465 							priv->mc_token,
4466 							mac_addr);
4467 			if (err) {
4468 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4469 				return err;
4470 			}
4471 		}
4472 		eth_hw_addr_set(net_dev, mac_addr);
4473 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
4474 		/* No MAC address configured, fill in net_dev->dev_addr
4475 		 * with a random one
4476 		 */
4477 		eth_hw_addr_random(net_dev);
4478 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4479 
4480 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4481 						net_dev->dev_addr);
4482 		if (err) {
4483 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4484 			return err;
4485 		}
4486 
4487 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4488 		 * practical purposes, this will be our "permanent" mac address,
4489 		 * at least until the next reboot. This move will also permit
4490 		 * register_netdevice() to properly fill up net_dev->perm_addr.
4491 		 */
4492 		net_dev->addr_assign_type = NET_ADDR_PERM;
4493 	} else {
4494 		/* NET_ADDR_PERM is default, all we have to do is
4495 		 * fill in the device addr.
4496 		 */
4497 		eth_hw_addr_set(net_dev, dpni_mac_addr);
4498 	}
4499 
4500 	return 0;
4501 }
4502 
4503 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4504 {
4505 	struct device *dev = net_dev->dev.parent;
4506 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4507 	u32 options = priv->dpni_attrs.options;
4508 	u64 supported = 0, not_supported = 0;
4509 	u8 bcast_addr[ETH_ALEN];
4510 	u8 num_queues;
4511 	int err;
4512 
4513 	net_dev->netdev_ops = &dpaa2_eth_ops;
4514 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4515 
4516 	err = dpaa2_eth_set_mac_addr(priv);
4517 	if (err)
4518 		return err;
4519 
4520 	/* Explicitly add the broadcast address to the MAC filtering table */
4521 	eth_broadcast_addr(bcast_addr);
4522 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4523 	if (err) {
4524 		dev_err(dev, "dpni_add_mac_addr() failed\n");
4525 		return err;
4526 	}
4527 
4528 	/* Set MTU upper limit; lower limit is 68B (default value) */
4529 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4530 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4531 					DPAA2_ETH_MFL);
4532 	if (err) {
4533 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
4534 		return err;
4535 	}
4536 
4537 	/* Set actual number of queues in the net device */
4538 	num_queues = dpaa2_eth_queue_count(priv);
4539 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
4540 	if (err) {
4541 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4542 		return err;
4543 	}
4544 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
4545 	if (err) {
4546 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4547 		return err;
4548 	}
4549 
4550 	dpaa2_eth_detect_features(priv);
4551 
4552 	/* Capabilities listing */
4553 	supported |= IFF_LIVE_ADDR_CHANGE;
4554 
4555 	if (options & DPNI_OPT_NO_MAC_FILTER)
4556 		not_supported |= IFF_UNICAST_FLT;
4557 	else
4558 		supported |= IFF_UNICAST_FLT;
4559 
4560 	net_dev->priv_flags |= supported;
4561 	net_dev->priv_flags &= ~not_supported;
4562 
4563 	/* Features */
4564 	net_dev->features = NETIF_F_RXCSUM |
4565 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4566 			    NETIF_F_SG | NETIF_F_HIGHDMA |
4567 			    NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4568 	net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4569 	net_dev->hw_features = net_dev->features;
4570 
4571 	if (priv->dpni_attrs.vlan_filter_entries)
4572 		net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4573 
4574 	return 0;
4575 }
4576 
4577 static int dpaa2_eth_poll_link_state(void *arg)
4578 {
4579 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4580 	int err;
4581 
4582 	while (!kthread_should_stop()) {
4583 		err = dpaa2_eth_link_state_update(priv);
4584 		if (unlikely(err))
4585 			return err;
4586 
4587 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4588 	}
4589 
4590 	return 0;
4591 }
4592 
4593 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4594 {
4595 	struct fsl_mc_device *dpni_dev, *dpmac_dev;
4596 	struct dpaa2_mac *mac;
4597 	int err;
4598 
4599 	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4600 	dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4601 
4602 	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
4603 		return PTR_ERR(dpmac_dev);
4604 
4605 	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4606 		return 0;
4607 
4608 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4609 	if (!mac)
4610 		return -ENOMEM;
4611 
4612 	mac->mc_dev = dpmac_dev;
4613 	mac->mc_io = priv->mc_io;
4614 	mac->net_dev = priv->net_dev;
4615 
4616 	err = dpaa2_mac_open(mac);
4617 	if (err)
4618 		goto err_free_mac;
4619 	priv->mac = mac;
4620 
4621 	if (dpaa2_eth_is_type_phy(priv)) {
4622 		err = dpaa2_mac_connect(mac);
4623 		if (err && err != -EPROBE_DEFER)
4624 			netdev_err(priv->net_dev, "Error connecting to the MAC endpoint: %pe",
4625 				   ERR_PTR(err));
4626 		if (err)
4627 			goto err_close_mac;
4628 	}
4629 
4630 	return 0;
4631 
4632 err_close_mac:
4633 	dpaa2_mac_close(mac);
4634 	priv->mac = NULL;
4635 err_free_mac:
4636 	kfree(mac);
4637 	return err;
4638 }
4639 
4640 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4641 {
4642 	if (dpaa2_eth_is_type_phy(priv))
4643 		dpaa2_mac_disconnect(priv->mac);
4644 
4645 	if (!dpaa2_eth_has_mac(priv))
4646 		return;
4647 
4648 	dpaa2_mac_close(priv->mac);
4649 	kfree(priv->mac);
4650 	priv->mac = NULL;
4651 }
4652 
4653 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4654 {
4655 	u32 status = ~0;
4656 	struct device *dev = (struct device *)arg;
4657 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4658 	struct net_device *net_dev = dev_get_drvdata(dev);
4659 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4660 	int err;
4661 
4662 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4663 				  DPNI_IRQ_INDEX, &status);
4664 	if (unlikely(err)) {
4665 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4666 		return IRQ_HANDLED;
4667 	}
4668 
4669 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4670 		dpaa2_eth_link_state_update(netdev_priv(net_dev));
4671 
4672 	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4673 		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4674 		dpaa2_eth_update_tx_fqids(priv);
4675 
4676 		rtnl_lock();
4677 		if (dpaa2_eth_has_mac(priv))
4678 			dpaa2_eth_disconnect_mac(priv);
4679 		else
4680 			dpaa2_eth_connect_mac(priv);
4681 		rtnl_unlock();
4682 	}
4683 
4684 	return IRQ_HANDLED;
4685 }
4686 
4687 static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4688 {
4689 	int err = 0;
4690 	struct fsl_mc_device_irq *irq;
4691 
4692 	err = fsl_mc_allocate_irqs(ls_dev);
4693 	if (err) {
4694 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4695 		return err;
4696 	}
4697 
4698 	irq = ls_dev->irqs[0];
4699 	err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4700 					NULL, dpni_irq0_handler_thread,
4701 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
4702 					dev_name(&ls_dev->dev), &ls_dev->dev);
4703 	if (err < 0) {
4704 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4705 		goto free_mc_irq;
4706 	}
4707 
4708 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4709 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4710 				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4711 	if (err < 0) {
4712 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4713 		goto free_irq;
4714 	}
4715 
4716 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4717 				  DPNI_IRQ_INDEX, 1);
4718 	if (err < 0) {
4719 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4720 		goto free_irq;
4721 	}
4722 
4723 	return 0;
4724 
4725 free_irq:
4726 	devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4727 free_mc_irq:
4728 	fsl_mc_free_irqs(ls_dev);
4729 
4730 	return err;
4731 }
4732 
4733 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4734 {
4735 	int i;
4736 	struct dpaa2_eth_channel *ch;
4737 
4738 	for (i = 0; i < priv->num_channels; i++) {
4739 		ch = priv->channel[i];
4740 		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4741 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
4742 	}
4743 }
4744 
4745 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4746 {
4747 	int i;
4748 	struct dpaa2_eth_channel *ch;
4749 
4750 	for (i = 0; i < priv->num_channels; i++) {
4751 		ch = priv->channel[i];
4752 		netif_napi_del(&ch->napi);
4753 	}
4754 }
4755 
4756 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4757 {
4758 	struct device *dev;
4759 	struct net_device *net_dev = NULL;
4760 	struct dpaa2_eth_priv *priv = NULL;
4761 	int err = 0;
4762 
4763 	dev = &dpni_dev->dev;
4764 
4765 	/* Net device */
4766 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4767 	if (!net_dev) {
4768 		dev_err(dev, "alloc_etherdev_mq() failed\n");
4769 		return -ENOMEM;
4770 	}
4771 
4772 	SET_NETDEV_DEV(net_dev, dev);
4773 	dev_set_drvdata(dev, net_dev);
4774 
4775 	priv = netdev_priv(net_dev);
4776 	priv->net_dev = net_dev;
4777 
4778 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
4779 
4780 	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4781 	priv->rx_tstamp = false;
4782 
4783 	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4784 	if (!priv->dpaa2_ptp_wq) {
4785 		err = -ENOMEM;
4786 		goto err_wq_alloc;
4787 	}
4788 
4789 	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4790 	mutex_init(&priv->onestep_tstamp_lock);
4791 	skb_queue_head_init(&priv->tx_skbs);
4792 
4793 	priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4794 
4795 	/* Obtain a MC portal */
4796 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4797 				     &priv->mc_io);
4798 	if (err) {
4799 		if (err == -ENXIO)
4800 			err = -EPROBE_DEFER;
4801 		else
4802 			dev_err(dev, "MC portal allocation failed\n");
4803 		goto err_portal_alloc;
4804 	}
4805 
4806 	/* MC objects initialization and configuration */
4807 	err = dpaa2_eth_setup_dpni(dpni_dev);
4808 	if (err)
4809 		goto err_dpni_setup;
4810 
4811 	err = dpaa2_eth_setup_dpio(priv);
4812 	if (err)
4813 		goto err_dpio_setup;
4814 
4815 	dpaa2_eth_setup_fqs(priv);
4816 
4817 	err = dpaa2_eth_setup_default_dpbp(priv);
4818 	if (err)
4819 		goto err_dpbp_setup;
4820 
4821 	err = dpaa2_eth_bind_dpni(priv);
4822 	if (err)
4823 		goto err_bind;
4824 
4825 	/* Add a NAPI context for each channel */
4826 	dpaa2_eth_add_ch_napi(priv);
4827 
4828 	/* Percpu statistics */
4829 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4830 	if (!priv->percpu_stats) {
4831 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4832 		err = -ENOMEM;
4833 		goto err_alloc_percpu_stats;
4834 	}
4835 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4836 	if (!priv->percpu_extras) {
4837 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4838 		err = -ENOMEM;
4839 		goto err_alloc_percpu_extras;
4840 	}
4841 
4842 	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4843 	if (!priv->sgt_cache) {
4844 		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4845 		err = -ENOMEM;
4846 		goto err_alloc_sgt_cache;
4847 	}
4848 
4849 	priv->fd = alloc_percpu(*priv->fd);
4850 	if (!priv->fd) {
4851 		dev_err(dev, "alloc_percpu(fds) failed\n");
4852 		err = -ENOMEM;
4853 		goto err_alloc_fds;
4854 	}
4855 
4856 	err = dpaa2_eth_netdev_init(net_dev);
4857 	if (err)
4858 		goto err_netdev_init;
4859 
4860 	/* Configure checksum offload based on current interface flags */
4861 	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4862 	if (err)
4863 		goto err_csum;
4864 
4865 	err = dpaa2_eth_set_tx_csum(priv,
4866 				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4867 	if (err)
4868 		goto err_csum;
4869 
4870 	err = dpaa2_eth_alloc_rings(priv);
4871 	if (err)
4872 		goto err_alloc_rings;
4873 
4874 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4875 	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4876 		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4877 		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4878 	} else {
4879 		dev_dbg(dev, "PFC not supported\n");
4880 	}
4881 #endif
4882 
4883 	err = dpaa2_eth_setup_irqs(dpni_dev);
4884 	if (err) {
4885 		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
4886 		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4887 						"%s_poll_link", net_dev->name);
4888 		if (IS_ERR(priv->poll_thread)) {
4889 			dev_err(dev, "Error starting polling thread\n");
4890 			goto err_poll_thread;
4891 		}
4892 		priv->do_link_poll = true;
4893 	}
4894 
4895 	err = dpaa2_eth_connect_mac(priv);
4896 	if (err)
4897 		goto err_connect_mac;
4898 
4899 	err = dpaa2_eth_dl_alloc(priv);
4900 	if (err)
4901 		goto err_dl_register;
4902 
4903 	err = dpaa2_eth_dl_traps_register(priv);
4904 	if (err)
4905 		goto err_dl_trap_register;
4906 
4907 	err = dpaa2_eth_dl_port_add(priv);
4908 	if (err)
4909 		goto err_dl_port_add;
4910 
4911 	err = register_netdev(net_dev);
4912 	if (err < 0) {
4913 		dev_err(dev, "register_netdev() failed\n");
4914 		goto err_netdev_reg;
4915 	}
4916 
4917 #ifdef CONFIG_DEBUG_FS
4918 	dpaa2_dbg_add(priv);
4919 #endif
4920 
4921 	dpaa2_eth_dl_register(priv);
4922 	dev_info(dev, "Probed interface %s\n", net_dev->name);
4923 	return 0;
4924 
4925 err_netdev_reg:
4926 	dpaa2_eth_dl_port_del(priv);
4927 err_dl_port_add:
4928 	dpaa2_eth_dl_traps_unregister(priv);
4929 err_dl_trap_register:
4930 	dpaa2_eth_dl_free(priv);
4931 err_dl_register:
4932 	dpaa2_eth_disconnect_mac(priv);
4933 err_connect_mac:
4934 	if (priv->do_link_poll)
4935 		kthread_stop(priv->poll_thread);
4936 	else
4937 		fsl_mc_free_irqs(dpni_dev);
4938 err_poll_thread:
4939 	dpaa2_eth_free_rings(priv);
4940 err_alloc_rings:
4941 err_csum:
4942 err_netdev_init:
4943 	free_percpu(priv->fd);
4944 err_alloc_fds:
4945 	free_percpu(priv->sgt_cache);
4946 err_alloc_sgt_cache:
4947 	free_percpu(priv->percpu_extras);
4948 err_alloc_percpu_extras:
4949 	free_percpu(priv->percpu_stats);
4950 err_alloc_percpu_stats:
4951 	dpaa2_eth_del_ch_napi(priv);
4952 err_bind:
4953 	dpaa2_eth_free_dpbps(priv);
4954 err_dpbp_setup:
4955 	dpaa2_eth_free_dpio(priv);
4956 err_dpio_setup:
4957 	dpaa2_eth_free_dpni(priv);
4958 err_dpni_setup:
4959 	fsl_mc_portal_free(priv->mc_io);
4960 err_portal_alloc:
4961 	destroy_workqueue(priv->dpaa2_ptp_wq);
4962 err_wq_alloc:
4963 	dev_set_drvdata(dev, NULL);
4964 	free_netdev(net_dev);
4965 
4966 	return err;
4967 }
4968 
4969 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4970 {
4971 	struct device *dev;
4972 	struct net_device *net_dev;
4973 	struct dpaa2_eth_priv *priv;
4974 
4975 	dev = &ls_dev->dev;
4976 	net_dev = dev_get_drvdata(dev);
4977 	priv = netdev_priv(net_dev);
4978 
4979 	dpaa2_eth_dl_unregister(priv);
4980 
4981 #ifdef CONFIG_DEBUG_FS
4982 	dpaa2_dbg_remove(priv);
4983 #endif
4984 
4985 	unregister_netdev(net_dev);
4986 	rtnl_lock();
4987 	dpaa2_eth_disconnect_mac(priv);
4988 	rtnl_unlock();
4989 
4990 	dpaa2_eth_dl_port_del(priv);
4991 	dpaa2_eth_dl_traps_unregister(priv);
4992 	dpaa2_eth_dl_free(priv);
4993 
4994 	if (priv->do_link_poll)
4995 		kthread_stop(priv->poll_thread);
4996 	else
4997 		fsl_mc_free_irqs(ls_dev);
4998 
4999 	dpaa2_eth_free_rings(priv);
5000 	free_percpu(priv->fd);
5001 	free_percpu(priv->sgt_cache);
5002 	free_percpu(priv->percpu_stats);
5003 	free_percpu(priv->percpu_extras);
5004 
5005 	dpaa2_eth_del_ch_napi(priv);
5006 	dpaa2_eth_free_dpbps(priv);
5007 	dpaa2_eth_free_dpio(priv);
5008 	dpaa2_eth_free_dpni(priv);
5009 	if (priv->onestep_reg_base)
5010 		iounmap(priv->onestep_reg_base);
5011 
5012 	fsl_mc_portal_free(priv->mc_io);
5013 
5014 	destroy_workqueue(priv->dpaa2_ptp_wq);
5015 
5016 	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5017 
5018 	free_netdev(net_dev);
5019 
5020 	return 0;
5021 }
5022 
5023 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
5024 	{
5025 		.vendor = FSL_MC_VENDOR_FREESCALE,
5026 		.obj_type = "dpni",
5027 	},
5028 	{ .vendor = 0x0 }
5029 };
5030 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
5031 
5032 static struct fsl_mc_driver dpaa2_eth_driver = {
5033 	.driver = {
5034 		.name = KBUILD_MODNAME,
5035 		.owner = THIS_MODULE,
5036 	},
5037 	.probe = dpaa2_eth_probe,
5038 	.remove = dpaa2_eth_remove,
5039 	.match_id_table = dpaa2_eth_match_id_table
5040 };
5041 
5042 static int __init dpaa2_eth_driver_init(void)
5043 {
5044 	int err;
5045 
5046 	dpaa2_eth_dbg_init();
5047 	err = fsl_mc_driver_register(&dpaa2_eth_driver);
5048 	if (err) {
5049 		dpaa2_eth_dbg_exit();
5050 		return err;
5051 	}
5052 
5053 	return 0;
5054 }
5055 
5056 static void __exit dpaa2_eth_driver_exit(void)
5057 {
5058 	dpaa2_eth_dbg_exit();
5059 	fsl_mc_driver_unregister(&dpaa2_eth_driver);
5060 }
5061 
5062 module_init(dpaa2_eth_driver_init);
5063 module_exit(dpaa2_eth_driver_exit);
5064