1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2022 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/kthread.h>
12 #include <linux/iommu.h>
13 #include <linux/fsl/mc.h>
14 #include <linux/bpf.h>
15 #include <linux/bpf_trace.h>
16 #include <linux/fsl/ptp_qoriq.h>
17 #include <linux/ptp_classify.h>
18 #include <net/pkt_cls.h>
19 #include <net/sock.h>
20 #include <net/tso.h>
21 #include <net/xdp_sock_drv.h>
22 
23 #include "dpaa2-eth.h"
24 
25 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
26  * using these trace events only need to #include the trace header
27  */
28 #define CREATE_TRACE_POINTS
29 #include "dpaa2-eth-trace.h"
30 
31 MODULE_LICENSE("Dual BSD/GPL");
32 MODULE_AUTHOR("Freescale Semiconductor, Inc");
33 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
34 
35 struct ptp_qoriq *dpaa2_ptp;
36 EXPORT_SYMBOL(dpaa2_ptp);
37 
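/* Detect which optional firmware features are available for this DPNI.
 * Currently this only checks whether the one-step PTP configuration
 * register can be programmed directly.
 */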
38 static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
39 {
40 	priv->features = 0;
41 
42 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
43 				   DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
44 		priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
45 }
46 
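/* Update the one-step PTP timestamping configuration through a
 * dpni_set_single_step_cfg() firmware command (the indirect method).
 */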
47 static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
48 					      u32 offset, u8 udp)
49 {
50 	struct dpni_single_step_cfg cfg;
51 
52 	cfg.en = 1;
53 	cfg.ch_update = udp;
54 	cfg.offset = offset;
55 	cfg.peer_delay = 0;
56 
57 	if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
58 		WARN_ONCE(1, "Failed to set single step register");
59 }
60 
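/* Update the one-step PTP timestamping configuration by writing the
 * ioremapped single-step register directly, without a firmware command.
 */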
61 static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
62 					    u32 offset, u8 udp)
63 {
64 	u32 val = 0;
65 
66 	val = DPAA2_PTP_SINGLE_STEP_ENABLE |
67 	       DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
68 
69 	if (udp)
70 		val |= DPAA2_PTP_SINGLE_STEP_CH;
71 
72 	if (priv->onestep_reg_base)
73 		writel(val, priv->onestep_reg_base);
74 }
75 
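/* Select the one-step register update method. Default to the indirect
 * (firmware command) path and switch to direct register writes only if the
 * feature is supported and the register can be retrieved and mapped.
 */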
76 static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
77 {
78 	struct device *dev = priv->net_dev->dev.parent;
79 	struct dpni_single_step_cfg ptp_cfg;
80 
81 	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
82 
83 	if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
84 		return;
85 
86 	if (dpni_get_single_step_cfg(priv->mc_io, 0,
87 				     priv->mc_token, &ptp_cfg)) {
88 		dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
89 		return;
90 	}
91 
92 	if (!ptp_cfg.ptp_onestep_reg_base) {
93 		dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
94 		return;
95 	}
96 
97 	priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
98 					 sizeof(u32));
99 	if (!priv->onestep_reg_base) {
100 		dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
101 		return;
102 	}
103 
104 	priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
105 }
106 
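/* Translate an I/O virtual address taken from a frame descriptor into a
 * kernel virtual address, going through the IOMMU mapping if one exists.
 */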
107 void *dpaa2_iova_to_virt(struct iommu_domain *domain,
108 			 dma_addr_t iova_addr)
109 {
110 	phys_addr_t phys_addr;
111 
112 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
113 
114 	return phys_to_virt(phys_addr);
115 }
116 
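/* Mark the skb checksum as CHECKSUM_UNNECESSARY if Rx checksum offload is
 * enabled and hardware reported both the L3 and L4 checksums as valid.
 */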
117 static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
118 				       u32 fd_status,
119 				       struct sk_buff *skb)
120 {
121 	skb_checksum_none_assert(skb);
122 
123 	/* HW checksum validation is disabled, nothing to do here */
124 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
125 		return;
126 
127 	/* Read checksum validation bits */
128 	if (!((fd_status & DPAA2_FAS_L3CV) &&
129 	      (fd_status & DPAA2_FAS_L4CV)))
130 		return;
131 
132 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
133 	skb->ip_summed = CHECKSUM_UNNECESSARY;
134 }
135 
136 /* Free a received FD.
137  * Not to be used for Tx conf FDs or on any other paths.
138  */
139 static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
140 				 const struct dpaa2_fd *fd,
141 				 void *vaddr)
142 {
143 	struct device *dev = priv->net_dev->dev.parent;
144 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
145 	u8 fd_format = dpaa2_fd_get_format(fd);
146 	struct dpaa2_sg_entry *sgt;
147 	void *sg_vaddr;
148 	int i;
149 
150 	/* If single buffer frame, just free the data buffer */
151 	if (fd_format == dpaa2_fd_single)
152 		goto free_buf;
153 	else if (fd_format != dpaa2_fd_sg)
154 		/* We don't support any other format */
155 		return;
156 
157 	/* For S/G frames, we first need to free all SG entries
158 	 * except the first one, which was taken care of already
159 	 */
160 	sgt = vaddr + dpaa2_fd_get_offset(fd);
161 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
162 		addr = dpaa2_sg_get_addr(&sgt[i]);
163 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
164 		dma_unmap_page(dev, addr, priv->rx_buf_size,
165 			       DMA_BIDIRECTIONAL);
166 
167 		free_pages((unsigned long)sg_vaddr, 0);
168 		if (dpaa2_sg_is_final(&sgt[i]))
169 			break;
170 	}
171 
172 free_buf:
173 	free_pages((unsigned long)vaddr, 0);
174 }
175 
176 /* Build a linear skb based on a single-buffer frame descriptor */
177 static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
178 						  const struct dpaa2_fd *fd,
179 						  void *fd_vaddr)
180 {
181 	struct sk_buff *skb = NULL;
182 	u16 fd_offset = dpaa2_fd_get_offset(fd);
183 	u32 fd_length = dpaa2_fd_get_len(fd);
184 
185 	ch->buf_count--;
186 
187 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
188 	if (unlikely(!skb))
189 		return NULL;
190 
191 	skb_reserve(skb, fd_offset);
192 	skb_put(skb, fd_length);
193 
194 	return skb;
195 }
196 
197 /* Build a non-linear (fragmented) skb based on an S/G table */
198 static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
199 						struct dpaa2_eth_channel *ch,
200 						struct dpaa2_sg_entry *sgt)
201 {
202 	struct sk_buff *skb = NULL;
203 	struct device *dev = priv->net_dev->dev.parent;
204 	void *sg_vaddr;
205 	dma_addr_t sg_addr;
206 	u16 sg_offset;
207 	u32 sg_length;
208 	struct page *page, *head_page;
209 	int page_offset;
210 	int i;
211 
212 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
213 		struct dpaa2_sg_entry *sge = &sgt[i];
214 
215 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
216 		 * but this is the only format we may receive from HW anyway
217 		 */
218 
219 		/* Get the address and length from the S/G entry */
220 		sg_addr = dpaa2_sg_get_addr(sge);
221 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
222 		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
223 			       DMA_BIDIRECTIONAL);
224 
225 		sg_length = dpaa2_sg_get_len(sge);
226 
227 		if (i == 0) {
228 			/* We build the skb around the first data buffer */
229 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
230 			if (unlikely(!skb)) {
231 				/* Free the first SG entry now, since we already
232 				 * unmapped it and obtained the virtual address
233 				 */
234 				free_pages((unsigned long)sg_vaddr, 0);
235 
236 				/* We still need to subtract the buffers used
237 				 * by this FD from our software counter
238 				 */
239 				while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
240 				       !dpaa2_sg_is_final(&sgt[i]))
241 					i++;
242 				break;
243 			}
244 
245 			sg_offset = dpaa2_sg_get_offset(sge);
246 			skb_reserve(skb, sg_offset);
247 			skb_put(skb, sg_length);
248 		} else {
249 			/* Rest of the data buffers are stored as skb frags */
250 			page = virt_to_page(sg_vaddr);
251 			head_page = virt_to_head_page(sg_vaddr);
252 
253 			/* Offset in page (which may be compound).
254 			 * Data in subsequent SG entries is stored from the
255 			 * beginning of the buffer, so we don't need to add the
256 			 * sg_offset.
257 			 */
258 			page_offset = ((unsigned long)sg_vaddr &
259 				(PAGE_SIZE - 1)) +
260 				(page_address(page) - page_address(head_page));
261 
262 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
263 					sg_length, priv->rx_buf_size);
264 		}
265 
266 		if (dpaa2_sg_is_final(sge))
267 			break;
268 	}
269 
270 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
271 
272 	/* Count all data buffers + SG table buffer */
273 	ch->buf_count -= i + 2;
274 
275 	return skb;
276 }
277 
278 /* Free buffers acquired from the buffer pool or which were meant to
279  * be released into it
280  */
281 static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
282 				int count, bool xsk_zc)
283 {
284 	struct device *dev = priv->net_dev->dev.parent;
285 	struct dpaa2_eth_swa *swa;
286 	struct xdp_buff *xdp_buff;
287 	void *vaddr;
288 	int i;
289 
290 	for (i = 0; i < count; i++) {
291 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
292 
293 		if (!xsk_zc) {
294 			dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
295 				       DMA_BIDIRECTIONAL);
296 			free_pages((unsigned long)vaddr, 0);
297 		} else {
298 			swa = (struct dpaa2_eth_swa *)
299 				(vaddr + DPAA2_ETH_RX_HWA_SIZE);
300 			xdp_buff = swa->xsk.xdp_buff;
301 			xsk_buff_free(xdp_buff);
302 		}
303 	}
304 }
305 
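/* Stash a buffer address for recycling; once DPAA2_ETH_BUFS_PER_CMD
 * addresses have accumulated, release them back to the buffer pool in a
 * single command, or free them if the release keeps failing.
 */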
306 void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
307 			   struct dpaa2_eth_channel *ch,
308 			   dma_addr_t addr)
309 {
310 	int retries = 0;
311 	int err;
312 
313 	ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
314 	if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
315 		return;
316 
317 	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
318 					       ch->recycled_bufs,
319 					       ch->recycled_bufs_cnt)) == -EBUSY) {
320 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
321 			break;
322 		cpu_relax();
323 	}
324 
325 	if (err) {
326 		dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
327 				    ch->recycled_bufs_cnt, ch->xsk_zc);
328 		ch->buf_count -= ch->recycled_bufs_cnt;
329 	}
330 
331 	ch->recycled_bufs_cnt = 0;
332 }
333 
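/* Enqueue a batch of XDP frame descriptors on a Tx frame queue, retrying a
 * bounded number of times while the portal is busy. Returns the number of
 * FDs actually enqueued.
 */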
334 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
335 			       struct dpaa2_eth_fq *fq,
336 			       struct dpaa2_eth_xdp_fds *xdp_fds)
337 {
338 	int total_enqueued = 0, retries = 0, enqueued;
339 	struct dpaa2_eth_drv_stats *percpu_extras;
340 	int num_fds, err, max_retries;
341 	struct dpaa2_fd *fds;
342 
343 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
344 
345 	/* try to enqueue all the FDs until the max number of retries is hit */
346 	fds = xdp_fds->fds;
347 	num_fds = xdp_fds->num;
348 	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
349 	while (total_enqueued < num_fds && retries < max_retries) {
350 		err = priv->enqueue(priv, fq, &fds[total_enqueued],
351 				    0, num_fds - total_enqueued, &enqueued);
352 		if (err == -EBUSY) {
353 			percpu_extras->tx_portal_busy += ++retries;
354 			continue;
355 		}
356 		total_enqueued += enqueued;
357 	}
358 	xdp_fds->num = 0;
359 
360 	return total_enqueued;
361 }
362 
363 static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
364 				   struct dpaa2_eth_channel *ch,
365 				   struct dpaa2_eth_fq *fq)
366 {
367 	struct rtnl_link_stats64 *percpu_stats;
368 	struct dpaa2_fd *fds;
369 	int enqueued, i;
370 
371 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
372 
373 	/* Enqueue the array of XDP_TX frames */
374 	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
375 
376 	/* update statistics */
377 	percpu_stats->tx_packets += enqueued;
378 	fds = fq->xdp_tx_fds.fds;
379 	for (i = 0; i < enqueued; i++) {
380 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
381 		ch->stats.xdp_tx++;
382 	}
383 	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
384 		dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
385 		percpu_stats->tx_errors++;
386 		ch->stats.xdp_tx_err++;
387 	}
388 	fq->xdp_tx_fds.num = 0;
389 }
390 
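/* Queue an XDP_TX frame: mark the FD so hardware releases the buffer
 * straight back to the pool (no Tx confirmation), add it to the per-queue
 * batch and flush once DEV_MAP_BULK_SIZE frames have accumulated.
 */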
391 void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
392 			   struct dpaa2_eth_channel *ch,
393 			   struct dpaa2_fd *fd,
394 			   void *buf_start, u16 queue_id)
395 {
396 	struct dpaa2_faead *faead;
397 	struct dpaa2_fd *dest_fd;
398 	struct dpaa2_eth_fq *fq;
399 	u32 ctrl, frc;
400 
401 	/* Mark the egress frame hardware annotation area as valid */
402 	frc = dpaa2_fd_get_frc(fd);
403 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
404 	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
405 
406 	/* Instruct hardware to release the FD buffer directly into
407 	 * the buffer pool once transmission is completed, instead of
408 	 * sending a Tx confirmation frame to us
409 	 */
410 	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
411 	faead = dpaa2_get_faead(buf_start, false);
412 	faead->ctrl = cpu_to_le32(ctrl);
413 	faead->conf_fqid = 0;
414 
415 	fq = &priv->fq[queue_id];
416 	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
417 	memcpy(dest_fd, fd, sizeof(*dest_fd));
418 
419 	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
420 		return;
421 
422 	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
423 }
424 
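/* Run the attached XDP program, if any, on a single-buffer Rx frame and
 * handle the resulting action (PASS, TX, REDIRECT, DROP/ABORTED).
 * Returns the XDP action so the caller knows whether to continue with
 * regular skb processing.
 */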
425 static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
426 			     struct dpaa2_eth_channel *ch,
427 			     struct dpaa2_eth_fq *rx_fq,
428 			     struct dpaa2_fd *fd, void *vaddr)
429 {
430 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
431 	struct bpf_prog *xdp_prog;
432 	struct xdp_buff xdp;
433 	u32 xdp_act = XDP_PASS;
434 	int err, offset;
435 
436 	xdp_prog = READ_ONCE(ch->xdp.prog);
437 	if (!xdp_prog)
438 		goto out;
439 
440 	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
441 	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
442 	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
443 			 dpaa2_fd_get_len(fd), false);
444 
445 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
446 
447 	/* xdp.data pointer may have changed */
448 	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
449 	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
450 
451 	switch (xdp_act) {
452 	case XDP_PASS:
453 		break;
454 	case XDP_TX:
455 		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
456 		break;
457 	default:
458 		bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
459 		fallthrough;
460 	case XDP_ABORTED:
461 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
462 		fallthrough;
463 	case XDP_DROP:
464 		dpaa2_eth_recycle_buf(priv, ch, addr);
465 		ch->stats.xdp_drop++;
466 		break;
467 	case XDP_REDIRECT:
468 		dma_unmap_page(priv->net_dev->dev.parent, addr,
469 			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
470 		ch->buf_count--;
471 
472 		/* Allow redirect use of full headroom */
473 		xdp.data_hard_start = vaddr;
474 		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
475 
476 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
477 		if (unlikely(err)) {
478 			addr = dma_map_page(priv->net_dev->dev.parent,
479 					    virt_to_page(vaddr), 0,
480 					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
481 			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
482 				free_pages((unsigned long)vaddr, 0);
483 			} else {
484 				ch->buf_count++;
485 				dpaa2_eth_recycle_buf(priv, ch, addr);
486 			}
487 			ch->stats.xdp_drop++;
488 		} else {
489 			ch->stats.xdp_redirect++;
490 		}
491 		break;
492 	}
493 
494 	ch->xdp.res |= xdp_act;
495 out:
496 	return xdp_act;
497 }
498 
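/* Allocate a new skb, copy the frame contents into it and recycle the
 * original hardware buffer, which can then go back to the pool right away.
 */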
499 struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
500 				    struct dpaa2_eth_channel *ch,
501 				    const struct dpaa2_fd *fd, u32 fd_length,
502 				    void *fd_vaddr)
503 {
504 	u16 fd_offset = dpaa2_fd_get_offset(fd);
505 	struct sk_buff *skb = NULL;
506 	unsigned int skb_len;
507 
508 	skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
509 
510 	skb = napi_alloc_skb(&ch->napi, skb_len);
511 	if (!skb)
512 		return NULL;
513 
514 	skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
515 	skb_put(skb, fd_length);
516 
517 	memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
518 
519 	dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
520 
521 	return skb;
522 }
523 
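/* For frames no longer than the rx_copybreak threshold, build the skb by
 * copying the data rather than handing over the hardware buffer.
 */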
524 static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
525 					   const struct dpaa2_fd *fd,
526 					   void *fd_vaddr)
527 {
528 	struct dpaa2_eth_priv *priv = ch->priv;
529 	u32 fd_length = dpaa2_fd_get_len(fd);
530 
531 	if (fd_length > priv->rx_copybreak)
532 		return NULL;
533 
534 	return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
535 }
536 
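/* Finish Rx processing of an skb: fill in the hardware timestamp if
 * enabled, validate checksums, set the protocol and Rx queue, update
 * counters and add the skb to the channel's Rx list.
 */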
537 void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
538 			   struct dpaa2_eth_channel *ch,
539 			   const struct dpaa2_fd *fd, void *vaddr,
540 			   struct dpaa2_eth_fq *fq,
541 			   struct rtnl_link_stats64 *percpu_stats,
542 			   struct sk_buff *skb)
543 {
544 	struct dpaa2_fas *fas;
545 	u32 status = 0;
546 
547 	fas = dpaa2_get_fas(vaddr, false);
548 	prefetch(fas);
549 	prefetch(skb->data);
550 
551 	/* Get the timestamp value */
552 	if (priv->rx_tstamp) {
553 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
554 		__le64 *ts = dpaa2_get_ts(vaddr, false);
555 		u64 ns;
556 
557 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
558 
559 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
560 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
561 	}
562 
563 	/* Check if we need to validate the L3/L4 csums */
564 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
565 		status = le32_to_cpu(fas->status);
566 		dpaa2_eth_validate_rx_csum(priv, status, skb);
567 	}
568 
569 	skb->protocol = eth_type_trans(skb, priv->net_dev);
570 	skb_record_rx_queue(skb, fq->flowid);
571 
572 	percpu_stats->rx_packets++;
573 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
574 	ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
575 
576 	list_add_tail(&skb->list, ch->rx_list);
577 }
578 
579 /* Main Rx frame processing routine */
580 void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
581 		  struct dpaa2_eth_channel *ch,
582 		  const struct dpaa2_fd *fd,
583 		  struct dpaa2_eth_fq *fq)
584 {
585 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
586 	u8 fd_format = dpaa2_fd_get_format(fd);
587 	void *vaddr;
588 	struct sk_buff *skb;
589 	struct rtnl_link_stats64 *percpu_stats;
590 	struct dpaa2_eth_drv_stats *percpu_extras;
591 	struct device *dev = priv->net_dev->dev.parent;
592 	void *buf_data;
593 	u32 xdp_act;
594 
595 	/* Tracing point */
596 	trace_dpaa2_rx_fd(priv->net_dev, fd);
597 
598 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
599 	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
600 				DMA_BIDIRECTIONAL);
601 
602 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
603 	prefetch(buf_data);
604 
605 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
606 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
607 
608 	if (fd_format == dpaa2_fd_single) {
609 		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
610 		if (xdp_act != XDP_PASS) {
611 			percpu_stats->rx_packets++;
612 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
613 			return;
614 		}
615 
616 		skb = dpaa2_eth_copybreak(ch, fd, vaddr);
617 		if (!skb) {
618 			dma_unmap_page(dev, addr, priv->rx_buf_size,
619 				       DMA_BIDIRECTIONAL);
620 			skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
621 		}
622 	} else if (fd_format == dpaa2_fd_sg) {
623 		WARN_ON(priv->xdp_prog);
624 
625 		dma_unmap_page(dev, addr, priv->rx_buf_size,
626 			       DMA_BIDIRECTIONAL);
627 		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
628 		free_pages((unsigned long)vaddr, 0);
629 		percpu_extras->rx_sg_frames++;
630 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
631 	} else {
632 		/* We don't support any other format */
633 		goto err_frame_format;
634 	}
635 
636 	if (unlikely(!skb))
637 		goto err_build_skb;
638 
639 	dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
640 	return;
641 
642 err_build_skb:
643 	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
644 err_frame_format:
645 	percpu_stats->rx_dropped++;
646 }
647 
648 /* Processing of Rx frames received on the error FQ
649  * We check and print the error bits and then free the frame
650  */
651 static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
652 			     struct dpaa2_eth_channel *ch,
653 			     const struct dpaa2_fd *fd,
654 			     struct dpaa2_eth_fq *fq __always_unused)
655 {
656 	struct device *dev = priv->net_dev->dev.parent;
657 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
658 	u8 fd_format = dpaa2_fd_get_format(fd);
659 	struct rtnl_link_stats64 *percpu_stats;
660 	struct dpaa2_eth_trap_item *trap_item;
661 	struct dpaa2_fapr *fapr;
662 	struct sk_buff *skb;
663 	void *buf_data;
664 	void *vaddr;
665 
666 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
667 	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
668 				DMA_BIDIRECTIONAL);
669 
670 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
671 
672 	if (fd_format == dpaa2_fd_single) {
673 		dma_unmap_page(dev, addr, priv->rx_buf_size,
674 			       DMA_BIDIRECTIONAL);
675 		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
676 	} else if (fd_format == dpaa2_fd_sg) {
677 		dma_unmap_page(dev, addr, priv->rx_buf_size,
678 			       DMA_BIDIRECTIONAL);
679 		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
680 		free_pages((unsigned long)vaddr, 0);
681 	} else {
682 		/* We don't support any other format */
683 		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
684 		goto err_frame_format;
685 	}
686 
687 	fapr = dpaa2_get_fapr(vaddr, false);
688 	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
689 	if (trap_item)
690 		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
691 				    &priv->devlink_port, NULL);
692 	consume_skb(skb);
693 
694 err_frame_format:
695 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
696 	percpu_stats->rx_errors++;
697 	ch->buf_count--;
698 }
699 
700 /* Consume all frames pull-dequeued into the store. This is the simplest way to
701  * make sure we don't accidentally issue another volatile dequeue which would
702  * overwrite (leak) frames already in the store.
703  *
704  * Observance of NAPI budget is not our concern, leaving that to the caller.
705  */
706 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
707 				    struct dpaa2_eth_fq **src)
708 {
709 	struct dpaa2_eth_priv *priv = ch->priv;
710 	struct dpaa2_eth_fq *fq = NULL;
711 	struct dpaa2_dq *dq;
712 	const struct dpaa2_fd *fd;
713 	int cleaned = 0, retries = 0;
714 	int is_last;
715 
716 	do {
717 		dq = dpaa2_io_store_next(ch->store, &is_last);
718 		if (unlikely(!dq)) {
719 			/* If we're here, we *must* have placed a
720 			 * volatile dequeue command, so keep reading through
721 			 * the store until we get some sort of valid response
722 			 * token (either a valid frame or an "empty dequeue")
723 			 */
724 			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
725 				netdev_err_once(priv->net_dev,
726 						"Unable to read a valid dequeue response\n");
727 				return -ETIMEDOUT;
728 			}
729 			continue;
730 		}
731 
732 		fd = dpaa2_dq_fd(dq);
733 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
734 
735 		fq->consume(priv, ch, fd, fq);
736 		cleaned++;
737 		retries = 0;
738 	} while (!is_last);
739 
740 	if (!cleaned)
741 		return 0;
742 
743 	fq->stats.frames += cleaned;
744 	ch->stats.frames += cleaned;
745 	ch->stats.frames_per_cdan += cleaned;
746 
747 	/* A dequeue operation only pulls frames from a single queue
748 	 * into the store. Return the frame queue as an out param.
749 	 */
750 	if (src)
751 		*src = fq;
752 
753 	return cleaned;
754 }
755 
756 static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
757 			       u8 *msgtype, u8 *twostep, u8 *udp,
758 			       u16 *correction_offset,
759 			       u16 *origintimestamp_offset)
760 {
761 	unsigned int ptp_class;
762 	struct ptp_header *hdr;
763 	unsigned int type;
764 	u8 *base;
765 
766 	ptp_class = ptp_classify_raw(skb);
767 	if (ptp_class == PTP_CLASS_NONE)
768 		return -EINVAL;
769 
770 	hdr = ptp_parse_header(skb, ptp_class);
771 	if (!hdr)
772 		return -EINVAL;
773 
774 	*msgtype = ptp_get_msgtype(hdr, ptp_class);
775 	*twostep = hdr->flag_field[0] & 0x2;
776 
777 	type = ptp_class & PTP_CLASS_PMASK;
778 	if (type == PTP_CLASS_IPV4 ||
779 	    type == PTP_CLASS_IPV6)
780 		*udp = 1;
781 	else
782 		*udp = 0;
783 
784 	base = skb_mac_header(skb);
785 	*correction_offset = (u8 *)&hdr->correction - base;
786 	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
787 
788 	return 0;
789 }
790 
791 /* Configure the egress frame annotation for timestamp update */
792 static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
793 				       struct dpaa2_fd *fd,
794 				       void *buf_start,
795 				       struct sk_buff *skb)
796 {
797 	struct ptp_tstamp origin_timestamp;
798 	u8 msgtype, twostep, udp;
799 	struct dpaa2_faead *faead;
800 	struct dpaa2_fas *fas;
801 	struct timespec64 ts;
802 	u16 offset1, offset2;
803 	u32 ctrl, frc;
804 	__le64 *ns;
805 	u8 *data;
806 
807 	/* Mark the egress frame annotation area as valid */
808 	frc = dpaa2_fd_get_frc(fd);
809 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
810 
811 	/* Set hardware annotation size */
812 	ctrl = dpaa2_fd_get_ctrl(fd);
813 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
814 
815 	/* Enable UPD (update prepended data) bit in FAEAD field of
816 	 * hardware frame annotation area
817 	 */
818 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
819 	faead = dpaa2_get_faead(buf_start, true);
820 	faead->ctrl = cpu_to_le32(ctrl);
821 
822 	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
823 		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
824 					&offset1, &offset2) ||
825 		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
826 			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
827 			return;
828 		}
829 
830 		/* Mark the frame annotation status as valid */
831 		frc = dpaa2_fd_get_frc(fd);
832 		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
833 
834 		/* Mark the PTP flag for one step timestamping */
835 		fas = dpaa2_get_fas(buf_start, true);
836 		fas->status = cpu_to_le32(DPAA2_FAS_PTP);
837 
838 		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
839 		ns = dpaa2_get_ts(buf_start, true);
840 		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
841 				  DPAA2_PTP_CLK_PERIOD_NS);
842 
843 		/* Write the current time into the PTP message originTimestamp field */
844 		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
845 		data = skb_mac_header(skb);
846 		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
847 		*(__be32 *)(data + offset2 + 2) =
848 			htonl(origin_timestamp.sec_lsb);
849 		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
850 
851 		if (priv->ptp_correction_off == offset1)
852 			return;
853 
854 		priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
855 		priv->ptp_correction_off = offset1;
856 
857 	}
858 }
859 
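/* Get a buffer for a software-built scatter-gather table, either from the
 * per-CPU SGT cache or freshly allocated, and zero it out.
 */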
860 void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
861 {
862 	struct dpaa2_eth_sgt_cache *sgt_cache;
863 	void *sgt_buf = NULL;
864 	int sgt_buf_size;
865 
866 	sgt_cache = this_cpu_ptr(priv->sgt_cache);
867 	sgt_buf_size = priv->tx_data_offset +
868 		DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
869 
870 	if (sgt_cache->count == 0)
871 		sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
872 	else
873 		sgt_buf = sgt_cache->buf[--sgt_cache->count];
874 	if (!sgt_buf)
875 		return NULL;
876 
877 	memset(sgt_buf, 0, sgt_buf_size);
878 
879 	return sgt_buf;
880 }
881 
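/* Return an SGT buffer to the per-CPU cache, or free it if the cache is
 * already full.
 */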
882 void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
883 {
884 	struct dpaa2_eth_sgt_cache *sgt_cache;
885 
886 	sgt_cache = this_cpu_ptr(priv->sgt_cache);
887 	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
888 		skb_free_frag(sgt_buf);
889 	else
890 		sgt_cache->buf[sgt_cache->count++] = sgt_buf;
891 }
892 
893 /* Create a frame descriptor based on a fragmented skb */
894 static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
895 				 struct sk_buff *skb,
896 				 struct dpaa2_fd *fd,
897 				 void **swa_addr)
898 {
899 	struct device *dev = priv->net_dev->dev.parent;
900 	void *sgt_buf = NULL;
901 	dma_addr_t addr;
902 	int nr_frags = skb_shinfo(skb)->nr_frags;
903 	struct dpaa2_sg_entry *sgt;
904 	int i, err;
905 	int sgt_buf_size;
906 	struct scatterlist *scl, *crt_scl;
907 	int num_sg;
908 	int num_dma_bufs;
909 	struct dpaa2_eth_swa *swa;
910 
911 	/* Create and map scatterlist.
912 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
913 	 * to go beyond nr_frags+1.
914 	 * Note: We don't support chained scatterlists
915 	 */
916 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
917 		return -EINVAL;
918 
919 	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
920 	if (unlikely(!scl))
921 		return -ENOMEM;
922 
923 	sg_init_table(scl, nr_frags + 1);
924 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
925 	if (unlikely(num_sg < 0)) {
926 		err = -ENOMEM;
927 		goto dma_map_sg_failed;
928 	}
929 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
930 	if (unlikely(!num_dma_bufs)) {
931 		err = -ENOMEM;
932 		goto dma_map_sg_failed;
933 	}
934 
935 	/* Prepare the HW SGT structure */
936 	sgt_buf_size = priv->tx_data_offset +
937 		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
938 	sgt_buf = dpaa2_eth_sgt_get(priv);
939 	if (unlikely(!sgt_buf)) {
940 		err = -ENOMEM;
941 		goto sgt_buf_alloc_failed;
942 	}
943 
944 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
945 
946 	/* Fill in the HW SGT structure.
947 	 *
948 	 * sgt_buf is zeroed out, so the following fields are implicit
949 	 * in all sgt entries:
950 	 *   - offset is 0
951 	 *   - format is 'dpaa2_sg_single'
952 	 */
953 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
954 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
955 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
956 	}
957 	dpaa2_sg_set_final(&sgt[i - 1], true);
958 
959 	/* Store the skb backpointer in the SGT buffer.
960 	 * Fit the scatterlist and the number of buffers alongside the
961 	 * skb backpointer in the software annotation area. We'll need
962 	 * all of them on Tx Conf.
963 	 */
964 	*swa_addr = (void *)sgt_buf;
965 	swa = (struct dpaa2_eth_swa *)sgt_buf;
966 	swa->type = DPAA2_ETH_SWA_SG;
967 	swa->sg.skb = skb;
968 	swa->sg.scl = scl;
969 	swa->sg.num_sg = num_sg;
970 	swa->sg.sgt_size = sgt_buf_size;
971 
972 	/* Separately map the SGT buffer */
973 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
974 	if (unlikely(dma_mapping_error(dev, addr))) {
975 		err = -ENOMEM;
976 		goto dma_map_single_failed;
977 	}
978 	memset(fd, 0, sizeof(struct dpaa2_fd));
979 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
980 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
981 	dpaa2_fd_set_addr(fd, addr);
982 	dpaa2_fd_set_len(fd, skb->len);
983 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
984 
985 	return 0;
986 
987 dma_map_single_failed:
988 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
989 sgt_buf_alloc_failed:
990 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
991 dma_map_sg_failed:
992 	kfree(scl);
993 	return err;
994 }
995 
996 /* Create a SG frame descriptor based on a linear skb.
997  *
998  * This function is used on the Tx path when the skb headroom is not large
999  * enough for the HW requirements, thus instead of realloc-ing the skb we
1000  * create a SG frame descriptor with only one entry.
1001  */
1002 static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
1003 					    struct sk_buff *skb,
1004 					    struct dpaa2_fd *fd,
1005 					    void **swa_addr)
1006 {
1007 	struct device *dev = priv->net_dev->dev.parent;
1008 	struct dpaa2_sg_entry *sgt;
1009 	struct dpaa2_eth_swa *swa;
1010 	dma_addr_t addr, sgt_addr;
1011 	void *sgt_buf = NULL;
1012 	int sgt_buf_size;
1013 	int err;
1014 
1015 	/* Prepare the HW SGT structure */
1016 	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
1017 	sgt_buf = dpaa2_eth_sgt_get(priv);
1018 	if (unlikely(!sgt_buf))
1019 		return -ENOMEM;
1020 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1021 
1022 	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
1023 	if (unlikely(dma_mapping_error(dev, addr))) {
1024 		err = -ENOMEM;
1025 		goto data_map_failed;
1026 	}
1027 
1028 	/* Fill in the HW SGT structure */
1029 	dpaa2_sg_set_addr(sgt, addr);
1030 	dpaa2_sg_set_len(sgt, skb->len);
1031 	dpaa2_sg_set_final(sgt, true);
1032 
1033 	/* Store the skb backpointer in the SGT buffer */
1034 	*swa_addr = (void *)sgt_buf;
1035 	swa = (struct dpaa2_eth_swa *)sgt_buf;
1036 	swa->type = DPAA2_ETH_SWA_SINGLE;
1037 	swa->single.skb = skb;
1038 	swa->single.sgt_size = sgt_buf_size;
1039 
1040 	/* Separately map the SGT buffer */
1041 	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1042 	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1043 		err = -ENOMEM;
1044 		goto sgt_map_failed;
1045 	}
1046 
1047 	memset(fd, 0, sizeof(struct dpaa2_fd));
1048 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1049 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1050 	dpaa2_fd_set_addr(fd, sgt_addr);
1051 	dpaa2_fd_set_len(fd, skb->len);
1052 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1053 
1054 	return 0;
1055 
1056 sgt_map_failed:
1057 	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1058 data_map_failed:
1059 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
1060 
1061 	return err;
1062 }
1063 
1064 /* Create a frame descriptor based on a linear skb */
1065 static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
1066 				     struct sk_buff *skb,
1067 				     struct dpaa2_fd *fd,
1068 				     void **swa_addr)
1069 {
1070 	struct device *dev = priv->net_dev->dev.parent;
1071 	u8 *buffer_start, *aligned_start;
1072 	struct dpaa2_eth_swa *swa;
1073 	dma_addr_t addr;
1074 
1075 	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
1076 
1077 	/* If there's enough room to align the FD address, do it.
1078 	 * It will help hardware optimize accesses.
1079 	 */
1080 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1081 				  DPAA2_ETH_TX_BUF_ALIGN);
1082 	if (aligned_start >= skb->head)
1083 		buffer_start = aligned_start;
1084 
1085 	/* Store a backpointer to the skb at the beginning of the buffer
1086 	 * (in the private data area) such that we can release it
1087 	 * on Tx confirm
1088 	 */
1089 	*swa_addr = (void *)buffer_start;
1090 	swa = (struct dpaa2_eth_swa *)buffer_start;
1091 	swa->type = DPAA2_ETH_SWA_SINGLE;
1092 	swa->single.skb = skb;
1093 
1094 	addr = dma_map_single(dev, buffer_start,
1095 			      skb_tail_pointer(skb) - buffer_start,
1096 			      DMA_BIDIRECTIONAL);
1097 	if (unlikely(dma_mapping_error(dev, addr)))
1098 		return -ENOMEM;
1099 
1100 	memset(fd, 0, sizeof(struct dpaa2_fd));
1101 	dpaa2_fd_set_addr(fd, addr);
1102 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1103 	dpaa2_fd_set_len(fd, skb->len);
1104 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
1105 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1106 
1107 	return 0;
1108 }
1109 
1110 /* FD freeing routine on the Tx path
1111  *
1112  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1113  * back-pointed to is also freed.
1114  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1115  * dpaa2_eth_tx().
1116  */
1117 void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
1118 			  struct dpaa2_eth_channel *ch,
1119 			  struct dpaa2_eth_fq *fq,
1120 			  const struct dpaa2_fd *fd, bool in_napi)
1121 {
1122 	struct device *dev = priv->net_dev->dev.parent;
1123 	dma_addr_t fd_addr, sg_addr;
1124 	struct sk_buff *skb = NULL;
1125 	unsigned char *buffer_start;
1126 	struct dpaa2_eth_swa *swa;
1127 	u8 fd_format = dpaa2_fd_get_format(fd);
1128 	u32 fd_len = dpaa2_fd_get_len(fd);
1129 	struct dpaa2_sg_entry *sgt;
1130 	int should_free_skb = 1;
1131 	void *tso_hdr;
1132 	int i;
1133 
1134 	fd_addr = dpaa2_fd_get_addr(fd);
1135 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1136 	swa = (struct dpaa2_eth_swa *)buffer_start;
1137 
1138 	if (fd_format == dpaa2_fd_single) {
1139 		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1140 			skb = swa->single.skb;
1141 			/* Accessing the skb buffer is safe before dma unmap,
1142 			 * because we didn't map the actual skb shell.
1143 			 */
1144 			dma_unmap_single(dev, fd_addr,
1145 					 skb_tail_pointer(skb) - buffer_start,
1146 					 DMA_BIDIRECTIONAL);
1147 		} else {
1148 			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1149 			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1150 					 DMA_BIDIRECTIONAL);
1151 		}
1152 	} else if (fd_format == dpaa2_fd_sg) {
1153 		if (swa->type == DPAA2_ETH_SWA_SG) {
1154 			skb = swa->sg.skb;
1155 
1156 			/* Unmap the scatterlist */
1157 			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1158 				     DMA_BIDIRECTIONAL);
1159 			kfree(swa->sg.scl);
1160 
1161 			/* Unmap the SGT buffer */
1162 			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1163 					 DMA_BIDIRECTIONAL);
1164 		} else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
1165 			skb = swa->tso.skb;
1166 
1167 			sgt = (struct dpaa2_sg_entry *)(buffer_start +
1168 							priv->tx_data_offset);
1169 
1170 			/* Unmap the SGT buffer */
1171 			dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
1172 					 DMA_BIDIRECTIONAL);
1173 
1174 			/* Unmap and free the header */
1175 			tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
1176 			dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1177 					 DMA_TO_DEVICE);
1178 			kfree(tso_hdr);
1179 
1180 			/* Unmap the other SG entries for the data */
1181 			for (i = 1; i < swa->tso.num_sg; i++)
1182 				dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1183 						 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1184 
1185 			if (!swa->tso.is_last_fd)
1186 				should_free_skb = 0;
1187 		} else if (swa->type == DPAA2_ETH_SWA_XSK) {
1188 			/* Unmap the SGT Buffer */
1189 			dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
1190 					 DMA_BIDIRECTIONAL);
1191 		} else {
1192 			skb = swa->single.skb;
1193 
1194 			/* Unmap the SGT Buffer */
1195 			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1196 					 DMA_BIDIRECTIONAL);
1197 
1198 			sgt = (struct dpaa2_sg_entry *)(buffer_start +
1199 							priv->tx_data_offset);
1200 			sg_addr = dpaa2_sg_get_addr(sgt);
1201 			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1202 		}
1203 	} else {
1204 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
1205 		return;
1206 	}
1207 
1208 	if (swa->type == DPAA2_ETH_SWA_XSK) {
1209 		ch->xsk_tx_pkts_sent++;
1210 		dpaa2_eth_sgt_recycle(priv, buffer_start);
1211 		return;
1212 	}
1213 
1214 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1215 		fq->dq_frames++;
1216 		fq->dq_bytes += fd_len;
1217 	}
1218 
1219 	if (swa->type == DPAA2_ETH_SWA_XDP) {
1220 		xdp_return_frame(swa->xdp.xdpf);
1221 		return;
1222 	}
1223 
1224 	/* Get the timestamp value */
1225 	if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
1226 		if (skb->cb[0] == TX_TSTAMP) {
1227 			struct skb_shared_hwtstamps shhwtstamps;
1228 			__le64 *ts = dpaa2_get_ts(buffer_start, true);
1229 			u64 ns;
1230 
1231 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1232 
1233 			ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1234 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
1235 			skb_tstamp_tx(skb, &shhwtstamps);
1236 		} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1237 			mutex_unlock(&priv->onestep_tstamp_lock);
1238 		}
1239 	}
1240 
1241 	/* Free SGT buffer allocated on tx */
1242 	if (fd_format != dpaa2_fd_single)
1243 		dpaa2_eth_sgt_recycle(priv, buffer_start);
1244 
1245 	/* Move on with skb release. If we are just confirming multiple FDs
1246 	 * from the same TSO skb then only the last one will need to free the
1247 	 * skb.
1248 	 */
1249 	if (should_free_skb)
1250 		napi_consume_skb(skb, in_napi);
1251 }
1252 
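/* Software TSO: split a GSO skb into multiple S/G frame descriptors, each
 * carrying its own copy of the MAC/IP/TCP headers followed by at most
 * gso_size bytes of payload. On error, all FDs built so far are torn down.
 */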
1253 static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
1254 				  struct sk_buff *skb, struct dpaa2_fd *fd,
1255 				  int *num_fds, u32 *total_fds_len)
1256 {
1257 	struct device *dev = priv->net_dev->dev.parent;
1258 	int hdr_len, total_len, data_left, fd_len;
1259 	int num_sge, err, i, sgt_buf_size;
1260 	struct dpaa2_fd *fd_start = fd;
1261 	struct dpaa2_sg_entry *sgt;
1262 	struct dpaa2_eth_swa *swa;
1263 	dma_addr_t sgt_addr, addr;
1264 	dma_addr_t tso_hdr_dma;
1265 	unsigned int index = 0;
1266 	struct tso_t tso;
1267 	char *tso_hdr;
1268 	void *sgt_buf;
1269 
1270 	/* Initialize the TSO handler, and prepare the first payload */
1271 	hdr_len = tso_start(skb, &tso);
1272 	*total_fds_len = 0;
1273 
1274 	total_len = skb->len - hdr_len;
1275 	while (total_len > 0) {
1276 		/* Prepare the HW SGT structure for this frame */
1277 		sgt_buf = dpaa2_eth_sgt_get(priv);
1278 		if (unlikely(!sgt_buf)) {
1279 			netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
1280 			err = -ENOMEM;
1281 			goto err_sgt_get;
1282 		}
1283 		sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1284 
1285 		/* Determine the data length of this frame */
1286 		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1287 		total_len -= data_left;
1288 		fd_len = data_left + hdr_len;
1289 
1290 		/* Prepare packet headers: MAC + IP + TCP */
1291 		tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
1292 		if (!tso_hdr) {
1293 			err = -ENOMEM;
1294 			goto err_alloc_tso_hdr;
1295 		}
1296 
1297 		tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
1298 		tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1299 		if (dma_mapping_error(dev, tso_hdr_dma)) {
1300 			netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
1301 			err = -ENOMEM;
1302 			goto err_map_tso_hdr;
1303 		}
1304 
1305 		/* Setup the SG entry for the header */
1306 		dpaa2_sg_set_addr(sgt, tso_hdr_dma);
1307 		dpaa2_sg_set_len(sgt, hdr_len);
1308 		dpaa2_sg_set_final(sgt, data_left <= 0);
1309 
1310 		/* Compose the SG entries for each fragment of data */
1311 		num_sge = 1;
1312 		while (data_left > 0) {
1313 			int size;
1314 
1315 			/* Move to the next SG entry */
1316 			sgt++;
1317 			size = min_t(int, tso.size, data_left);
1318 
1319 			addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1320 			if (dma_mapping_error(dev, addr)) {
1321 				netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
1322 				err = -ENOMEM;
1323 				goto err_map_data;
1324 			}
1325 			dpaa2_sg_set_addr(sgt, addr);
1326 			dpaa2_sg_set_len(sgt, size);
1327 			dpaa2_sg_set_final(sgt, size == data_left);
1328 
1329 			num_sge++;
1330 
1331 			/* Build the data for the __next__ fragment */
1332 			data_left -= size;
1333 			tso_build_data(skb, &tso, size);
1334 		}
1335 
1336 		/* Store the skb backpointer in the SGT buffer */
1337 		sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
1338 		swa = (struct dpaa2_eth_swa *)sgt_buf;
1339 		swa->type = DPAA2_ETH_SWA_SW_TSO;
1340 		swa->tso.skb = skb;
1341 		swa->tso.num_sg = num_sge;
1342 		swa->tso.sgt_size = sgt_buf_size;
1343 		swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
1344 
1345 		/* Separately map the SGT buffer */
1346 		sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1347 		if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1348 			netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
1349 			err = -ENOMEM;
1350 			goto err_map_sgt;
1351 		}
1352 
1353 		/* Setup the frame descriptor */
1354 		memset(fd, 0, sizeof(struct dpaa2_fd));
1355 		dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1356 		dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1357 		dpaa2_fd_set_addr(fd, sgt_addr);
1358 		dpaa2_fd_set_len(fd, fd_len);
1359 		dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1360 
1361 		*total_fds_len += fd_len;
1362 		/* Advance to the next frame descriptor */
1363 		fd++;
1364 		index++;
1365 	}
1366 
1367 	*num_fds = index;
1368 
1369 	return 0;
1370 
1371 err_map_sgt:
1372 err_map_data:
1373 	/* Unmap all the data S/G entries for the current FD */
1374 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1375 	for (i = 1; i < num_sge; i++)
1376 		dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1377 				 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1378 
1379 	/* Unmap the header entry */
1380 	dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1381 err_map_tso_hdr:
1382 	kfree(tso_hdr);
1383 err_alloc_tso_hdr:
1384 	dpaa2_eth_sgt_recycle(priv, sgt_buf);
1385 err_sgt_get:
1386 	/* Free all the other FDs that were already fully created */
1387 	for (i = 0; i < index; i++)
1388 		dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
1389 
1390 	return err;
1391 }
1392 
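/* Main Tx routine: build one or more frame descriptors for the skb (single
 * buffer, S/G or software TSO), pick the Tx frame queue and enqueue. On
 * error, the FD resources and the skb itself are freed here.
 */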
1393 static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1394 				  struct net_device *net_dev)
1395 {
1396 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1397 	int total_enqueued = 0, retries = 0, enqueued;
1398 	struct dpaa2_eth_drv_stats *percpu_extras;
1399 	struct rtnl_link_stats64 *percpu_stats;
1400 	unsigned int needed_headroom;
1401 	int num_fds = 1, max_retries;
1402 	struct dpaa2_eth_fq *fq;
1403 	struct netdev_queue *nq;
1404 	struct dpaa2_fd *fd;
1405 	u16 queue_mapping;
1406 	void *swa = NULL;
1407 	u8 prio = 0;
1408 	int err, i;
1409 	u32 fd_len;
1410 
1411 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1412 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1413 	fd = (this_cpu_ptr(priv->fd))->array;
1414 
1415 	needed_headroom = dpaa2_eth_needed_headroom(skb);
1416 
1417 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
1418 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
1419 	 */
1420 	skb = skb_unshare(skb, GFP_ATOMIC);
1421 	if (unlikely(!skb)) {
1422 		/* skb_unshare() has already freed the skb */
1423 		percpu_stats->tx_dropped++;
1424 		return NETDEV_TX_OK;
1425 	}
1426 
1427 	/* Setup the FD fields */
1428 
1429 	if (skb_is_gso(skb)) {
1430 		err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
1431 		percpu_extras->tx_sg_frames += num_fds;
1432 		percpu_extras->tx_sg_bytes += fd_len;
1433 		percpu_extras->tx_tso_frames += num_fds;
1434 		percpu_extras->tx_tso_bytes += fd_len;
1435 	} else if (skb_is_nonlinear(skb)) {
1436 		err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
1437 		percpu_extras->tx_sg_frames++;
1438 		percpu_extras->tx_sg_bytes += skb->len;
1439 		fd_len = dpaa2_fd_get_len(fd);
1440 	} else if (skb_headroom(skb) < needed_headroom) {
1441 		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
1442 		percpu_extras->tx_sg_frames++;
1443 		percpu_extras->tx_sg_bytes += skb->len;
1444 		percpu_extras->tx_converted_sg_frames++;
1445 		percpu_extras->tx_converted_sg_bytes += skb->len;
1446 		fd_len = dpaa2_fd_get_len(fd);
1447 	} else {
1448 		err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
1449 		fd_len = dpaa2_fd_get_len(fd);
1450 	}
1451 
1452 	if (unlikely(err)) {
1453 		percpu_stats->tx_dropped++;
1454 		goto err_build_fd;
1455 	}
1456 
1457 	if (swa && skb->cb[0])
1458 		dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
1459 
1460 	/* Tracing point */
1461 	for (i = 0; i < num_fds; i++)
1462 		trace_dpaa2_tx_fd(net_dev, &fd[i]);
1463 
1464 	/* TxConf FQ selection relies on queue id from the stack.
1465 	 * In case of a forwarded frame from another DPNI interface, we choose
1466 	 * a queue affined to the same core that processed the Rx frame
1467 	 */
1468 	queue_mapping = skb_get_queue_mapping(skb);
1469 
1470 	if (net_dev->num_tc) {
1471 		prio = netdev_txq_to_tc(net_dev, queue_mapping);
1472 		/* Hardware interprets priority level 0 as being the highest,
1473 		 * so we need to do a reverse mapping to the netdev tc index
1474 		 */
1475 		prio = net_dev->num_tc - prio - 1;
1476 		/* We have only one FQ array entry for all Tx hardware queues
1477 		 * with the same flow id (but different priority levels)
1478 		 */
1479 		queue_mapping %= dpaa2_eth_queue_count(priv);
1480 	}
1481 	fq = &priv->fq[queue_mapping];
1482 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
1483 	netdev_tx_sent_queue(nq, fd_len);
1484 
1485 	/* Everything that happens after this enqueue might race with
1486 	 * the Tx confirmation callback for this frame
1487 	 */
1488 	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
1489 	while (total_enqueued < num_fds && retries < max_retries) {
1490 		err = priv->enqueue(priv, fq, &fd[total_enqueued],
1491 				    prio, num_fds - total_enqueued, &enqueued);
1492 		if (err == -EBUSY) {
1493 			retries++;
1494 			continue;
1495 		}
1496 
1497 		total_enqueued += enqueued;
1498 	}
1499 	percpu_extras->tx_portal_busy += retries;
1500 
1501 	if (unlikely(err < 0)) {
1502 		percpu_stats->tx_errors++;
1503 		/* Clean up everything, including freeing the skb */
1504 		dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
1505 		netdev_tx_completed_queue(nq, 1, fd_len);
1506 	} else {
1507 		percpu_stats->tx_packets += total_enqueued;
1508 		percpu_stats->tx_bytes += fd_len;
1509 	}
1510 
1511 	return NETDEV_TX_OK;
1512 
1513 err_build_fd:
1514 	dev_kfree_skb(skb);
1515 
1516 	return NETDEV_TX_OK;
1517 }
1518 
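/* Work handler transmitting the queued one-step timestamping PTP Sync
 * packets one at a time, serialized by the onestep_tstamp_lock mutex.
 */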
1519 static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1520 {
1521 	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1522 						   tx_onestep_tstamp);
1523 	struct sk_buff *skb;
1524 
1525 	while (true) {
1526 		skb = skb_dequeue(&priv->tx_skbs);
1527 		if (!skb)
1528 			return;
1529 
1530 		/* Take the lock just before transmitting a one-step timestamping
1531 		 * packet; it is released in dpaa2_eth_free_tx_fd() once the packet
1532 		 * is confirmed as sent by hardware, or when cleaning up after a
1533 		 * transmit failure.
1534 		 */
1535 		mutex_lock(&priv->onestep_tstamp_lock);
1536 		__dpaa2_eth_tx(skb, priv->net_dev);
1537 	}
1538 }
1539 
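/* ndo_start_xmit callback: record the per-skb timestamping request and
 * defer one-step PTP Sync packets to the dedicated workqueue; all other
 * packets are transmitted directly through __dpaa2_eth_tx().
 */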
1540 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1541 {
1542 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1543 	u8 msgtype, twostep, udp;
1544 	u16 offset1, offset2;
1545 
1546 	/* Use skb->cb[0] to store the per-skb timestamping request */
1547 	skb->cb[0] = 0;
1548 
1549 	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1550 		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1551 			skb->cb[0] = TX_TSTAMP;
1552 		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1553 			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1554 	}
1555 
1556 	/* TX for one-step timestamping PTP Sync packet */
1557 	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1558 		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1559 					 &offset1, &offset2))
1560 			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1561 				skb_queue_tail(&priv->tx_skbs, skb);
1562 				queue_work(priv->dpaa2_ptp_wq,
1563 					   &priv->tx_onestep_tstamp);
1564 				return NETDEV_TX_OK;
1565 			}
1566 		/* Fall back to two-step timestamping if this is not a one-step
1567 		 * timestamping PTP Sync packet
1568 		 */
1569 		skb->cb[0] = TX_TSTAMP;
1570 	}
1571 
1572 	/* TX for other packets */
1573 	return __dpaa2_eth_tx(skb, net_dev);
1574 }
1575 
1576 /* Tx confirmation frame processing routine */
1577 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1578 			      struct dpaa2_eth_channel *ch,
1579 			      const struct dpaa2_fd *fd,
1580 			      struct dpaa2_eth_fq *fq)
1581 {
1582 	struct rtnl_link_stats64 *percpu_stats;
1583 	struct dpaa2_eth_drv_stats *percpu_extras;
1584 	u32 fd_len = dpaa2_fd_get_len(fd);
1585 	u32 fd_errors;
1586 
1587 	/* Tracing point */
1588 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1589 
1590 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1591 	percpu_extras->tx_conf_frames++;
1592 	percpu_extras->tx_conf_bytes += fd_len;
1593 	ch->stats.bytes_per_cdan += fd_len;
1594 
1595 	/* Check frame errors in the FD field */
1596 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1597 	dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
1598 
1599 	if (likely(!fd_errors))
1600 		return;
1601 
1602 	if (net_ratelimit())
1603 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1604 			   fd_errors);
1605 
1606 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1607 	/* Tx-conf logically pertains to the egress path. */
1608 	percpu_stats->tx_errors++;
1609 }
1610 
1611 static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1612 					   bool enable)
1613 {
1614 	int err;
1615 
1616 	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1617 
1618 	if (err) {
1619 		netdev_err(priv->net_dev,
1620 			   "dpni_enable_vlan_filter failed\n");
1621 		return err;
1622 	}
1623 
1624 	return 0;
1625 }
1626 
1627 static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1628 {
1629 	int err;
1630 
1631 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1632 			       DPNI_OFF_RX_L3_CSUM, enable);
1633 	if (err) {
1634 		netdev_err(priv->net_dev,
1635 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
1636 		return err;
1637 	}
1638 
1639 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1640 			       DPNI_OFF_RX_L4_CSUM, enable);
1641 	if (err) {
1642 		netdev_err(priv->net_dev,
1643 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
1644 		return err;
1645 	}
1646 
1647 	return 0;
1648 }
1649 
1650 static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1651 {
1652 	int err;
1653 
1654 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1655 			       DPNI_OFF_TX_L3_CSUM, enable);
1656 	if (err) {
1657 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1658 		return err;
1659 	}
1660 
1661 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1662 			       DPNI_OFF_TX_L4_CSUM, enable);
1663 	if (err) {
1664 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1665 		return err;
1666 	}
1667 
1668 	return 0;
1669 }
1670 
1671 /* Perform a single release command to add buffers
1672  * to the specified buffer pool
1673  */
1674 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1675 			      struct dpaa2_eth_channel *ch)
1676 {
1677 	struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
1678 	struct device *dev = priv->net_dev->dev.parent;
1679 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1680 	struct dpaa2_eth_swa *swa;
1681 	struct page *page;
1682 	dma_addr_t addr;
1683 	int retries = 0;
1684 	int i = 0, err;
1685 	u32 batch;
1686 
1687 	/* Allocate buffers visible to WRIOP */
1688 	if (!ch->xsk_zc) {
1689 		for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1690 			/* Also allocate skb shared info and alignment padding.
1691 			 * There is one page for each Rx buffer. WRIOP sees
1692 			 * the entire page except for a tailroom reserved for
1693 			 * skb shared info
1694 			 */
1695 			page = dev_alloc_pages(0);
1696 			if (!page)
1697 				goto err_alloc;
1698 
1699 			addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1700 					    DMA_BIDIRECTIONAL);
1701 			if (unlikely(dma_mapping_error(dev, addr)))
1702 				goto err_map;
1703 
1704 			buf_array[i] = addr;
1705 
1706 			/* tracing point */
1707 			trace_dpaa2_eth_buf_seed(priv->net_dev,
1708 						 page_address(page),
1709 						 DPAA2_ETH_RX_BUF_RAW_SIZE,
1710 						 addr, priv->rx_buf_size,
1711 						 ch->bp->bpid);
1712 		}
1713 	} else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
1714 		/* Allocate XSK buffers for AF_XDP fast path in batches
1715 		 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
1716 		 * provide enough buffers at the moment
1717 		 */
1718 		batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
1719 					     DPAA2_ETH_BUFS_PER_CMD);
1720 		if (!batch)
1721 			goto err_alloc;
1722 
1723 		for (i = 0; i < batch; i++) {
1724 			swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
1725 						       DPAA2_ETH_RX_HWA_SIZE);
1726 			swa->xsk.xdp_buff = xdp_buffs[i];
1727 
1728 			addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
1729 			if (unlikely(dma_mapping_error(dev, addr)))
1730 				goto err_map;
1731 
1732 			buf_array[i] = addr;
1733 
1734 			trace_dpaa2_xsk_buf_seed(priv->net_dev,
1735 						 xdp_buffs[i]->data_hard_start,
1736 						 DPAA2_ETH_RX_BUF_RAW_SIZE,
1737 						 addr, priv->rx_buf_size,
1738 						 ch->bp->bpid);
1739 		}
1740 	}
1741 
1742 release_bufs:
1743 	/* In case the portal is busy, retry until successful */
1744 	while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
1745 					       buf_array, i)) == -EBUSY) {
1746 		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1747 			break;
1748 		cpu_relax();
1749 	}
1750 
1751 	/* If release command failed, clean up and bail out;
1752 	 * not much else we can do about it
1753 	 */
1754 	if (err) {
1755 		dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
1756 		return 0;
1757 	}
1758 
1759 	return i;
1760 
1761 err_map:
1762 	if (!ch->xsk_zc) {
1763 		__free_pages(page, 0);
1764 	} else {
1765 		for (; i < batch; i++)
1766 			xsk_buff_free(xdp_buffs[i]);
1767 	}
1768 err_alloc:
1769 	/* If we managed to allocate at least some buffers,
1770 	 * release them to hardware
1771 	 */
1772 	if (i)
1773 		goto release_bufs;
1774 
1775 	return 0;
1776 }
1777 
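/* Seed a channel's buffer pool up to DPAA2_ETH_NUM_BUFS buffers, issuing
 * one release command per batch of DPAA2_ETH_BUFS_PER_CMD. A short batch
 * means allocation failed, in which case we give up early with -ENOMEM.
 */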
1778 static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
1779 			       struct dpaa2_eth_channel *ch)
1780 {
1781 	int i;
1782 	int new_count;
1783 
1784 	for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
1785 		new_count = dpaa2_eth_add_bufs(priv, ch);
1786 		ch->buf_count += new_count;
1787 
1788 		if (new_count < DPAA2_ETH_BUFS_PER_CMD)
1789 			return -ENOMEM;
1790 	}
1791 
1792 	return 0;
1793 }
1794 
1795 static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
1796 {
1797 	struct net_device *net_dev = priv->net_dev;
1798 	struct dpaa2_eth_channel *channel;
1799 	int i, err = 0;
1800 
1801 	for (i = 0; i < priv->num_channels; i++) {
1802 		channel = priv->channel[i];
1803 
1804 		err = dpaa2_eth_seed_pool(priv, channel);
1805 
1806 		/* Not much to do; the buffer pool, though not filled up,
1807 		 * may still contain some buffers which would enable us
1808 		 * to limp on.
1809 		 */
1810 		if (err)
1811 			netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1812 				   channel->bp->dev->obj_desc.id,
1813 				   channel->bp->bpid);
1814 	}
1815 }
1816 
1817 /*
1818  * Drain the specified number of buffers from one of the DPNI's private buffer
1819  * pools.
1820  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1821  */
1822 static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
1823 				 int count)
1824 {
1825 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1826 	bool xsk_zc = false;
1827 	int retries = 0;
1828 	int i, ret;
1829 
1830 	for (i = 0; i < priv->num_channels; i++)
1831 		if (priv->channel[i]->bp->bpid == bpid)
1832 			xsk_zc = priv->channel[i]->xsk_zc;
1833 
1834 	do {
1835 		ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
1836 		if (ret < 0) {
1837 			if (ret == -EBUSY &&
1838 			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1839 				continue;
1840 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1841 			return;
1842 		}
1843 		dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
1844 		retries = 0;
1845 	} while (ret);
1846 }
1847 
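/* Empty a buffer pool: acquire buffers in batches of DPAA2_ETH_BUFS_PER_CMD,
 * then retry with single-buffer acquires to pick up any stragglers the
 * batched acquire may have left behind, and reset the per-channel counts.
 */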
1848 static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
1849 {
1850 	int i;
1851 
1852 	/* Drain the buffer pool */
1853 	dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
1854 	dpaa2_eth_drain_bufs(priv, bpid, 1);
1855 
1856 	/* Reset to zero the buffer count of all channels that were
1857 	 * using this buffer pool.
1858 	 */
1859 	for (i = 0; i < priv->num_channels; i++)
1860 		if (priv->channel[i]->bp->bpid == bpid)
1861 			priv->channel[i]->buf_count = 0;
1862 }
1863 
1864 static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
1865 {
1866 	int i;
1867 
1868 	for (i = 0; i < priv->num_bps; i++)
1869 		dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
1870 }
1871 
1872 /* Function is called from softirq context only, so we don't need to guard
1873  * the access to percpu count
1874  */
1875 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1876 				 struct dpaa2_eth_channel *ch)
1877 {
1878 	int new_count;
1879 
1880 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1881 		return 0;
1882 
1883 	do {
1884 		new_count = dpaa2_eth_add_bufs(priv, ch);
1885 		if (unlikely(!new_count)) {
1886 			/* Out of memory; abort for now, we'll try later on */
1887 			break;
1888 		}
1889 		ch->buf_count += new_count;
1890 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1891 
1892 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1893 		return -ENOMEM;
1894 
1895 	return 0;
1896 }
1897 
1898 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1899 {
1900 	struct dpaa2_eth_sgt_cache *sgt_cache;
1901 	u16 count;
1902 	int k, i;
1903 
1904 	for_each_possible_cpu(k) {
1905 		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1906 		count = sgt_cache->count;
1907 
1908 		for (i = 0; i < count; i++)
1909 			skb_free_frag(sgt_cache->buf[i]);
1910 		sgt_cache->count = 0;
1911 	}
1912 }
1913 
1914 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1915 {
1916 	int err;
1917 	int dequeues = -1;
1918 
1919 	/* Retry while portal is busy */
1920 	do {
1921 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1922 						    ch->store);
1923 		dequeues++;
1924 		cpu_relax();
1925 	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1926 
1927 	ch->stats.dequeue_portal_busy += dequeues;
1928 	if (unlikely(err))
1929 		ch->stats.pull_err++;
1930 
1931 	return err;
1932 }
1933 
1934 /* NAPI poll routine
1935  *
1936  * Frames are dequeued from the QMan channel associated with this NAPI context.
1937  * Rx, Tx confirmation and (if configured) Rx error frames all count
1938  * towards the NAPI budget.
1939  */
1940 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1941 {
1942 	struct dpaa2_eth_channel *ch;
1943 	struct dpaa2_eth_priv *priv;
1944 	int rx_cleaned = 0, txconf_cleaned = 0;
1945 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1946 	struct netdev_queue *nq;
1947 	int store_cleaned, work_done;
1948 	bool work_done_zc = false;
1949 	struct list_head rx_list;
1950 	int retries = 0;
1951 	u16 flowid;
1952 	int err;
1953 
1954 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
1955 	ch->xdp.res = 0;
1956 	priv = ch->priv;
1957 
1958 	INIT_LIST_HEAD(&rx_list);
1959 	ch->rx_list = &rx_list;
1960 
1961 	if (ch->xsk_zc) {
1962 		work_done_zc = dpaa2_xsk_tx(priv, ch);
1963 		/* If we reached the XSK Tx per NAPI threshold, we're done */
1964 		if (work_done_zc) {
1965 			work_done = budget;
1966 			goto out;
1967 		}
1968 	}
1969 
1970 	do {
1971 		err = dpaa2_eth_pull_channel(ch);
1972 		if (unlikely(err))
1973 			break;
1974 
1975 		/* Refill pool if appropriate */
1976 		dpaa2_eth_refill_pool(priv, ch);
1977 
1978 		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1979 		if (store_cleaned <= 0)
1980 			break;
1981 		if (fq->type == DPAA2_RX_FQ) {
1982 			rx_cleaned += store_cleaned;
1983 			flowid = fq->flowid;
1984 		} else {
1985 			txconf_cleaned += store_cleaned;
1986 			/* We have a single Tx conf FQ on this channel */
1987 			txc_fq = fq;
1988 		}
1989 
1990 		/* If we either consumed the whole NAPI budget with Rx frames
1991 		 * or we reached the Tx confirmations threshold, we're done.
1992 		 */
1993 		if (rx_cleaned >= budget ||
1994 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1995 			work_done = budget;
1996 			goto out;
1997 		}
1998 	} while (store_cleaned);
1999 
2000 	/* Update NET DIM with the values for this CDAN */
2001 	dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
2002 				ch->stats.bytes_per_cdan);
2003 	ch->stats.frames_per_cdan = 0;
2004 	ch->stats.bytes_per_cdan = 0;
2005 
2006 	/* We didn't consume the entire budget, so finish napi and
2007 	 * re-enable data availability notifications
2008 	 */
2009 	napi_complete_done(napi, rx_cleaned);
2010 	do {
2011 		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
2012 		cpu_relax();
2013 	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
2014 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
2015 		  ch->nctx.desired_cpu);
2016 
2017 	work_done = max(rx_cleaned, 1);
2018 
2019 out:
2020 	netif_receive_skb_list(ch->rx_list);
2021 
2022 	if (ch->xsk_tx_pkts_sent) {
2023 		xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
2024 		ch->xsk_tx_pkts_sent = 0;
2025 	}
2026 
2027 	if (txc_fq && txc_fq->dq_frames) {
2028 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
2029 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
2030 					  txc_fq->dq_bytes);
2031 		txc_fq->dq_frames = 0;
2032 		txc_fq->dq_bytes = 0;
2033 	}
2034 
2035 	if (ch->xdp.res & XDP_REDIRECT)
2036 		xdp_do_flush_map();
2037 	else if (rx_cleaned && ch->xdp.res & XDP_TX)
2038 		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
2039 
2040 	return work_done;
2041 }
2042 
2043 static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
2044 {
2045 	struct dpaa2_eth_channel *ch;
2046 	int i;
2047 
2048 	for (i = 0; i < priv->num_channels; i++) {
2049 		ch = priv->channel[i];
2050 		napi_enable(&ch->napi);
2051 	}
2052 }
2053 
2054 static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
2055 {
2056 	struct dpaa2_eth_channel *ch;
2057 	int i;
2058 
2059 	for (i = 0; i < priv->num_channels; i++) {
2060 		ch = priv->channel[i];
2061 		napi_disable(&ch->napi);
2062 	}
2063 }
2064 
2065 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
2066 			       bool tx_pause, bool pfc)
2067 {
2068 	struct dpni_taildrop td = {0};
2069 	struct dpaa2_eth_fq *fq;
2070 	int i, err;
2071 
2072 	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
2073 	 * flow control is disabled (as it might interfere with either the
2074 	 * buffer pool depletion trigger for pause frames or with the group
2075 	 * congestion trigger for PFC frames)
2076 	 */
2077 	td.enable = !tx_pause;
2078 	if (priv->rx_fqtd_enabled == td.enable)
2079 		goto set_cgtd;
2080 
2081 	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
2082 	td.units = DPNI_CONGESTION_UNIT_BYTES;
2083 
2084 	for (i = 0; i < priv->num_fqs; i++) {
2085 		fq = &priv->fq[i];
2086 		if (fq->type != DPAA2_RX_FQ)
2087 			continue;
2088 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2089 					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
2090 					fq->tc, fq->flowid, &td);
2091 		if (err) {
2092 			netdev_err(priv->net_dev,
2093 				   "dpni_set_taildrop(FQ) failed\n");
2094 			return;
2095 		}
2096 	}
2097 
2098 	priv->rx_fqtd_enabled = td.enable;
2099 
2100 set_cgtd:
2101 	/* Congestion group taildrop: threshold is in frames, per group
2102 	 * of FQs belonging to the same traffic class
2103 	 * Enabled if general Tx pause disabled or if PFCs are enabled
2104  * (congestion group threshold for PFC generation is lower than the
2105 	 * CG taildrop threshold, so it won't interfere with it; we also
2106 	 * want frames in non-PFC enabled traffic classes to be kept in check)
2107 	 */
2108 	td.enable = !tx_pause || pfc;
2109 	if (priv->rx_cgtd_enabled == td.enable)
2110 		return;
2111 
2112 	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
2113 	td.units = DPNI_CONGESTION_UNIT_FRAMES;
2114 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2115 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2116 					DPNI_CP_GROUP, DPNI_QUEUE_RX,
2117 					i, 0, &td);
2118 		if (err) {
2119 			netdev_err(priv->net_dev,
2120 				   "dpni_set_taildrop(CG) failed\n");
2121 			return;
2122 		}
2123 	}
2124 
2125 	priv->rx_cgtd_enabled = td.enable;
2126 }
2127 
2128 static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
2129 {
2130 	struct dpni_link_state state = {0};
2131 	bool tx_pause;
2132 	int err;
2133 
2134 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2135 	if (unlikely(err)) {
2136 		netdev_err(priv->net_dev,
2137 			   "dpni_get_link_state() failed\n");
2138 		return err;
2139 	}
2140 
2141 	/* If Tx pause frame settings have changed, we need to update
2142 	 * Rx FQ taildrop configuration as well. We configure taildrop
2143 	 * only when pause frame generation is disabled.
2144 	 */
2145 	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
2146 	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
2147 
2148 	/* When we manage the MAC/PHY using phylink there is no need
2149 	 * to manually update the netif_carrier.
2150 	 */
2151 	if (dpaa2_eth_is_type_phy(priv))
2152 		goto out;
2153 
2154 	/* Check link state; speed / duplex changes are not treated yet */
2155 	if (priv->link_state.up == state.up)
2156 		goto out;
2157 
2158 	if (state.up) {
2159 		netif_carrier_on(priv->net_dev);
2160 		netif_tx_start_all_queues(priv->net_dev);
2161 	} else {
2162 		netif_tx_stop_all_queues(priv->net_dev);
2163 		netif_carrier_off(priv->net_dev);
2164 	}
2165 
2166 	netdev_info(priv->net_dev, "Link Event: state %s\n",
2167 		    state.up ? "up" : "down");
2168 
2169 out:
2170 	priv->link_state = state;
2171 
2172 	return 0;
2173 }
2174 
2175 static int dpaa2_eth_open(struct net_device *net_dev)
2176 {
2177 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2178 	int err;
2179 
2180 	dpaa2_eth_seed_pools(priv);
2181 
2182 	if (!dpaa2_eth_is_type_phy(priv)) {
2183 		/* We'll only start the txqs when the link is actually ready;
2184 		 * make sure we don't race against the link up notification,
2185 		 * which may come immediately after dpni_enable().
2186 		 */
2187 		netif_tx_stop_all_queues(net_dev);
2188 
2189 		/* Also, explicitly set carrier off, otherwise
2190 		 * netif_carrier_ok() will return true and cause 'ip link show'
2191 		 * to report the LOWER_UP flag, even though the link
2192 		 * notification wasn't even received.
2193 		 */
2194 		netif_carrier_off(net_dev);
2195 	}
2196 	dpaa2_eth_enable_ch_napi(priv);
2197 
2198 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2199 	if (err < 0) {
2200 		netdev_err(net_dev, "dpni_enable() failed\n");
2201 		goto enable_err;
2202 	}
2203 
2204 	if (dpaa2_eth_is_type_phy(priv)) {
2205 		dpaa2_mac_start(priv->mac);
2206 		phylink_start(priv->mac->phylink);
2207 	}
2208 
2209 	return 0;
2210 
2211 enable_err:
2212 	dpaa2_eth_disable_ch_napi(priv);
2213 	dpaa2_eth_drain_pools(priv);
2214 	return err;
2215 }
2216 
2217 /* Total number of in-flight frames on ingress queues */
2218 static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
2219 {
2220 	struct dpaa2_eth_fq *fq;
2221 	u32 fcnt = 0, bcnt = 0, total = 0;
2222 	int i, err;
2223 
2224 	for (i = 0; i < priv->num_fqs; i++) {
2225 		fq = &priv->fq[i];
2226 		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2227 		if (err) {
2228 			netdev_warn(priv->net_dev, "query_fq_count failed\n");
2229 			break;
2230 		}
2231 		total += fcnt;
2232 	}
2233 
2234 	return total;
2235 }
2236 
2237 static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
2238 {
2239 	int retries = 10;
2240 	u32 pending;
2241 
2242 	do {
2243 		pending = dpaa2_eth_ingress_fq_count(priv);
2244 		if (pending)
2245 			msleep(100);
2246 	} while (pending && --retries);
2247 }
2248 
2249 #define DPNI_TX_PENDING_VER_MAJOR	7
2250 #define DPNI_TX_PENDING_VER_MINOR	13
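/* Wait for in-flight egress frames to be transmitted. On DPNI versions that
 * expose the Tx pending frames counter (statistics page 6) we poll it a
 * bounded number of times; otherwise, or if it never reaches zero, fall
 * back to a fixed 500ms sleep.
 */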
2251 static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
2252 {
2253 	union dpni_statistics stats;
2254 	int retries = 10;
2255 	int err;
2256 
2257 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
2258 				   DPNI_TX_PENDING_VER_MINOR) < 0)
2259 		goto out;
2260 
2261 	do {
2262 		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
2263 					  &stats);
2264 		if (err)
2265 			goto out;
2266 		if (stats.page_6.tx_pending_frames == 0)
2267 			return;
2268 	} while (--retries);
2269 
2270 out:
2271 	msleep(500);
2272 }
2273 
2274 static int dpaa2_eth_stop(struct net_device *net_dev)
2275 {
2276 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2277 	int dpni_enabled = 0;
2278 	int retries = 10;
2279 
2280 	if (dpaa2_eth_is_type_phy(priv)) {
2281 		phylink_stop(priv->mac->phylink);
2282 		dpaa2_mac_stop(priv->mac);
2283 	} else {
2284 		netif_tx_stop_all_queues(net_dev);
2285 		netif_carrier_off(net_dev);
2286 	}
2287 
2288 	/* On dpni_disable(), the MC firmware will:
2289 	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
2290 	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
2291 	 * of all in flight Tx frames is finished (and corresponding Tx conf
2292 	 * frames are enqueued back to software)
2293 	 *
2294 	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
2295 	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
2296 	 * and Tx conf queues are consumed on NAPI poll.
2297 	 */
2298 	dpaa2_eth_wait_for_egress_fq_empty(priv);
2299 
2300 	do {
2301 		dpni_disable(priv->mc_io, 0, priv->mc_token);
2302 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2303 		if (dpni_enabled)
2304 			/* Allow the hardware some slack */
2305 			msleep(100);
2306 	} while (dpni_enabled && --retries);
2307 	if (!retries) {
2308 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2309 		/* Must go on and disable NAPI nonetheless, so we don't crash at
2310 		 * the next "ifconfig up"
2311 		 */
2312 	}
2313 
2314 	dpaa2_eth_wait_for_ingress_fq_empty(priv);
2315 	dpaa2_eth_disable_ch_napi(priv);
2316 
2317 	/* Empty the buffer pool */
2318 	dpaa2_eth_drain_pools(priv);
2319 
2320 	/* Empty the Scatter-Gather Buffer cache */
2321 	dpaa2_eth_sgt_cache_drain(priv);
2322 
2323 	return 0;
2324 }
2325 
2326 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2327 {
2328 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2329 	struct device *dev = net_dev->dev.parent;
2330 	int err;
2331 
2332 	err = eth_mac_addr(net_dev, addr);
2333 	if (err < 0) {
2334 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2335 		return err;
2336 	}
2337 
2338 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2339 					net_dev->dev_addr);
2340 	if (err) {
2341 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2342 		return err;
2343 	}
2344 
2345 	return 0;
2346 }
2347 
2348 /* Fill in counters maintained by the GPP driver. These may be different from
2349  * the hardware counters obtained by ethtool.
2350  */
2351 static void dpaa2_eth_get_stats(struct net_device *net_dev,
2352 				struct rtnl_link_stats64 *stats)
2353 {
2354 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2355 	struct rtnl_link_stats64 *percpu_stats;
2356 	u64 *cpustats;
2357 	u64 *netstats = (u64 *)stats;
2358 	int i, j;
2359 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2360 
2361 	for_each_possible_cpu(i) {
2362 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2363 		cpustats = (u64 *)percpu_stats;
2364 		for (j = 0; j < num; j++)
2365 			netstats[j] += cpustats[j];
2366 	}
2367 }
2368 
2369 /* Copy mac unicast addresses from @net_dev to @priv.
2370  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2371  */
2372 static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2373 				     struct dpaa2_eth_priv *priv)
2374 {
2375 	struct netdev_hw_addr *ha;
2376 	int err;
2377 
2378 	netdev_for_each_uc_addr(ha, net_dev) {
2379 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2380 					ha->addr);
2381 		if (err)
2382 			netdev_warn(priv->net_dev,
2383 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2384 				    ha->addr, err);
2385 	}
2386 }
2387 
2388 /* Copy mac multicast addresses from @net_dev to @priv
2389  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2390  */
2391 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2392 				     struct dpaa2_eth_priv *priv)
2393 {
2394 	struct netdev_hw_addr *ha;
2395 	int err;
2396 
2397 	netdev_for_each_mc_addr(ha, net_dev) {
2398 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2399 					ha->addr);
2400 		if (err)
2401 			netdev_warn(priv->net_dev,
2402 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2403 				    ha->addr, err);
2404 	}
2405 }
2406 
2407 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2408 				__be16 vlan_proto, u16 vid)
2409 {
2410 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2411 	int err;
2412 
2413 	err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2414 			       vid, 0, 0, 0);
2415 
2416 	if (err) {
2417 		netdev_warn(priv->net_dev,
2418 			    "Could not add the vlan id %u\n",
2419 			    vid);
2420 		return err;
2421 	}
2422 
2423 	return 0;
2424 }
2425 
2426 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2427 				 __be16 vlan_proto, u16 vid)
2428 {
2429 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2430 	int err;
2431 
2432 	err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2433 
2434 	if (err) {
2435 		netdev_warn(priv->net_dev,
2436 			    "Could not remove the vlan id %u\n",
2437 			    vid);
2438 		return err;
2439 	}
2440 
2441 	return 0;
2442 }
2443 
2444 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2445 {
2446 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2447 	int uc_count = netdev_uc_count(net_dev);
2448 	int mc_count = netdev_mc_count(net_dev);
2449 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2450 	u32 options = priv->dpni_attrs.options;
2451 	u16 mc_token = priv->mc_token;
2452 	struct fsl_mc_io *mc_io = priv->mc_io;
2453 	int err;
2454 
2455 	/* Basic sanity checks; these probably indicate a misconfiguration */
2456 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2457 		netdev_info(net_dev,
2458 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2459 			    max_mac);
2460 
2461 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
2462 	if (uc_count > max_mac) {
2463 		netdev_info(net_dev,
2464 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2465 			    uc_count, max_mac);
2466 		goto force_promisc;
2467 	}
2468 	if (mc_count + uc_count > max_mac) {
2469 		netdev_info(net_dev,
2470 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2471 			    uc_count + mc_count, max_mac);
2472 		goto force_mc_promisc;
2473 	}
2474 
2475 	/* Adjust promisc settings due to flag combinations */
2476 	if (net_dev->flags & IFF_PROMISC)
2477 		goto force_promisc;
2478 	if (net_dev->flags & IFF_ALLMULTI) {
2479 		/* First, rebuild unicast filtering table. This should be done
2480 		 * in promisc mode, in order to avoid frame loss while we
2481 		 * progressively add entries to the table.
2482 		 * We don't know whether we had been in promisc already, and
2483 		 * making an MC call to find out is expensive; so set uc promisc
2484 		 * nonetheless.
2485 		 */
2486 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2487 		if (err)
2488 			netdev_warn(net_dev, "Can't set uc promisc\n");
2489 
2490 		/* Actual uc table reconstruction. */
2491 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2492 		if (err)
2493 			netdev_warn(net_dev, "Can't clear uc filters\n");
2494 		dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2495 
2496 		/* Finally, clear uc promisc and set mc promisc as requested. */
2497 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2498 		if (err)
2499 			netdev_warn(net_dev, "Can't clear uc promisc\n");
2500 		goto force_mc_promisc;
2501 	}
2502 
2503 	/* Neither unicast nor multicast promisc will be on... eventually.
2504 	 * For now, rebuild mac filtering tables while forcing both of them on.
2505 	 */
2506 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2507 	if (err)
2508 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2509 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2510 	if (err)
2511 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2512 
2513 	/* Actual mac filtering tables reconstruction */
2514 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2515 	if (err)
2516 		netdev_warn(net_dev, "Can't clear mac filters\n");
2517 	dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2518 	dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2519 
2520 	/* Now we can clear both ucast and mcast promisc, without risking
2521 	 * to drop legitimate frames anymore.
2522 	 */
2523 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2524 	if (err)
2525 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
2526 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2527 	if (err)
2528 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
2529 
2530 	return;
2531 
2532 force_promisc:
2533 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2534 	if (err)
2535 		netdev_warn(net_dev, "Can't set ucast promisc\n");
2536 force_mc_promisc:
2537 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2538 	if (err)
2539 		netdev_warn(net_dev, "Can't set mcast promisc\n");
2540 }
2541 
2542 static int dpaa2_eth_set_features(struct net_device *net_dev,
2543 				  netdev_features_t features)
2544 {
2545 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2546 	netdev_features_t changed = features ^ net_dev->features;
2547 	bool enable;
2548 	int err;
2549 
2550 	if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2551 		enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2552 		err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2553 		if (err)
2554 			return err;
2555 	}
2556 
2557 	if (changed & NETIF_F_RXCSUM) {
2558 		enable = !!(features & NETIF_F_RXCSUM);
2559 		err = dpaa2_eth_set_rx_csum(priv, enable);
2560 		if (err)
2561 			return err;
2562 	}
2563 
2564 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2565 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2566 		err = dpaa2_eth_set_tx_csum(priv, enable);
2567 		if (err)
2568 			return err;
2569 	}
2570 
2571 	return 0;
2572 }
2573 
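/* Handle the SIOCSHWTSTAMP request: store the requested Tx timestamping
 * type and, since hardware timestamps all Rx frames, report any non-NONE
 * Rx filter back as HWTSTAMP_FILTER_ALL.
 */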
2574 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2575 {
2576 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2577 	struct hwtstamp_config config;
2578 
2579 	if (!dpaa2_ptp)
2580 		return -EINVAL;
2581 
2582 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2583 		return -EFAULT;
2584 
2585 	switch (config.tx_type) {
2586 	case HWTSTAMP_TX_OFF:
2587 	case HWTSTAMP_TX_ON:
2588 	case HWTSTAMP_TX_ONESTEP_SYNC:
2589 		priv->tx_tstamp_type = config.tx_type;
2590 		break;
2591 	default:
2592 		return -ERANGE;
2593 	}
2594 
2595 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2596 		priv->rx_tstamp = false;
2597 	} else {
2598 		priv->rx_tstamp = true;
2599 		/* TS is set for all frame types, not only those requested */
2600 		config.rx_filter = HWTSTAMP_FILTER_ALL;
2601 	}
2602 
2603 	if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2604 		dpaa2_ptp_onestep_reg_update_method(priv);
2605 
2606 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2607 			-EFAULT : 0;
2608 }
2609 
2610 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2611 {
2612 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2613 
2614 	if (cmd == SIOCSHWTSTAMP)
2615 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2616 
2617 	if (dpaa2_eth_is_type_phy(priv))
2618 		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2619 
2620 	return -EOPNOTSUPP;
2621 }
2622 
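/* Check whether a frame of the given MTU still fits in a single linear Rx
 * buffer once the hardware annotation, driver headroom and XDP headroom
 * are accounted for; XDP does not handle scatter/gather Rx frames.
 */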
2623 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2624 {
2625 	int mfl, linear_mfl;
2626 
2627 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2628 	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2629 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2630 
2631 	if (mfl > linear_mfl) {
2632 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2633 			    linear_mfl - VLAN_ETH_HLEN);
2634 		return false;
2635 	}
2636 
2637 	return true;
2638 }
2639 
2640 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2641 {
2642 	int mfl, err;
2643 
2644 	/* We enforce a maximum Rx frame length based on MTU only if we have
2645 	 * an XDP program attached (in order to avoid Rx S/G frames).
2646 	 * Otherwise, we accept all incoming frames as long as they are not
2647 	 * larger than maximum size supported in hardware
2648 	 */
2649 	if (has_xdp)
2650 		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2651 	else
2652 		mfl = DPAA2_ETH_MFL;
2653 
2654 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2655 	if (err) {
2656 		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2657 		return err;
2658 	}
2659 
2660 	return 0;
2661 }
2662 
2663 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2664 {
2665 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2666 	int err;
2667 
2668 	if (!priv->xdp_prog)
2669 		goto out;
2670 
2671 	if (!xdp_mtu_valid(priv, new_mtu))
2672 		return -EINVAL;
2673 
2674 	err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2675 	if (err)
2676 		return err;
2677 
2678 out:
2679 	dev->mtu = new_mtu;
2680 	return 0;
2681 }
2682 
2683 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2684 {
2685 	struct dpni_buffer_layout buf_layout = {0};
2686 	int err;
2687 
2688 	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2689 				     DPNI_QUEUE_RX, &buf_layout);
2690 	if (err) {
2691 		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2692 		return err;
2693 	}
2694 
2695 	/* Reserve extra headroom for XDP header size changes */
2696 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2697 				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
2698 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2699 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2700 				     DPNI_QUEUE_RX, &buf_layout);
2701 	if (err) {
2702 		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2703 		return err;
2704 	}
2705 
2706 	return 0;
2707 }
2708 
2709 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2710 {
2711 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
2712 	struct dpaa2_eth_channel *ch;
2713 	struct bpf_prog *old;
2714 	bool up, need_update;
2715 	int i, err;
2716 
2717 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
2718 		return -EINVAL;
2719 
2720 	if (prog)
2721 		bpf_prog_add(prog, priv->num_channels);
2722 
2723 	up = netif_running(dev);
2724 	need_update = (!!priv->xdp_prog != !!prog);
2725 
2726 	if (up)
2727 		dev_close(dev);
2728 
2729 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2730 	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2731 	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2732 	 * so we are sure no old format buffers will be used from now on.
2733 	 */
2734 	if (need_update) {
2735 		err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2736 		if (err)
2737 			goto out_err;
2738 		err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2739 		if (err)
2740 			goto out_err;
2741 	}
2742 
2743 	old = xchg(&priv->xdp_prog, prog);
2744 	if (old)
2745 		bpf_prog_put(old);
2746 
2747 	for (i = 0; i < priv->num_channels; i++) {
2748 		ch = priv->channel[i];
2749 		old = xchg(&ch->xdp.prog, prog);
2750 		if (old)
2751 			bpf_prog_put(old);
2752 	}
2753 
2754 	if (up) {
2755 		err = dev_open(dev, NULL);
2756 		if (err)
2757 			return err;
2758 	}
2759 
2760 	return 0;
2761 
2762 out_err:
2763 	if (prog)
2764 		bpf_prog_sub(prog, priv->num_channels);
2765 	if (up)
2766 		dev_open(dev, NULL);
2767 
2768 	return err;
2769 }
2770 
2771 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2772 {
2773 	switch (xdp->command) {
2774 	case XDP_SETUP_PROG:
2775 		return dpaa2_eth_setup_xdp(dev, xdp->prog);
2776 	case XDP_SETUP_XSK_POOL:
2777 		return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
2778 	default:
2779 		return -EINVAL;
2780 	}
2781 
2782 	return 0;
2783 }
2784 
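/* Build a single-buffer Tx frame descriptor around an xdp_frame received
 * via ndo_xdp_xmit. The software annotation written at the start of the
 * buffer lets the Tx confirmation path later recover and free the xdp_frame.
 */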
2785 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2786 				   struct xdp_frame *xdpf,
2787 				   struct dpaa2_fd *fd)
2788 {
2789 	struct device *dev = net_dev->dev.parent;
2790 	unsigned int needed_headroom;
2791 	struct dpaa2_eth_swa *swa;
2792 	void *buffer_start, *aligned_start;
2793 	dma_addr_t addr;
2794 
2795 	/* We require a minimum headroom to be able to transmit the frame.
2796 	 * Otherwise return an error and let the original net_device handle it
2797 	 */
2798 	needed_headroom = dpaa2_eth_needed_headroom(NULL);
2799 	if (xdpf->headroom < needed_headroom)
2800 		return -EINVAL;
2801 
2802 	/* Setup the FD fields */
2803 	memset(fd, 0, sizeof(*fd));
2804 
2805 	/* Align FD address, if possible */
2806 	buffer_start = xdpf->data - needed_headroom;
2807 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2808 				  DPAA2_ETH_TX_BUF_ALIGN);
2809 	if (aligned_start >= xdpf->data - xdpf->headroom)
2810 		buffer_start = aligned_start;
2811 
2812 	swa = (struct dpaa2_eth_swa *)buffer_start;
2813 	/* fill in necessary fields here */
2814 	swa->type = DPAA2_ETH_SWA_XDP;
2815 	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2816 	swa->xdp.xdpf = xdpf;
2817 
2818 	addr = dma_map_single(dev, buffer_start,
2819 			      swa->xdp.dma_size,
2820 			      DMA_BIDIRECTIONAL);
2821 	if (unlikely(dma_mapping_error(dev, addr)))
2822 		return -ENOMEM;
2823 
2824 	dpaa2_fd_set_addr(fd, addr);
2825 	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2826 	dpaa2_fd_set_len(fd, xdpf->len);
2827 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
2828 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2829 
2830 	return 0;
2831 }
2832 
2833 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2834 			      struct xdp_frame **frames, u32 flags)
2835 {
2836 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2837 	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2838 	struct rtnl_link_stats64 *percpu_stats;
2839 	struct dpaa2_eth_fq *fq;
2840 	struct dpaa2_fd *fds;
2841 	int enqueued, i, err;
2842 
2843 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2844 		return -EINVAL;
2845 
2846 	if (!netif_running(net_dev))
2847 		return -ENETDOWN;
2848 
2849 	fq = &priv->fq[smp_processor_id()];
2850 	xdp_redirect_fds = &fq->xdp_redirect_fds;
2851 	fds = xdp_redirect_fds->fds;
2852 
2853 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
2854 
2855 	/* create a FD for each xdp_frame in the list received */
2856 	for (i = 0; i < n; i++) {
2857 		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2858 		if (err)
2859 			break;
2860 	}
2861 	xdp_redirect_fds->num = i;
2862 
2863 	/* enqueue all the frame descriptors */
2864 	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2865 
2866 	/* update statistics */
2867 	percpu_stats->tx_packets += enqueued;
2868 	for (i = 0; i < enqueued; i++)
2869 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2870 
2871 	return enqueued;
2872 }
2873 
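/* Configure XPS so that each Tx netdev queue is mapped to the CPU affine
 * to the corresponding Tx confirmation FQ, for all traffic classes.
 */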
2874 static int update_xps(struct dpaa2_eth_priv *priv)
2875 {
2876 	struct net_device *net_dev = priv->net_dev;
2877 	struct cpumask xps_mask;
2878 	struct dpaa2_eth_fq *fq;
2879 	int i, num_queues, netdev_queues;
2880 	int err = 0;
2881 
2882 	num_queues = dpaa2_eth_queue_count(priv);
2883 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
2884 
2885 	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2886 	 * queues, so only process those
2887 	 */
2888 	for (i = 0; i < netdev_queues; i++) {
2889 		fq = &priv->fq[i % num_queues];
2890 
2891 		cpumask_clear(&xps_mask);
2892 		cpumask_set_cpu(fq->target_cpu, &xps_mask);
2893 
2894 		err = netif_set_xps_queue(net_dev, &xps_mask, i);
2895 		if (err) {
2896 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
2897 			break;
2898 		}
2899 	}
2900 
2901 	return err;
2902 }
2903 
2904 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2905 				  struct tc_mqprio_qopt *mqprio)
2906 {
2907 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2908 	u8 num_tc, num_queues;
2909 	int i;
2910 
2911 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2912 	num_queues = dpaa2_eth_queue_count(priv);
2913 	num_tc = mqprio->num_tc;
2914 
2915 	if (num_tc == net_dev->num_tc)
2916 		return 0;
2917 
2918 	if (num_tc > dpaa2_eth_tc_count(priv)) {
2919 		netdev_err(net_dev, "Max %d traffic classes supported\n",
2920 			   dpaa2_eth_tc_count(priv));
2921 		return -EOPNOTSUPP;
2922 	}
2923 
2924 	if (!num_tc) {
2925 		netdev_reset_tc(net_dev);
2926 		netif_set_real_num_tx_queues(net_dev, num_queues);
2927 		goto out;
2928 	}
2929 
2930 	netdev_set_num_tc(net_dev, num_tc);
2931 	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2932 
2933 	for (i = 0; i < num_tc; i++)
2934 		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2935 
2936 out:
2937 	update_xps(priv);
2938 
2939 	return 0;
2940 }
2941 
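/* Convert a rate expressed in bytes/s (as used by the TBF qdisc offload)
 * into Mbits/s, the unit expected by the MC Tx shaping API.
 */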
2942 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
2943 
2944 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2945 {
2946 	struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2947 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2948 	struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2949 	struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2950 	int err;
2951 
2952 	if (p->command == TC_TBF_STATS)
2953 		return -EOPNOTSUPP;
2954 
2955 	/* Only per port Tx shaping */
2956 	if (p->parent != TC_H_ROOT)
2957 		return -EOPNOTSUPP;
2958 
2959 	if (p->command == TC_TBF_REPLACE) {
2960 		if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2961 			netdev_err(net_dev, "burst size cannot be greater than %d\n",
2962 				   DPAA2_ETH_MAX_BURST_SIZE);
2963 			return -EINVAL;
2964 		}
2965 
2966 		tx_cr_shaper.max_burst_size = cfg->max_size;
2967 		/* The TBF interface is in bytes/s, whereas DPAA2 expects the
2968 		 * rate in Mbits/s
2969 		 */
2970 		tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2971 	}
2972 
2973 	err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
2974 				  &tx_er_shaper, 0);
2975 	if (err) {
2976 		netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
2977 		return err;
2978 	}
2979 
2980 	return 0;
2981 }
2982 
2983 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
2984 			      enum tc_setup_type type, void *type_data)
2985 {
2986 	switch (type) {
2987 	case TC_SETUP_QDISC_MQPRIO:
2988 		return dpaa2_eth_setup_mqprio(net_dev, type_data);
2989 	case TC_SETUP_QDISC_TBF:
2990 		return dpaa2_eth_setup_tbf(net_dev, type_data);
2991 	default:
2992 		return -EOPNOTSUPP;
2993 	}
2994 }
2995 
2996 static const struct net_device_ops dpaa2_eth_ops = {
2997 	.ndo_open = dpaa2_eth_open,
2998 	.ndo_start_xmit = dpaa2_eth_tx,
2999 	.ndo_stop = dpaa2_eth_stop,
3000 	.ndo_set_mac_address = dpaa2_eth_set_addr,
3001 	.ndo_get_stats64 = dpaa2_eth_get_stats,
3002 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
3003 	.ndo_set_features = dpaa2_eth_set_features,
3004 	.ndo_eth_ioctl = dpaa2_eth_ioctl,
3005 	.ndo_change_mtu = dpaa2_eth_change_mtu,
3006 	.ndo_bpf = dpaa2_eth_xdp,
3007 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3008 	.ndo_xsk_wakeup = dpaa2_xsk_wakeup,
3009 	.ndo_setup_tc = dpaa2_eth_setup_tc,
3010 	.ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
3011 	.ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
3012 };
3013 
3014 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3015 {
3016 	struct dpaa2_eth_channel *ch;
3017 
3018 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
3019 
3020 	/* Update NAPI statistics */
3021 	ch->stats.cdan++;
3022 
3023 	/* NAPI can also be scheduled from the AF_XDP Tx path. Mark a missed
3024 	 * run so that it can be rescheduled again.
3025 	 */
3026 	if (!napi_if_scheduled_mark_missed(&ch->napi))
3027 		napi_schedule(&ch->napi);
3028 }
3029 
3030 /* Allocate and configure a DPCON object */
3031 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
3032 {
3033 	struct fsl_mc_device *dpcon;
3034 	struct device *dev = priv->net_dev->dev.parent;
3035 	int err;
3036 
3037 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
3038 				     FSL_MC_POOL_DPCON, &dpcon);
3039 	if (err) {
3040 		if (err == -ENXIO) {
3041 			dev_dbg(dev, "Waiting for DPCON\n");
3042 			err = -EPROBE_DEFER;
3043 		} else {
3044 			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
3045 		}
3046 		return ERR_PTR(err);
3047 	}
3048 
3049 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3050 	if (err) {
3051 		dev_err(dev, "dpcon_open() failed\n");
3052 		goto free;
3053 	}
3054 
3055 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3056 	if (err) {
3057 		dev_err(dev, "dpcon_reset() failed\n");
3058 		goto close;
3059 	}
3060 
3061 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3062 	if (err) {
3063 		dev_err(dev, "dpcon_enable() failed\n");
3064 		goto close;
3065 	}
3066 
3067 	return dpcon;
3068 
3069 close:
3070 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3071 free:
3072 	fsl_mc_object_free(dpcon);
3073 
3074 	return ERR_PTR(err);
3075 }
3076 
3077 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
3078 				 struct fsl_mc_device *dpcon)
3079 {
3080 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
3081 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3082 	fsl_mc_object_free(dpcon);
3083 }
3084 
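/* Allocate a software channel structure and its backing DPCON object */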
3085 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
3086 {
3087 	struct dpaa2_eth_channel *channel;
3088 	struct dpcon_attr attr;
3089 	struct device *dev = priv->net_dev->dev.parent;
3090 	int err;
3091 
3092 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
3093 	if (!channel)
3094 		return NULL;
3095 
3096 	channel->dpcon = dpaa2_eth_setup_dpcon(priv);
3097 	if (IS_ERR(channel->dpcon)) {
3098 		err = PTR_ERR(channel->dpcon);
3099 		goto err_setup;
3100 	}
3101 
3102 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
3103 				   &attr);
3104 	if (err) {
3105 		dev_err(dev, "dpcon_get_attributes() failed\n");
3106 		goto err_get_attr;
3107 	}
3108 
3109 	channel->dpcon_id = attr.id;
3110 	channel->ch_id = attr.qbman_ch_id;
3111 	channel->priv = priv;
3112 
3113 	return channel;
3114 
3115 err_get_attr:
3116 	dpaa2_eth_free_dpcon(priv, channel->dpcon);
3117 err_setup:
3118 	kfree(channel);
3119 	return ERR_PTR(err);
3120 }
3121 
3122 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
3123 				   struct dpaa2_eth_channel *channel)
3124 {
3125 	dpaa2_eth_free_dpcon(priv, channel->dpcon);
3126 	kfree(channel);
3127 }
3128 
3129 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
3130  * and register data availability notifications
3131  */
3132 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3133 {
3134 	struct dpaa2_io_notification_ctx *nctx;
3135 	struct dpaa2_eth_channel *channel;
3136 	struct dpcon_notification_cfg dpcon_notif_cfg;
3137 	struct device *dev = priv->net_dev->dev.parent;
3138 	int i, err;
3139 
3140 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
3141 	 * many cores as possible, so we need one channel for each core
3142 	 * (unless there are fewer queues than cores, in which case the extra
3143 	 * channels would be wasted).
3144 	 * Allocate one channel per core and register it to the core's
3145 	 * affine DPIO. If not enough channels are available for all cores
3146 	 * or if some cores don't have an affine DPIO, there will be no
3147 	 * ingress frame processing on those cores.
3148 	 */
3149 	cpumask_clear(&priv->dpio_cpumask);
3150 	for_each_online_cpu(i) {
3151 		/* Try to allocate a channel */
3152 		channel = dpaa2_eth_alloc_channel(priv);
3153 		if (IS_ERR_OR_NULL(channel)) {
3154 			err = PTR_ERR_OR_ZERO(channel);
3155 			if (err == -EPROBE_DEFER)
3156 				dev_dbg(dev, "waiting for affine channel\n");
3157 			else
3158 				dev_info(dev,
3159 					 "No affine channel for cpu %d and above\n", i);
3160 			goto err_alloc_ch;
3161 		}
3162 
3163 		priv->channel[priv->num_channels] = channel;
3164 
3165 		nctx = &channel->nctx;
3166 		nctx->is_cdan = 1;
3167 		nctx->cb = dpaa2_eth_cdan_cb;
3168 		nctx->id = channel->ch_id;
3169 		nctx->desired_cpu = i;
3170 
3171 		/* Register the new context */
3172 		channel->dpio = dpaa2_io_service_select(i);
3173 		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3174 		if (err) {
3175 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3176 			/* If no affine DPIO for this core, there's probably
3177 			 * none available for next cores either. Signal we want
3178 			 * to retry later, in case the DPIO devices weren't
3179 			 * probed yet.
3180 			 */
3181 			err = -EPROBE_DEFER;
3182 			goto err_service_reg;
3183 		}
3184 
3185 		/* Register DPCON notification with MC */
3186 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3187 		dpcon_notif_cfg.priority = 0;
3188 		dpcon_notif_cfg.user_ctx = nctx->qman64;
3189 		err = dpcon_set_notification(priv->mc_io, 0,
3190 					     channel->dpcon->mc_handle,
3191 					     &dpcon_notif_cfg);
3192 		if (err) {
3193 			dev_err(dev, "dpcon_set_notification() failed\n");
3194 			goto err_set_cdan;
3195 		}
3196 
3197 		/* If we managed to allocate a channel and also found an affine
3198 		 * DPIO for this core, add it to the final mask
3199 		 */
3200 		cpumask_set_cpu(i, &priv->dpio_cpumask);
3201 		priv->num_channels++;
3202 
3203 		/* Stop if we already have enough channels to accommodate all
3204 		 * RX and TX conf queues
3205 		 */
3206 		if (priv->num_channels == priv->dpni_attrs.num_queues)
3207 			break;
3208 	}
3209 
3210 	return 0;
3211 
3212 err_set_cdan:
3213 	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3214 err_service_reg:
3215 	dpaa2_eth_free_channel(priv, channel);
3216 err_alloc_ch:
3217 	if (err == -EPROBE_DEFER) {
3218 		for (i = 0; i < priv->num_channels; i++) {
3219 			channel = priv->channel[i];
3220 			nctx = &channel->nctx;
3221 			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3222 			dpaa2_eth_free_channel(priv, channel);
3223 		}
3224 		priv->num_channels = 0;
3225 		return err;
3226 	}
3227 
3228 	if (cpumask_empty(&priv->dpio_cpumask)) {
3229 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3230 		return -ENODEV;
3231 	}
3232 
3233 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3234 		 cpumask_pr_args(&priv->dpio_cpumask));
3235 
3236 	return 0;
3237 }
3238 
3239 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3240 {
3241 	struct device *dev = priv->net_dev->dev.parent;
3242 	struct dpaa2_eth_channel *ch;
3243 	int i;
3244 
3245 	/* deregister CDAN notifications and free channels */
3246 	for (i = 0; i < priv->num_channels; i++) {
3247 		ch = priv->channel[i];
3248 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3249 		dpaa2_eth_free_channel(priv, ch);
3250 	}
3251 }
3252 
3253 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3254 							      int cpu)
3255 {
3256 	struct device *dev = priv->net_dev->dev.parent;
3257 	int i;
3258 
3259 	for (i = 0; i < priv->num_channels; i++)
3260 		if (priv->channel[i]->nctx.desired_cpu == cpu)
3261 			return priv->channel[i];
3262 
3263 	/* We should never get here. Issue a warning and return
3264 	 * the first channel, because it's still better than nothing
3265 	 */
3266 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3267 
3268 	return priv->channel[0];
3269 }
3270 
3271 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3272 {
3273 	struct device *dev = priv->net_dev->dev.parent;
3274 	struct dpaa2_eth_fq *fq;
3275 	int rx_cpu, txc_cpu;
3276 	int i;
3277 
3278 	/* For each FQ, pick one channel/CPU to deliver frames to.
3279 	 * This may well change at runtime, either through irqbalance or
3280 	 * through direct user intervention.
3281 	 */
3282 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3283 
3284 	for (i = 0; i < priv->num_fqs; i++) {
3285 		fq = &priv->fq[i];
3286 		switch (fq->type) {
3287 		case DPAA2_RX_FQ:
3288 		case DPAA2_RX_ERR_FQ:
3289 			fq->target_cpu = rx_cpu;
3290 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3291 			if (rx_cpu >= nr_cpu_ids)
3292 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
3293 			break;
3294 		case DPAA2_TX_CONF_FQ:
3295 			fq->target_cpu = txc_cpu;
3296 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3297 			if (txc_cpu >= nr_cpu_ids)
3298 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
3299 			break;
3300 		default:
3301 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3302 		}
3303 		fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
3304 	}
3305 
3306 	update_xps(priv);
3307 }
3308 
3309 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3310 {
3311 	int i, j;
3312 
3313 	/* We have one TxConf FQ per Tx flow.
3314 	 * The number of Tx and Rx queues is the same.
3315 	 * Tx queues come first in the fq array.
3316 	 */
3317 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3318 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3319 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3320 		priv->fq[priv->num_fqs++].flowid = (u16)i;
3321 	}
3322 
3323 	for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3324 		for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3325 			priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3326 			priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3327 			priv->fq[priv->num_fqs].tc = (u8)j;
3328 			priv->fq[priv->num_fqs++].flowid = (u16)i;
3329 		}
3330 	}
3331 
3332 	/* We have exactly one Rx error queue per DPNI */
3333 	priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3334 	priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
3335 
3336 	/* For each FQ, decide on which core to process incoming frames */
3337 	dpaa2_eth_set_fq_affinity(priv);
3338 }
3339 
3340 /* Allocate and configure a buffer pool */
3341 struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
3342 {
3343 	struct device *dev = priv->net_dev->dev.parent;
3344 	struct fsl_mc_device *dpbp_dev;
3345 	struct dpbp_attr dpbp_attrs;
3346 	struct dpaa2_eth_bp *bp;
3347 	int err;
3348 
3349 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3350 				     &dpbp_dev);
3351 	if (err) {
3352 		if (err == -ENXIO)
3353 			err = -EPROBE_DEFER;
3354 		else
3355 			dev_err(dev, "DPBP device allocation failed\n");
3356 		return ERR_PTR(err);
3357 	}
3358 
3359 	bp = kzalloc(sizeof(*bp), GFP_KERNEL);
3360 	if (!bp) {
3361 		err = -ENOMEM;
3362 		goto err_alloc;
3363 	}
3364 
3365 	err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
3366 			&dpbp_dev->mc_handle);
3367 	if (err) {
3368 		dev_err(dev, "dpbp_open() failed\n");
3369 		goto err_open;
3370 	}
3371 
3372 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3373 	if (err) {
3374 		dev_err(dev, "dpbp_reset() failed\n");
3375 		goto err_reset;
3376 	}
3377 
3378 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3379 	if (err) {
3380 		dev_err(dev, "dpbp_enable() failed\n");
3381 		goto err_enable;
3382 	}
3383 
3384 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3385 				  &dpbp_attrs);
3386 	if (err) {
3387 		dev_err(dev, "dpbp_get_attributes() failed\n");
3388 		goto err_get_attr;
3389 	}
3390 
3391 	bp->dev = dpbp_dev;
3392 	bp->bpid = dpbp_attrs.bpid;
3393 
3394 	return bp;
3395 
3396 err_get_attr:
3397 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3398 err_enable:
3399 err_reset:
3400 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3401 err_open:
3402 	kfree(bp);
3403 err_alloc:
3404 	fsl_mc_object_free(dpbp_dev);
3405 
3406 	return ERR_PTR(err);
3407 }
3408 
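/* Allocate the default buffer pool and share it across all channels */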
3409 static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
3410 {
3411 	struct dpaa2_eth_bp *bp;
3412 	int i;
3413 
3414 	bp = dpaa2_eth_allocate_dpbp(priv);
3415 	if (IS_ERR(bp))
3416 		return PTR_ERR(bp);
3417 
3418 	priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
3419 	priv->num_bps++;
3420 
3421 	for (i = 0; i < priv->num_channels; i++)
3422 		priv->channel[i]->bp = bp;
3423 
3424 	return 0;
3425 }
3426 
3427 void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
3428 {
3429 	int idx_bp;
3430 
3431 	/* Find the index at which this BP is stored */
3432 	for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
3433 		if (priv->bp[idx_bp] == bp)
3434 			break;
3435 
3436 	/* Drain the pool and disable the associated MC object */
3437 	dpaa2_eth_drain_pool(priv, bp->bpid);
3438 	dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
3439 	dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
3440 	fsl_mc_object_free(bp->dev);
3441 	kfree(bp);
3442 
3443 	/* Move the last in use DPBP over in this position */
3444 	priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
3445 	priv->num_bps--;
3446 }
3447 
3448 static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
3449 {
3450 	int i;
3451 
3452 	for (i = 0; i < priv->num_bps; i++)
3453 		dpaa2_eth_free_dpbp(priv, priv->bp[i]);
3454 }
3455 
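/* Configure the Tx, Tx confirmation and Rx buffer layouts: software and
 * hardware annotation contents, data alignment and headroom, and the
 * resulting Rx buffer size seen by WRIOP.
 */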
3456 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3457 {
3458 	struct device *dev = priv->net_dev->dev.parent;
3459 	struct dpni_buffer_layout buf_layout = {0};
3460 	u16 rx_buf_align;
3461 	int err;
3462 
3463 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
3464 	 * version, this number is not always provided correctly on rev1.
3465 	 * We need to check for both alternatives in this situation.
3466 	 */
3467 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3468 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3469 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3470 	else
3471 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3472 
3473 	/* We need to ensure that the buffer size seen by WRIOP is a multiple
3474 	 * of 64 or 256 bytes depending on the WRIOP version.
3475 	 */
3476 	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3477 
3478 	/* tx buffer */
3479 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3480 	buf_layout.pass_timestamp = true;
3481 	buf_layout.pass_frame_status = true;
3482 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3483 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3484 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3485 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3486 				     DPNI_QUEUE_TX, &buf_layout);
3487 	if (err) {
3488 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3489 		return err;
3490 	}
3491 
3492 	/* tx-confirm buffer */
3493 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3494 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3495 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3496 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3497 	if (err) {
3498 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3499 		return err;
3500 	}
3501 
3502 	/* Now that we've set our tx buffer layout, retrieve the minimum
3503 	 * required tx data offset.
3504 	 */
3505 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3506 				      &priv->tx_data_offset);
3507 	if (err) {
3508 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3509 		return err;
3510 	}
3511 
3512 	if ((priv->tx_data_offset % 64) != 0)
3513 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3514 			 priv->tx_data_offset);
3515 
3516 	/* rx buffer */
3517 	buf_layout.pass_frame_status = true;
3518 	buf_layout.pass_parser_result = true;
3519 	buf_layout.data_align = rx_buf_align;
3520 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3521 	buf_layout.private_data_size = 0;
3522 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3523 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3524 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3525 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3526 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3527 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3528 				     DPNI_QUEUE_RX, &buf_layout);
3529 	if (err) {
3530 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3531 		return err;
3532 	}
3533 
3534 	return 0;
3535 }
3536 
3537 #define DPNI_ENQUEUE_FQID_VER_MAJOR	7
3538 #define DPNI_ENQUEUE_FQID_VER_MINOR	9
3539 
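/* Enqueue a single frame via the queuing destination (QDID); used when the
 * DPNI does not support (or we cannot use) FQID-based enqueue.
 */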
3540 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3541 				       struct dpaa2_eth_fq *fq,
3542 				       struct dpaa2_fd *fd, u8 prio,
3543 				       u32 num_frames __always_unused,
3544 				       int *frames_enqueued)
3545 {
3546 	int err;
3547 
3548 	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3549 					  priv->tx_qdid, prio,
3550 					  fq->tx_qdbin, fd);
3551 	if (!err && frames_enqueued)
3552 		*frames_enqueued = 1;
3553 	return err;
3554 }
3555 
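/* Enqueue one or more frames directly to a Tx FQ by FQID. A return of zero
 * frames from the QBMan service call means the portal could not accept any,
 * which we report as -EBUSY so the caller can retry.
 */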
3556 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3557 						struct dpaa2_eth_fq *fq,
3558 						struct dpaa2_fd *fd,
3559 						u8 prio, u32 num_frames,
3560 						int *frames_enqueued)
3561 {
3562 	int err;
3563 
3564 	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3565 						   fq->tx_fqid[prio],
3566 						   fd, num_frames);
3567 
3568 	if (err == 0)
3569 		return -EBUSY;
3570 
3571 	if (frames_enqueued)
3572 		*frames_enqueued = err;
3573 	return 0;
3574 }
3575 
3576 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3577 {
3578 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3579 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3580 		priv->enqueue = dpaa2_eth_enqueue_qd;
3581 	else
3582 		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3583 }
3584 
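/* Enable symmetric Rx/Tx pause frames in the link configuration, preserving
 * the other link options already set by firmware.
 */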
3585 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3586 {
3587 	struct device *dev = priv->net_dev->dev.parent;
3588 	struct dpni_link_cfg link_cfg = {0};
3589 	int err;
3590 
3591 	/* Get the default link options so we don't override other flags */
3592 	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3593 	if (err) {
3594 		dev_err(dev, "dpni_get_link_cfg() failed\n");
3595 		return err;
3596 	}
3597 
3598 	/* By default, enable both Rx and Tx pause frames */
3599 	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3600 	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3601 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3602 	if (err) {
3603 		dev_err(dev, "dpni_set_link_cfg() failed\n");
3604 		return err;
3605 	}
3606 
3607 	priv->link_state.options = link_cfg.options;
3608 
3609 	return 0;
3610 }
3611 
3612 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3613 {
3614 	struct dpni_queue_id qid = {0};
3615 	struct dpaa2_eth_fq *fq;
3616 	struct dpni_queue queue;
3617 	int i, j, err;
3618 
3619 	/* We only use Tx FQIDs for FQID-based enqueue, so check
3620 	 * if DPNI version supports it before updating FQIDs
3621 	 */
3622 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3623 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3624 		return;
3625 
3626 	for (i = 0; i < priv->num_fqs; i++) {
3627 		fq = &priv->fq[i];
3628 		if (fq->type != DPAA2_TX_CONF_FQ)
3629 			continue;
3630 		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3631 			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3632 					     DPNI_QUEUE_TX, j, fq->flowid,
3633 					     &queue, &qid);
3634 			if (err)
3635 				goto out_err;
3636 
3637 			fq->tx_fqid[j] = qid.fqid;
3638 			if (fq->tx_fqid[j] == 0)
3639 				goto out_err;
3640 		}
3641 	}
3642 
3643 	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3644 
3645 	return;
3646 
3647 out_err:
3648 	netdev_info(priv->net_dev,
3649 		    "Error reading Tx FQID, falling back to QDID-based enqueue\n");
3650 	priv->enqueue = dpaa2_eth_enqueue_qd;
3651 }
3652 
3653 /* Configure ingress classification based on VLAN PCP */
3654 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3655 {
3656 	struct device *dev = priv->net_dev->dev.parent;
3657 	struct dpkg_profile_cfg kg_cfg = {0};
3658 	struct dpni_qos_tbl_cfg qos_cfg = {0};
3659 	struct dpni_rule_cfg key_params;
3660 	void *dma_mem, *key, *mask;
3661 	u8 key_size = 2;	/* VLAN TCI field */
3662 	int i, pcp, err;
3663 
3664 	/* VLAN-based classification only makes sense if we have multiple
3665 	 * traffic classes.
3666 	 * Also, we need to extract just the 3-bit PCP field from the VLAN
3667 	 * header, and we can only do that by using a mask.
3668 	 */
3669 	if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3670 		dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3671 		return -EOPNOTSUPP;
3672 	}
3673 
3674 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3675 	if (!dma_mem)
3676 		return -ENOMEM;
3677 
3678 	kg_cfg.num_extracts = 1;
3679 	kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3680 	kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3681 	kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3682 	kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3683 
3684 	err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3685 	if (err) {
3686 		dev_err(dev, "dpni_prepare_key_cfg failed\n");
3687 		goto out_free_tbl;
3688 	}
3689 
3690 	/* set QoS table */
3691 	qos_cfg.default_tc = 0;
3692 	qos_cfg.discard_on_miss = 0;
3693 	qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3694 					      DPAA2_CLASSIFIER_DMA_SIZE,
3695 					      DMA_TO_DEVICE);
3696 	if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3697 		dev_err(dev, "QoS table DMA mapping failed\n");
3698 		err = -ENOMEM;
3699 		goto out_free_tbl;
3700 	}
3701 
3702 	err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3703 	if (err) {
3704 		dev_err(dev, "dpni_set_qos_table failed\n");
3705 		goto out_unmap_tbl;
3706 	}
3707 
3708 	/* Add QoS table entries */
3709 	key = kzalloc(key_size * 2, GFP_KERNEL);
3710 	if (!key) {
3711 		err = -ENOMEM;
3712 		goto out_unmap_tbl;
3713 	}
3714 	mask = key + key_size;
3715 	*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3716 
3717 	key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3718 					     DMA_TO_DEVICE);
3719 	if (dma_mapping_error(dev, key_params.key_iova)) {
3720 		dev_err(dev, "QoS table entry DMA mapping failed\n");
3721 		err = -ENOMEM;
3722 		goto out_free_key;
3723 	}
3724 
3725 	key_params.mask_iova = key_params.key_iova + key_size;
3726 	key_params.key_size = key_size;
3727 
3728 	/* We add rules for PCP-based distribution starting with highest
3729 	 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3730 	 * classes to accommodate all priority levels, the lowest ones end up
3731 	 * on TC 0 which was configured as default
3732 	 */
3733 	for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3734 		*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3735 		dma_sync_single_for_device(dev, key_params.key_iova,
3736 					   key_size * 2, DMA_TO_DEVICE);
3737 
3738 		err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3739 					 &key_params, i, i);
3740 		if (err) {
3741 			dev_err(dev, "dpni_add_qos_entry failed\n");
3742 			dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3743 			goto out_unmap_key;
3744 		}
3745 	}
3746 
3747 	priv->vlan_cls_enabled = true;
3748 
3749 	/* Table and key memory is not persistent, so clean everything up
3750 	 * once configuration is finished.
3751 	 */
3752 out_unmap_key:
3753 	dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3754 out_free_key:
3755 	kfree(key);
3756 out_unmap_tbl:
3757 	dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3758 			 DMA_TO_DEVICE);
3759 out_free_tbl:
3760 	kfree(dma_mem);
3761 
3762 	return err;
3763 }
3764 
3765 /* Configure the DPNI object this interface is associated with */
3766 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3767 {
3768 	struct device *dev = &ls_dev->dev;
3769 	struct dpaa2_eth_priv *priv;
3770 	struct net_device *net_dev;
3771 	int err;
3772 
3773 	net_dev = dev_get_drvdata(dev);
3774 	priv = netdev_priv(net_dev);
3775 
3776 	/* get a handle for the DPNI object */
3777 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3778 	if (err) {
3779 		dev_err(dev, "dpni_open() failed\n");
3780 		return err;
3781 	}
3782 
3783 	/* Check if we can work with this DPNI object */
3784 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3785 				   &priv->dpni_ver_minor);
3786 	if (err) {
3787 		dev_err(dev, "dpni_get_api_version() failed\n");
3788 		goto close;
3789 	}
3790 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3791 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3792 			priv->dpni_ver_major, priv->dpni_ver_minor,
3793 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
3794 		err = -ENOTSUPP;
3795 		goto close;
3796 	}
3797 
3798 	ls_dev->mc_io = priv->mc_io;
3799 	ls_dev->mc_handle = priv->mc_token;
3800 
3801 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3802 	if (err) {
3803 		dev_err(dev, "dpni_reset() failed\n");
3804 		goto close;
3805 	}
3806 
3807 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3808 				  &priv->dpni_attrs);
3809 	if (err) {
3810 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3811 		goto close;
3812 	}
3813 
3814 	err = dpaa2_eth_set_buffer_layout(priv);
3815 	if (err)
3816 		goto close;
3817 
3818 	dpaa2_eth_set_enqueue_mode(priv);
3819 
3820 	/* Enable pause frame support */
3821 	if (dpaa2_eth_has_pause_support(priv)) {
3822 		err = dpaa2_eth_set_pause(priv);
3823 		if (err)
3824 			goto close;
3825 	}
3826 
3827 	err = dpaa2_eth_set_vlan_qos(priv);
3828 	if (err && err != -EOPNOTSUPP)
3829 		goto close;
3830 
3831 	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3832 				       sizeof(struct dpaa2_eth_cls_rule),
3833 				       GFP_KERNEL);
3834 	if (!priv->cls_rules) {
3835 		err = -ENOMEM;
3836 		goto close;
3837 	}
3838 
3839 	return 0;
3840 
3841 close:
3842 	dpni_close(priv->mc_io, 0, priv->mc_token);
3843 
3844 	return err;
3845 }
3846 
3847 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3848 {
3849 	int err;
3850 
3851 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3852 	if (err)
3853 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3854 			    err);
3855 
3856 	dpni_close(priv->mc_io, 0, priv->mc_token);
3857 }
3858 
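/* Set up one Rx frame queue: read back its FQID, have it deliver frame
 * arrival notifications to its channel's DPCON, and store the fq pointer as
 * user context so the dequeue path can recover it. The channel's xdp_rxq is
 * registered here as well, once per channel (traffic class 0 only).
 */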
3859 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3860 				   struct dpaa2_eth_fq *fq)
3861 {
3862 	struct device *dev = priv->net_dev->dev.parent;
3863 	struct dpni_queue queue;
3864 	struct dpni_queue_id qid;
3865 	int err;
3866 
3867 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3868 			     DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3869 	if (err) {
3870 		dev_err(dev, "dpni_get_queue(RX) failed\n");
3871 		return err;
3872 	}
3873 
3874 	fq->fqid = qid.fqid;
3875 
3876 	queue.destination.id = fq->channel->dpcon_id;
3877 	queue.destination.type = DPNI_DEST_DPCON;
3878 	queue.destination.priority = 1;
3879 	queue.user_context = (u64)(uintptr_t)fq;
3880 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3881 			     DPNI_QUEUE_RX, fq->tc, fq->flowid,
3882 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3883 			     &queue);
3884 	if (err) {
3885 		dev_err(dev, "dpni_set_queue(RX) failed\n");
3886 		return err;
3887 	}
3888 
3889 	/* xdp_rxq setup: only register it once per channel, i.e. for tc 0 */
3891 	if (fq->tc > 0)
3892 		return 0;
3893 
3894 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3895 			       fq->flowid, 0);
3896 	if (err) {
3897 		dev_err(dev, "xdp_rxq_info_reg failed\n");
3898 		return err;
3899 	}
3900 
3901 	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3902 					 MEM_TYPE_PAGE_ORDER0, NULL);
3903 	if (err) {
3904 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3905 		return err;
3906 	}
3907 
3908 	return 0;
3909 }
3910 
3911 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3912 				   struct dpaa2_eth_fq *fq)
3913 {
3914 	struct device *dev = priv->net_dev->dev.parent;
3915 	struct dpni_queue queue;
3916 	struct dpni_queue_id qid;
3917 	int i, err;
3918 
3919 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3920 		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3921 				     DPNI_QUEUE_TX, i, fq->flowid,
3922 				     &queue, &qid);
3923 		if (err) {
3924 			dev_err(dev, "dpni_get_queue(TX) failed\n");
3925 			return err;
3926 		}
3927 		fq->tx_fqid[i] = qid.fqid;
3928 	}
3929 
3930 	/* All Tx queues belonging to the same flowid have the same qdbin */
3931 	fq->tx_qdbin = qid.qdbin;
3932 
3933 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3934 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3935 			     &queue, &qid);
3936 	if (err) {
3937 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3938 		return err;
3939 	}
3940 
3941 	fq->fqid = qid.fqid;
3942 
3943 	queue.destination.id = fq->channel->dpcon_id;
3944 	queue.destination.type = DPNI_DEST_DPCON;
3945 	queue.destination.priority = 0;
3946 	queue.user_context = (u64)(uintptr_t)fq;
3947 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3948 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3949 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3950 			     &queue);
3951 	if (err) {
3952 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3953 		return err;
3954 	}
3955 
3956 	return 0;
3957 }
3958 
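/* Route the Rx error frame queue to a channel, same as a regular Rx queue */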
3959 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3960 			     struct dpaa2_eth_fq *fq)
3961 {
3962 	struct device *dev = priv->net_dev->dev.parent;
3963 	struct dpni_queue q = { { 0 } };
3964 	struct dpni_queue_id qid;
3965 	u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3966 	int err;
3967 
3968 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3969 			     DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3970 	if (err) {
3971 		dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3972 		return err;
3973 	}
3974 
3975 	fq->fqid = qid.fqid;
3976 
3977 	q.destination.id = fq->channel->dpcon_id;
3978 	q.destination.type = DPNI_DEST_DPCON;
3979 	q.destination.priority = 1;
3980 	q.user_context = (u64)(uintptr_t)fq;
3981 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3982 			     DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
3983 	if (err) {
3984 		dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
3985 		return err;
3986 	}
3987 
3988 	return 0;
3989 }
3990 
3991 /* Supported header fields for Rx hash distribution key */
3992 static const struct dpaa2_eth_dist_fields dist_fields[] = {
3993 	{
3994 		/* L2 header */
3995 		.rxnfc_field = RXH_L2DA,
3996 		.cls_prot = NET_PROT_ETH,
3997 		.cls_field = NH_FLD_ETH_DA,
3998 		.id = DPAA2_ETH_DIST_ETHDST,
3999 		.size = 6,
4000 	}, {
4001 		.cls_prot = NET_PROT_ETH,
4002 		.cls_field = NH_FLD_ETH_SA,
4003 		.id = DPAA2_ETH_DIST_ETHSRC,
4004 		.size = 6,
4005 	}, {
4006 		/* This is the last ethertype field parsed:
4007 		 * depending on frame format, it can be the MAC ethertype
4008 		 * or the VLAN etype.
4009 		 */
4010 		.cls_prot = NET_PROT_ETH,
4011 		.cls_field = NH_FLD_ETH_TYPE,
4012 		.id = DPAA2_ETH_DIST_ETHTYPE,
4013 		.size = 2,
4014 	}, {
4015 		/* VLAN header */
4016 		.rxnfc_field = RXH_VLAN,
4017 		.cls_prot = NET_PROT_VLAN,
4018 		.cls_field = NH_FLD_VLAN_TCI,
4019 		.id = DPAA2_ETH_DIST_VLAN,
4020 		.size = 2,
4021 	}, {
4022 		/* IP header */
4023 		.rxnfc_field = RXH_IP_SRC,
4024 		.cls_prot = NET_PROT_IP,
4025 		.cls_field = NH_FLD_IP_SRC,
4026 		.id = DPAA2_ETH_DIST_IPSRC,
4027 		.size = 4,
4028 	}, {
4029 		.rxnfc_field = RXH_IP_DST,
4030 		.cls_prot = NET_PROT_IP,
4031 		.cls_field = NH_FLD_IP_DST,
4032 		.id = DPAA2_ETH_DIST_IPDST,
4033 		.size = 4,
4034 	}, {
4035 		.rxnfc_field = RXH_L3_PROTO,
4036 		.cls_prot = NET_PROT_IP,
4037 		.cls_field = NH_FLD_IP_PROTO,
4038 		.id = DPAA2_ETH_DIST_IPPROTO,
4039 		.size = 1,
4040 	}, {
4041 		/* Using UDP ports, this is functionally equivalent to raw
4042 		 * byte pairs from L4 header.
4043 		 */
4044 		.rxnfc_field = RXH_L4_B_0_1,
4045 		.cls_prot = NET_PROT_UDP,
4046 		.cls_field = NH_FLD_UDP_PORT_SRC,
4047 		.id = DPAA2_ETH_DIST_L4SRC,
4048 		.size = 2,
4049 	}, {
4050 		.rxnfc_field = RXH_L4_B_2_3,
4051 		.cls_prot = NET_PROT_UDP,
4052 		.cls_field = NH_FLD_UDP_PORT_DST,
4053 		.id = DPAA2_ETH_DIST_L4DST,
4054 		.size = 2,
4055 	},
4056 };
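/* Distribution/classification keys are built by concatenating the selected
 * fields in the order of the table above, so a full key (all fields) is
 * 6 + 6 + 2 + 2 + 4 + 4 + 1 + 2 + 2 = 29 bytes and, for example, the IP
 * source address field starts at offset 16. dpaa2_eth_cls_key_size() and
 * dpaa2_eth_cls_fld_off() below compute sizes and offsets in this layout.
 */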
4057 
4058 /* Configure the Rx hash key using the legacy API */
4059 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4060 {
4061 	struct device *dev = priv->net_dev->dev.parent;
4062 	struct dpni_rx_tc_dist_cfg dist_cfg;
4063 	int i, err = 0;
4064 
4065 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4066 
4067 	dist_cfg.key_cfg_iova = key;
4068 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4069 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4070 
4071 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4072 		err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4073 					  i, &dist_cfg);
4074 		if (err) {
4075 			dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4076 			break;
4077 		}
4078 	}
4079 
4080 	return err;
4081 }
4082 
4083 /* Configure the Rx hash key using the new API */
4084 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4085 {
4086 	struct device *dev = priv->net_dev->dev.parent;
4087 	struct dpni_rx_dist_cfg dist_cfg;
4088 	int i, err = 0;
4089 
4090 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4091 
4092 	dist_cfg.key_cfg_iova = key;
4093 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4094 	dist_cfg.enable = 1;
4095 
4096 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4097 		dist_cfg.tc = i;
4098 		err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4099 					    &dist_cfg);
4100 		if (err) {
4101 			dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4102 			break;
4103 		}
4104 
4105 		/* If the flow steering / hashing key is shared between all
4106 		 * traffic classes, install it just once
4107 		 */
4108 		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4109 			break;
4110 	}
4111 
4112 	return err;
4113 }
4114 
4115 /* Configure the Rx flow classification key */
4116 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4117 {
4118 	struct device *dev = priv->net_dev->dev.parent;
4119 	struct dpni_rx_dist_cfg dist_cfg;
4120 	int i, err = 0;
4121 
4122 	memset(&dist_cfg, 0, sizeof(dist_cfg));
4123 
4124 	dist_cfg.key_cfg_iova = key;
4125 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4126 	dist_cfg.enable = 1;
4127 
4128 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4129 		dist_cfg.tc = i;
4130 		err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4131 					  &dist_cfg);
4132 		if (err) {
4133 			dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4134 			break;
4135 		}
4136 
4137 		/* If the flow steering / hashing key is shared between all
4138 		 * traffic classes, install it just once
4139 		 */
4140 		if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4141 			break;
4142 	}
4143 
4144 	return err;
4145 }
4146 
4147 /* Size of the Rx flow classification key */
4148 int dpaa2_eth_cls_key_size(u64 fields)
4149 {
4150 	int i, size = 0;
4151 
4152 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4153 		if (!(fields & dist_fields[i].id))
4154 			continue;
4155 		size += dist_fields[i].size;
4156 	}
4157 
4158 	return size;
4159 }
4160 
4161 /* Offset of header field in Rx classification key */
4162 int dpaa2_eth_cls_fld_off(int prot, int field)
4163 {
4164 	int i, off = 0;
4165 
4166 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4167 		if (dist_fields[i].cls_prot == prot &&
4168 		    dist_fields[i].cls_field == field)
4169 			return off;
4170 		off += dist_fields[i].size;
4171 	}
4172 
4173 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4174 	return 0;
4175 }
4176 
4177 /* Prune unused fields from the classification rule.
4178  * Used when masking is not supported
4179  */
4180 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4181 {
4182 	int off = 0, new_off = 0;
4183 	int i, size;
4184 
4185 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4186 		size = dist_fields[i].size;
4187 		if (dist_fields[i].id & fields) {
4188 			memcpy(key_mem + new_off, key_mem + off, size);
4189 			new_off += size;
4190 		}
4191 		off += size;
4192 	}
4193 }
4194 
4195 /* Set Rx distribution (hash or flow classification) key
4196  * flags is a combination of RXH_ bits
4197  */
4198 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4199 				  enum dpaa2_eth_rx_dist type, u64 flags)
4200 {
4201 	struct device *dev = net_dev->dev.parent;
4202 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4203 	struct dpkg_profile_cfg cls_cfg;
4204 	u32 rx_hash_fields = 0;
4205 	dma_addr_t key_iova;
4206 	u8 *dma_mem;
4207 	int i;
4208 	int err = 0;
4209 
4210 	memset(&cls_cfg, 0, sizeof(cls_cfg));
4211 
4212 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4213 		struct dpkg_extract *key =
4214 			&cls_cfg.extracts[cls_cfg.num_extracts];
4215 
4216 		/* For both Rx hashing and classification keys
4217 		 * we set only the selected fields.
4218 		 */
4219 		if (!(flags & dist_fields[i].id))
4220 			continue;
4221 		if (type == DPAA2_ETH_RX_DIST_HASH)
4222 			rx_hash_fields |= dist_fields[i].rxnfc_field;
4223 
4224 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4225 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
4226 			return -E2BIG;
4227 		}
4228 
4229 		key->type = DPKG_EXTRACT_FROM_HDR;
4230 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4231 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
4232 		key->extract.from_hdr.field = dist_fields[i].cls_field;
4233 		cls_cfg.num_extracts++;
4234 	}
4235 
4236 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4237 	if (!dma_mem)
4238 		return -ENOMEM;
4239 
4240 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4241 	if (err) {
4242 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4243 		goto free_key;
4244 	}
4245 
4246 	/* Prepare for setting the rx dist */
4247 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4248 				  DMA_TO_DEVICE);
4249 	if (dma_mapping_error(dev, key_iova)) {
4250 		dev_err(dev, "DMA mapping failed\n");
4251 		err = -ENOMEM;
4252 		goto free_key;
4253 	}
4254 
4255 	if (type == DPAA2_ETH_RX_DIST_HASH) {
4256 		if (dpaa2_eth_has_legacy_dist(priv))
4257 			err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4258 		else
4259 			err = dpaa2_eth_config_hash_key(priv, key_iova);
4260 	} else {
4261 		err = dpaa2_eth_config_cls_key(priv, key_iova);
4262 	}
4263 
4264 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4265 			 DMA_TO_DEVICE);
4266 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4267 		priv->rx_hash_fields = rx_hash_fields;
4268 
4269 free_key:
4270 	kfree(dma_mem);
4271 	return err;
4272 }
4273 
4274 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4275 {
4276 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4277 	u64 key = 0;
4278 	int i;
4279 
4280 	if (!dpaa2_eth_hash_enabled(priv))
4281 		return -EOPNOTSUPP;
4282 
4283 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4284 		if (dist_fields[i].rxnfc_field & flags)
4285 			key |= dist_fields[i].id;
4286 
4287 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4288 }
4289 
4290 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4291 {
4292 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4293 }
4294 
4295 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4296 {
4297 	struct device *dev = priv->net_dev->dev.parent;
4298 	int err;
4299 
4300 	/* Check if we actually support Rx flow classification */
4301 	if (dpaa2_eth_has_legacy_dist(priv)) {
4302 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
4303 		return -EOPNOTSUPP;
4304 	}
4305 
4306 	if (!dpaa2_eth_fs_enabled(priv)) {
4307 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4308 		return -EOPNOTSUPP;
4309 	}
4310 
4311 	if (!dpaa2_eth_hash_enabled(priv)) {
4312 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4313 		return -EOPNOTSUPP;
4314 	}
4315 
4316 	/* If there is no support for masking in the classification table,
4317 	 * we don't set a default key, as it will depend on the rules
4318 	 * added by the user at runtime.
4319 	 */
4320 	if (!dpaa2_eth_fs_mask_enabled(priv))
4321 		goto out;
4322 
4323 	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4324 	if (err)
4325 		return err;
4326 
4327 out:
4328 	priv->rx_cls_enabled = 1;
4329 
4330 	return 0;
4331 }
4332 
4333 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4334  * frame queues and channels
4335  */
4336 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4337 {
4338 	struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
4339 	struct net_device *net_dev = priv->net_dev;
4340 	struct dpni_pools_cfg pools_params = { 0 };
4341 	struct device *dev = net_dev->dev.parent;
4342 	struct dpni_error_cfg err_cfg;
4343 	int err = 0;
4344 	int i;
4345 
4346 	pools_params.num_dpbp = 1;
4347 	pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4348 	pools_params.pools[0].backup_pool = 0;
4349 	pools_params.pools[0].buffer_size = priv->rx_buf_size;
4350 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4351 	if (err) {
4352 		dev_err(dev, "dpni_set_pools() failed\n");
4353 		return err;
4354 	}
4355 
4356 	/* have the interface implicitly distribute traffic based on
4357 	 * the default hash key
4358 	 */
4359 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4360 	if (err && err != -EOPNOTSUPP)
4361 		dev_err(dev, "Failed to configure hashing\n");
4362 
4363 	/* Configure the flow classification key; it includes all
4364 	 * supported header fields and cannot be modified at runtime
4365 	 */
4366 	err = dpaa2_eth_set_default_cls(priv);
4367 	if (err && err != -EOPNOTSUPP)
4368 		dev_err(dev, "Failed to configure Rx classification key\n");
4369 
4370 	/* Configure handling of error frames */
4371 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4372 	err_cfg.set_frame_annotation = 1;
4373 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4374 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4375 				       &err_cfg);
4376 	if (err) {
4377 		dev_err(dev, "dpni_set_errors_behavior failed\n");
4378 		return err;
4379 	}
4380 
4381 	/* Configure Rx and Tx conf queues to generate CDANs */
4382 	for (i = 0; i < priv->num_fqs; i++) {
4383 		switch (priv->fq[i].type) {
4384 		case DPAA2_RX_FQ:
4385 			err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4386 			break;
4387 		case DPAA2_TX_CONF_FQ:
4388 			err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4389 			break;
4390 		case DPAA2_RX_ERR_FQ:
4391 			err = setup_rx_err_flow(priv, &priv->fq[i]);
4392 			break;
4393 		default:
4394 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4395 			return -EINVAL;
4396 		}
4397 		if (err)
4398 			return err;
4399 	}
4400 
4401 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4402 			    DPNI_QUEUE_TX, &priv->tx_qdid);
4403 	if (err) {
4404 		dev_err(dev, "dpni_get_qdid() failed\n");
4405 		return err;
4406 	}
4407 
4408 	return 0;
4409 }
4410 
4411 /* Allocate rings for storing incoming frame descriptors */
4412 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4413 {
4414 	struct net_device *net_dev = priv->net_dev;
4415 	struct device *dev = net_dev->dev.parent;
4416 	int i;
4417 
4418 	for (i = 0; i < priv->num_channels; i++) {
4419 		priv->channel[i]->store =
4420 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4421 		if (!priv->channel[i]->store) {
4422 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4423 			goto err_ring;
4424 		}
4425 	}
4426 
4427 	return 0;
4428 
4429 err_ring:
4430 	for (i = 0; i < priv->num_channels; i++) {
4431 		if (!priv->channel[i]->store)
4432 			break;
4433 		dpaa2_io_store_destroy(priv->channel[i]->store);
4434 	}
4435 
4436 	return -ENOMEM;
4437 }
4438 
4439 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4440 {
4441 	int i;
4442 
4443 	for (i = 0; i < priv->num_channels; i++)
4444 		dpaa2_io_store_destroy(priv->channel[i]->store);
4445 }
4446 
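/* Select the interface MAC address: prefer the port (DPMAC) address
 * provisioned by the bootloader, then the DPNI's own primary address; if
 * both are zero, generate a random address and program it into the DPNI as
 * the new primary address.
 */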
4447 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4448 {
4449 	struct net_device *net_dev = priv->net_dev;
4450 	struct device *dev = net_dev->dev.parent;
4451 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4452 	int err;
4453 
4454 	/* Get firmware address, if any */
4455 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4456 	if (err) {
4457 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4458 		return err;
4459 	}
4460 
4461 	/* Get DPNI attributes address, if any */
4462 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4463 					dpni_mac_addr);
4464 	if (err) {
4465 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4466 		return err;
4467 	}
4468 
4469 	/* First check if firmware has any address configured by bootloader */
4470 	if (!is_zero_ether_addr(mac_addr)) {
4471 		/* If the DPMAC addr != DPNI addr, update it */
4472 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4473 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4474 							priv->mc_token,
4475 							mac_addr);
4476 			if (err) {
4477 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4478 				return err;
4479 			}
4480 		}
4481 		eth_hw_addr_set(net_dev, mac_addr);
4482 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
4483 		/* No MAC address configured, fill in net_dev->dev_addr
4484 		 * with a random one
4485 		 */
4486 		eth_hw_addr_random(net_dev);
4487 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4488 
4489 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4490 						net_dev->dev_addr);
4491 		if (err) {
4492 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4493 			return err;
4494 		}
4495 
4496 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4497 		 * practical purposes, this will be our "permanent" mac address,
4498 		 * at least until the next reboot. This move will also permit
4499 		 * register_netdevice() to properly fill up net_dev->perm_addr.
4500 		 */
4501 		net_dev->addr_assign_type = NET_ADDR_PERM;
4502 	} else {
4503 		/* NET_ADDR_PERM is default, all we have to do is
4504 		 * fill in the device addr.
4505 		 */
4506 		eth_hw_addr_set(net_dev, dpni_mac_addr);
4507 	}
4508 
4509 	return 0;
4510 }
4511 
4512 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4513 {
4514 	struct device *dev = net_dev->dev.parent;
4515 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4516 	u32 options = priv->dpni_attrs.options;
4517 	u64 supported = 0, not_supported = 0;
4518 	u8 bcast_addr[ETH_ALEN];
4519 	u8 num_queues;
4520 	int err;
4521 
4522 	net_dev->netdev_ops = &dpaa2_eth_ops;
4523 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4524 
4525 	err = dpaa2_eth_set_mac_addr(priv);
4526 	if (err)
4527 		return err;
4528 
4529 	/* Explicitly add the broadcast address to the MAC filtering table */
4530 	eth_broadcast_addr(bcast_addr);
4531 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4532 	if (err) {
4533 		dev_err(dev, "dpni_add_mac_addr() failed\n");
4534 		return err;
4535 	}
4536 
4537 	/* Set MTU upper limit; lower limit is 68B (default value) */
4538 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4539 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4540 					DPAA2_ETH_MFL);
4541 	if (err) {
4542 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
4543 		return err;
4544 	}
4545 
4546 	/* Set actual number of queues in the net device */
4547 	num_queues = dpaa2_eth_queue_count(priv);
4548 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
4549 	if (err) {
4550 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4551 		return err;
4552 	}
4553 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
4554 	if (err) {
4555 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4556 		return err;
4557 	}
4558 
4559 	dpaa2_eth_detect_features(priv);
4560 
4561 	/* Capabilities listing */
4562 	supported |= IFF_LIVE_ADDR_CHANGE;
4563 
4564 	if (options & DPNI_OPT_NO_MAC_FILTER)
4565 		not_supported |= IFF_UNICAST_FLT;
4566 	else
4567 		supported |= IFF_UNICAST_FLT;
4568 
4569 	net_dev->priv_flags |= supported;
4570 	net_dev->priv_flags &= ~not_supported;
4571 
4572 	/* Features */
4573 	net_dev->features = NETIF_F_RXCSUM |
4574 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4575 			    NETIF_F_SG | NETIF_F_HIGHDMA |
4576 			    NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4577 	net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4578 	net_dev->hw_features = net_dev->features;
4579 
4580 	if (priv->dpni_attrs.vlan_filter_entries)
4581 		net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4582 
4583 	return 0;
4584 }
4585 
4586 static int dpaa2_eth_poll_link_state(void *arg)
4587 {
4588 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4589 	int err;
4590 
4591 	while (!kthread_should_stop()) {
4592 		err = dpaa2_eth_link_state_update(priv);
4593 		if (unlikely(err))
4594 			return err;
4595 
4596 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4597 	}
4598 
4599 	return 0;
4600 }
4601 
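/* Look up the DPNI's endpoint; if it is a DPMAC object, open it and, when
 * the MAC is under Linux control (dpaa2_eth_is_type_phy()), connect to it.
 * Having no endpoint, or a peer that is not a DPMAC, is not an error.
 */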
4602 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4603 {
4604 	struct fsl_mc_device *dpni_dev, *dpmac_dev;
4605 	struct dpaa2_mac *mac;
4606 	int err;
4607 
4608 	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4609 	dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4610 
4611 	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
4612 		netdev_dbg(priv->net_dev, "waiting for mac\n");
4613 		return PTR_ERR(dpmac_dev);
4614 	}
4615 
4616 	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4617 		return 0;
4618 
4619 	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4620 	if (!mac)
4621 		return -ENOMEM;
4622 
4623 	mac->mc_dev = dpmac_dev;
4624 	mac->mc_io = priv->mc_io;
4625 	mac->net_dev = priv->net_dev;
4626 
4627 	err = dpaa2_mac_open(mac);
4628 	if (err)
4629 		goto err_free_mac;
4630 	priv->mac = mac;
4631 
4632 	if (dpaa2_eth_is_type_phy(priv)) {
4633 		err = dpaa2_mac_connect(mac);
4634 		if (err) {
4635 			if (err == -EPROBE_DEFER)
4636 				netdev_dbg(priv->net_dev,
4637 					   "could not connect to MAC\n");
4638 			else
4639 				netdev_err(priv->net_dev,
4640 					   "Error connecting to the MAC endpoint: %pe\n",
4641 					   ERR_PTR(err));
4642 			goto err_close_mac;
4643 		}
4644 	}
4645 
4646 	return 0;
4647 
4648 err_close_mac:
4649 	dpaa2_mac_close(mac);
4650 	priv->mac = NULL;
4651 err_free_mac:
4652 	kfree(mac);
4653 	return err;
4654 }
4655 
4656 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4657 {
4658 	if (dpaa2_eth_is_type_phy(priv))
4659 		dpaa2_mac_disconnect(priv->mac);
4660 
4661 	if (!dpaa2_eth_has_mac(priv))
4662 		return;
4663 
4664 	dpaa2_mac_close(priv->mac);
4665 	kfree(priv->mac);
4666 	priv->mac = NULL;
4667 }
4668 
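/* Threaded DPNI interrupt handler: a LINK_CHANGED event triggers a link
 * state refresh, while ENDPOINT_CHANGED means the object connected to this
 * DPNI changed (e.g. a DPMAC was attached or removed), so the MAC address,
 * Tx FQIDs and MAC endpoint are updated accordingly.
 */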
4669 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4670 {
4671 	u32 status = ~0;
4672 	struct device *dev = (struct device *)arg;
4673 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4674 	struct net_device *net_dev = dev_get_drvdata(dev);
4675 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4676 	int err;
4677 
4678 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4679 				  DPNI_IRQ_INDEX, &status);
4680 	if (unlikely(err)) {
4681 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4682 		return IRQ_HANDLED;
4683 	}
4684 
4685 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4686 		dpaa2_eth_link_state_update(netdev_priv(net_dev));
4687 
4688 	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4689 		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4690 		dpaa2_eth_update_tx_fqids(priv);
4691 
4692 		rtnl_lock();
4693 		if (dpaa2_eth_has_mac(priv))
4694 			dpaa2_eth_disconnect_mac(priv);
4695 		else
4696 			dpaa2_eth_connect_mac(priv);
4697 		rtnl_unlock();
4698 	}
4699 
4700 	return IRQ_HANDLED;
4701 }
4702 
4703 static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4704 {
4705 	int err = 0;
4706 	struct fsl_mc_device_irq *irq;
4707 
4708 	err = fsl_mc_allocate_irqs(ls_dev);
4709 	if (err) {
4710 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4711 		return err;
4712 	}
4713 
4714 	irq = ls_dev->irqs[0];
4715 	err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4716 					NULL, dpni_irq0_handler_thread,
4717 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
4718 					dev_name(&ls_dev->dev), &ls_dev->dev);
4719 	if (err < 0) {
4720 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4721 		goto free_mc_irq;
4722 	}
4723 
4724 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4725 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4726 				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4727 	if (err < 0) {
4728 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4729 		goto free_irq;
4730 	}
4731 
4732 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4733 				  DPNI_IRQ_INDEX, 1);
4734 	if (err < 0) {
4735 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4736 		goto free_irq;
4737 	}
4738 
4739 	return 0;
4740 
4741 free_irq:
4742 	devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4743 free_mc_irq:
4744 	fsl_mc_free_irqs(ls_dev);
4745 
4746 	return err;
4747 }
4748 
4749 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4750 {
4751 	int i;
4752 	struct dpaa2_eth_channel *ch;
4753 
4754 	for (i = 0; i < priv->num_channels; i++) {
4755 		ch = priv->channel[i];
4756 		/* The default NAPI budget (NAPI_POLL_WEIGHT) *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4757 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
4758 	}
4759 }
4760 
4761 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4762 {
4763 	int i;
4764 	struct dpaa2_eth_channel *ch;
4765 
4766 	for (i = 0; i < priv->num_channels; i++) {
4767 		ch = priv->channel[i];
4768 		netif_napi_del(&ch->napi);
4769 	}
4770 }
4771 
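/* Probe one DPNI object: allocate the net_device and an MC portal, configure
 * the DPNI, DPIO channels and default buffer pool, bind the frame queues,
 * then set up NAPI, per-CPU state, checksum offload, IRQs (or a link polling
 * thread as fallback), the MAC endpoint and devlink before registering the
 * net_device.
 */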
4772 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4773 {
4774 	struct device *dev;
4775 	struct net_device *net_dev = NULL;
4776 	struct dpaa2_eth_priv *priv = NULL;
4777 	int err = 0;
4778 
4779 	dev = &dpni_dev->dev;
4780 
4781 	/* Net device */
4782 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4783 	if (!net_dev) {
4784 		dev_err(dev, "alloc_etherdev_mq() failed\n");
4785 		return -ENOMEM;
4786 	}
4787 
4788 	SET_NETDEV_DEV(net_dev, dev);
4789 	dev_set_drvdata(dev, net_dev);
4790 
4791 	priv = netdev_priv(net_dev);
4792 	priv->net_dev = net_dev;
4793 	SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
4794 
4795 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
4796 
4797 	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4798 	priv->rx_tstamp = false;
4799 
4800 	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4801 	if (!priv->dpaa2_ptp_wq) {
4802 		err = -ENOMEM;
4803 		goto err_wq_alloc;
4804 	}
4805 
4806 	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4807 	mutex_init(&priv->onestep_tstamp_lock);
4808 	skb_queue_head_init(&priv->tx_skbs);
4809 
4810 	priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4811 
4812 	/* Obtain a MC portal */
4813 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4814 				     &priv->mc_io);
4815 	if (err) {
4816 		if (err == -ENXIO) {
4817 			dev_dbg(dev, "waiting for MC portal\n");
4818 			err = -EPROBE_DEFER;
4819 		} else {
4820 			dev_err(dev, "MC portal allocation failed\n");
4821 		}
4822 		goto err_portal_alloc;
4823 	}
4824 
4825 	/* MC objects initialization and configuration */
4826 	err = dpaa2_eth_setup_dpni(dpni_dev);
4827 	if (err)
4828 		goto err_dpni_setup;
4829 
4830 	err = dpaa2_eth_setup_dpio(priv);
4831 	if (err)
4832 		goto err_dpio_setup;
4833 
4834 	dpaa2_eth_setup_fqs(priv);
4835 
4836 	err = dpaa2_eth_setup_default_dpbp(priv);
4837 	if (err)
4838 		goto err_dpbp_setup;
4839 
4840 	err = dpaa2_eth_bind_dpni(priv);
4841 	if (err)
4842 		goto err_bind;
4843 
4844 	/* Add a NAPI context for each channel */
4845 	dpaa2_eth_add_ch_napi(priv);
4846 
4847 	/* Percpu statistics */
4848 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4849 	if (!priv->percpu_stats) {
4850 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4851 		err = -ENOMEM;
4852 		goto err_alloc_percpu_stats;
4853 	}
4854 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4855 	if (!priv->percpu_extras) {
4856 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4857 		err = -ENOMEM;
4858 		goto err_alloc_percpu_extras;
4859 	}
4860 
4861 	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4862 	if (!priv->sgt_cache) {
4863 		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4864 		err = -ENOMEM;
4865 		goto err_alloc_sgt_cache;
4866 	}
4867 
4868 	priv->fd = alloc_percpu(*priv->fd);
4869 	if (!priv->fd) {
4870 		dev_err(dev, "alloc_percpu(fds) failed\n");
4871 		err = -ENOMEM;
4872 		goto err_alloc_fds;
4873 	}
4874 
4875 	err = dpaa2_eth_netdev_init(net_dev);
4876 	if (err)
4877 		goto err_netdev_init;
4878 
4879 	/* Configure checksum offload based on current interface flags */
4880 	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4881 	if (err)
4882 		goto err_csum;
4883 
4884 	err = dpaa2_eth_set_tx_csum(priv,
4885 				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4886 	if (err)
4887 		goto err_csum;
4888 
4889 	err = dpaa2_eth_alloc_rings(priv);
4890 	if (err)
4891 		goto err_alloc_rings;
4892 
4893 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4894 	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4895 		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4896 		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4897 	} else {
4898 		dev_dbg(dev, "PFC not supported\n");
4899 	}
4900 #endif
4901 
4902 	err = dpaa2_eth_setup_irqs(dpni_dev);
4903 	if (err) {
4904 		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4905 		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4906 						"%s_poll_link", net_dev->name);
4907 		if (IS_ERR(priv->poll_thread)) {
4908 			dev_err(dev, "Error starting polling thread\n");
4909 			goto err_poll_thread;
4910 		}
4911 		priv->do_link_poll = true;
4912 	}
4913 
4914 	err = dpaa2_eth_connect_mac(priv);
4915 	if (err)
4916 		goto err_connect_mac;
4917 
4918 	err = dpaa2_eth_dl_alloc(priv);
4919 	if (err)
4920 		goto err_dl_register;
4921 
4922 	err = dpaa2_eth_dl_traps_register(priv);
4923 	if (err)
4924 		goto err_dl_trap_register;
4925 
4926 	err = dpaa2_eth_dl_port_add(priv);
4927 	if (err)
4928 		goto err_dl_port_add;
4929 
4930 	err = register_netdev(net_dev);
4931 	if (err < 0) {
4932 		dev_err(dev, "register_netdev() failed\n");
4933 		goto err_netdev_reg;
4934 	}
4935 
4936 #ifdef CONFIG_DEBUG_FS
4937 	dpaa2_dbg_add(priv);
4938 #endif
4939 
4940 	dpaa2_eth_dl_register(priv);
4941 	dev_info(dev, "Probed interface %s\n", net_dev->name);
4942 	return 0;
4943 
4944 err_netdev_reg:
4945 	dpaa2_eth_dl_port_del(priv);
4946 err_dl_port_add:
4947 	dpaa2_eth_dl_traps_unregister(priv);
4948 err_dl_trap_register:
4949 	dpaa2_eth_dl_free(priv);
4950 err_dl_register:
4951 	dpaa2_eth_disconnect_mac(priv);
4952 err_connect_mac:
4953 	if (priv->do_link_poll)
4954 		kthread_stop(priv->poll_thread);
4955 	else
4956 		fsl_mc_free_irqs(dpni_dev);
4957 err_poll_thread:
4958 	dpaa2_eth_free_rings(priv);
4959 err_alloc_rings:
4960 err_csum:
4961 err_netdev_init:
4962 	free_percpu(priv->fd);
4963 err_alloc_fds:
4964 	free_percpu(priv->sgt_cache);
4965 err_alloc_sgt_cache:
4966 	free_percpu(priv->percpu_extras);
4967 err_alloc_percpu_extras:
4968 	free_percpu(priv->percpu_stats);
4969 err_alloc_percpu_stats:
4970 	dpaa2_eth_del_ch_napi(priv);
4971 err_bind:
4972 	dpaa2_eth_free_dpbps(priv);
4973 err_dpbp_setup:
4974 	dpaa2_eth_free_dpio(priv);
4975 err_dpio_setup:
4976 	dpaa2_eth_free_dpni(priv);
4977 err_dpni_setup:
4978 	fsl_mc_portal_free(priv->mc_io);
4979 err_portal_alloc:
4980 	destroy_workqueue(priv->dpaa2_ptp_wq);
4981 err_wq_alloc:
4982 	dev_set_drvdata(dev, NULL);
4983 	free_netdev(net_dev);
4984 
4985 	return err;
4986 }
4987 
4988 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
4989 {
4990 	struct device *dev;
4991 	struct net_device *net_dev;
4992 	struct dpaa2_eth_priv *priv;
4993 
4994 	dev = &ls_dev->dev;
4995 	net_dev = dev_get_drvdata(dev);
4996 	priv = netdev_priv(net_dev);
4997 
4998 	dpaa2_eth_dl_unregister(priv);
4999 
5000 #ifdef CONFIG_DEBUG_FS
5001 	dpaa2_dbg_remove(priv);
5002 #endif
5003 
5004 	unregister_netdev(net_dev);
5005 	rtnl_lock();
5006 	dpaa2_eth_disconnect_mac(priv);
5007 	rtnl_unlock();
5008 
5009 	dpaa2_eth_dl_port_del(priv);
5010 	dpaa2_eth_dl_traps_unregister(priv);
5011 	dpaa2_eth_dl_free(priv);
5012 
5013 	if (priv->do_link_poll)
5014 		kthread_stop(priv->poll_thread);
5015 	else
5016 		fsl_mc_free_irqs(ls_dev);
5017 
5018 	dpaa2_eth_free_rings(priv);
5019 	free_percpu(priv->fd);
5020 	free_percpu(priv->sgt_cache);
5021 	free_percpu(priv->percpu_stats);
5022 	free_percpu(priv->percpu_extras);
5023 
5024 	dpaa2_eth_del_ch_napi(priv);
5025 	dpaa2_eth_free_dpbps(priv);
5026 	dpaa2_eth_free_dpio(priv);
5027 	dpaa2_eth_free_dpni(priv);
5028 	if (priv->onestep_reg_base)
5029 		iounmap(priv->onestep_reg_base);
5030 
5031 	fsl_mc_portal_free(priv->mc_io);
5032 
5033 	destroy_workqueue(priv->dpaa2_ptp_wq);
5034 
5035 	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5036 
5037 	free_netdev(net_dev);
5038 
5039 	return 0;
5040 }
5041 
5042 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
5043 	{
5044 		.vendor = FSL_MC_VENDOR_FREESCALE,
5045 		.obj_type = "dpni",
5046 	},
5047 	{ .vendor = 0x0 }
5048 };
5049 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
5050 
5051 static struct fsl_mc_driver dpaa2_eth_driver = {
5052 	.driver = {
5053 		.name = KBUILD_MODNAME,
5054 		.owner = THIS_MODULE,
5055 	},
5056 	.probe = dpaa2_eth_probe,
5057 	.remove = dpaa2_eth_remove,
5058 	.match_id_table = dpaa2_eth_match_id_table
5059 };
5060 
5061 static int __init dpaa2_eth_driver_init(void)
5062 {
5063 	int err;
5064 
5065 	dpaa2_eth_dbg_init();
5066 	err = fsl_mc_driver_register(&dpaa2_eth_driver);
5067 	if (err) {
5068 		dpaa2_eth_dbg_exit();
5069 		return err;
5070 	}
5071 
5072 	return 0;
5073 }
5074 
5075 static void __exit dpaa2_eth_driver_exit(void)
5076 {
5077 	dpaa2_eth_dbg_exit();
5078 	fsl_mc_driver_unregister(&dpaa2_eth_driver);
5079 }
5080 
5081 module_init(dpaa2_eth_driver_init);
5082 module_exit(dpaa2_eth_driver_exit);
5083