1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2017 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 #include <linux/bpf.h>
17 #include <linux/bpf_trace.h>
18 #include <net/sock.h>
19 
20 #include "dpaa2-eth.h"
21 
22 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2 driver
23  * files using these trace events only need to #include "dpaa2-eth-trace.h"
24  */
25 #define CREATE_TRACE_POINTS
26 #include "dpaa2-eth-trace.h"
27 
28 MODULE_LICENSE("Dual BSD/GPL");
29 MODULE_AUTHOR("Freescale Semiconductor, Inc");
30 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
31 
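/* Frame descriptors carry DMA addresses. When the DPNI sits behind an IOMMU
 * these are IOVAs and must first be translated to physical addresses before
 * phys_to_virt() can be used; without an IOMMU domain the address is already
 * a physical one.
 */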
32 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
33 				dma_addr_t iova_addr)
34 {
35 	phys_addr_t phys_addr;
36 
37 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
38 
39 	return phys_to_virt(phys_addr);
40 }
41 
42 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
43 			     u32 fd_status,
44 			     struct sk_buff *skb)
45 {
46 	skb_checksum_none_assert(skb);
47 
48 	/* HW checksum validation is disabled, nothing to do here */
49 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
50 		return;
51 
52 	/* Read checksum validation bits */
53 	if (!((fd_status & DPAA2_FAS_L3CV) &&
54 	      (fd_status & DPAA2_FAS_L4CV)))
55 		return;
56 
57 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
58 	skb->ip_summed = CHECKSUM_UNNECESSARY;
59 }
60 
61 /* Free a received FD.
62  * Not to be used for Tx conf FDs or on any other paths.
63  */
64 static void free_rx_fd(struct dpaa2_eth_priv *priv,
65 		       const struct dpaa2_fd *fd,
66 		       void *vaddr)
67 {
68 	struct device *dev = priv->net_dev->dev.parent;
69 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
70 	u8 fd_format = dpaa2_fd_get_format(fd);
71 	struct dpaa2_sg_entry *sgt;
72 	void *sg_vaddr;
73 	int i;
74 
75 	/* If single buffer frame, just free the data buffer */
76 	if (fd_format == dpaa2_fd_single)
77 		goto free_buf;
78 	else if (fd_format != dpaa2_fd_sg)
79 		/* We don't support any other format */
80 		return;
81 
82 	/* For S/G frames, we first need to free all SG entries
83 	 * except the first one, which was taken care of already
84 	 */
85 	sgt = vaddr + dpaa2_fd_get_offset(fd);
86 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
87 		addr = dpaa2_sg_get_addr(&sgt[i]);
88 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
89 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
90 			       DMA_BIDIRECTIONAL);
91 
92 		free_pages((unsigned long)sg_vaddr, 0);
93 		if (dpaa2_sg_is_final(&sgt[i]))
94 			break;
95 	}
96 
97 free_buf:
98 	free_pages((unsigned long)vaddr, 0);
99 }
100 
101 /* Build a linear skb based on a single-buffer frame descriptor */
102 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
103 					const struct dpaa2_fd *fd,
104 					void *fd_vaddr)
105 {
106 	struct sk_buff *skb = NULL;
107 	u16 fd_offset = dpaa2_fd_get_offset(fd);
108 	u32 fd_length = dpaa2_fd_get_len(fd);
109 
110 	ch->buf_count--;
111 
112 	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
113 	if (unlikely(!skb))
114 		return NULL;
115 
116 	skb_reserve(skb, fd_offset);
117 	skb_put(skb, fd_length);
118 
119 	return skb;
120 }
121 
122 /* Build a non-linear (fragmented) skb based on an S/G table */
123 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 				      struct dpaa2_eth_channel *ch,
125 				      struct dpaa2_sg_entry *sgt)
126 {
127 	struct sk_buff *skb = NULL;
128 	struct device *dev = priv->net_dev->dev.parent;
129 	void *sg_vaddr;
130 	dma_addr_t sg_addr;
131 	u16 sg_offset;
132 	u32 sg_length;
133 	struct page *page, *head_page;
134 	int page_offset;
135 	int i;
136 
137 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 		struct dpaa2_sg_entry *sge = &sgt[i];
139 
140 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
141 		 * but this is the only format we may receive from HW anyway
142 		 */
143 
144 		/* Get the address and length from the S/G entry */
145 		sg_addr = dpaa2_sg_get_addr(sge);
146 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 		dma_unmap_page(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 			       DMA_BIDIRECTIONAL);
149 
150 		sg_length = dpaa2_sg_get_len(sge);
151 
152 		if (i == 0) {
153 			/* We build the skb around the first data buffer */
154 			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
155 			if (unlikely(!skb)) {
156 				/* Free the first SG entry now, since we already
157 				 * unmapped it and obtained the virtual address
158 				 */
159 				free_pages((unsigned long)sg_vaddr, 0);
160 
161 				/* We still need to subtract the buffers used
162 				 * by this FD from our software counter
163 				 */
164 				while (!dpaa2_sg_is_final(&sgt[i]) &&
165 				       i < DPAA2_ETH_MAX_SG_ENTRIES)
166 					i++;
167 				break;
168 			}
169 
170 			sg_offset = dpaa2_sg_get_offset(sge);
171 			skb_reserve(skb, sg_offset);
172 			skb_put(skb, sg_length);
173 		} else {
174 			/* Rest of the data buffers are stored as skb frags */
175 			page = virt_to_page(sg_vaddr);
176 			head_page = virt_to_head_page(sg_vaddr);
177 
178 			/* Offset in page (which may be compound).
179 			 * Data in subsequent SG entries is stored from the
180 			 * beginning of the buffer, so we don't need to add the
181 			 * sg_offset.
182 			 */
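			/* Worked example, assuming 4K pages: if sg_vaddr sits
			 * 0x800 bytes into the second page of a compound
			 * page, the offset within that page is 0x800 and the
			 * distance to the head page is 0x1000, so page_offset
			 * works out to 0x1800.
			 */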
183 			page_offset = ((unsigned long)sg_vaddr &
184 				(PAGE_SIZE - 1)) +
185 				(page_address(page) - page_address(head_page));
186 
187 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 					sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 		}
190 
191 		if (dpaa2_sg_is_final(sge))
192 			break;
193 	}
194 
195 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196 
197 	/* Count the i + 1 data buffers (entries 0..i) plus the SG table buffer */
198 	ch->buf_count -= i + 2;
199 
200 	return skb;
201 }
202 
203 /* Free buffers that were acquired from the buffer pool or were meant
204  * to be released back into it
205  */
206 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
207 {
208 	struct device *dev = priv->net_dev->dev.parent;
209 	void *vaddr;
210 	int i;
211 
212 	for (i = 0; i < count; i++) {
213 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
214 		dma_unmap_page(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
215 			       DMA_BIDIRECTIONAL);
216 		free_pages((unsigned long)vaddr, 0);
217 	}
218 }
219 
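/* Buffers dropped by XDP are not released to the pool one at a time; they are
 * staged in ch->xdp.drop_bufs and recycled with a single release command once
 * a full batch of DPAA2_ETH_BUFS_PER_CMD addresses has accumulated.
 */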
220 static void xdp_release_buf(struct dpaa2_eth_priv *priv,
221 			    struct dpaa2_eth_channel *ch,
222 			    dma_addr_t addr)
223 {
224 	int err;
225 
226 	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
227 	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
228 		return;
229 
230 	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
231 					       ch->xdp.drop_bufs,
232 					       ch->xdp.drop_cnt)) == -EBUSY)
233 		cpu_relax();
234 
235 	if (err) {
236 		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
237 		ch->buf_count -= ch->xdp.drop_cnt;
238 	}
239 
240 	ch->xdp.drop_cnt = 0;
241 }
242 
243 static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
244 		       void *buf_start, u16 queue_id)
245 {
246 	struct dpaa2_eth_fq *fq;
247 	struct dpaa2_faead *faead;
248 	u32 ctrl, frc;
249 	int i, err;
250 
251 	/* Mark the egress frame hardware annotation area as valid */
252 	frc = dpaa2_fd_get_frc(fd);
253 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
254 	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
255 
256 	/* Instruct hardware to release the FD buffer directly into
257 	 * the buffer pool once transmission is completed, instead of
258 	 * sending a Tx confirmation frame to us
259 	 */
260 	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
261 	faead = dpaa2_get_faead(buf_start, false);
262 	faead->ctrl = cpu_to_le32(ctrl);
263 	faead->conf_fqid = 0;
264 
265 	fq = &priv->fq[queue_id];
266 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
267 		err = priv->enqueue(priv, fq, fd, 0);
268 		if (err != -EBUSY)
269 			break;
270 	}
271 
272 	return err;
273 }
274 
275 static u32 run_xdp(struct dpaa2_eth_priv *priv,
276 		   struct dpaa2_eth_channel *ch,
277 		   struct dpaa2_eth_fq *rx_fq,
278 		   struct dpaa2_fd *fd, void *vaddr)
279 {
280 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
281 	struct rtnl_link_stats64 *percpu_stats;
282 	struct bpf_prog *xdp_prog;
283 	struct xdp_buff xdp;
284 	u32 xdp_act = XDP_PASS;
285 	int err;
286 
287 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
288 
289 	rcu_read_lock();
290 
291 	xdp_prog = READ_ONCE(ch->xdp.prog);
292 	if (!xdp_prog)
293 		goto out;
294 
295 	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
296 	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
297 	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
298 	xdp_set_data_meta_invalid(&xdp);
299 	xdp.rxq = &ch->xdp_rxq;
300 
301 	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
302 
303 	/* xdp.data pointer may have changed */
304 	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
305 	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
306 
307 	switch (xdp_act) {
308 	case XDP_PASS:
309 		break;
310 	case XDP_TX:
311 		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
312 		if (err) {
313 			xdp_release_buf(priv, ch, addr);
314 			percpu_stats->tx_errors++;
315 			ch->stats.xdp_tx_err++;
316 		} else {
317 			percpu_stats->tx_packets++;
318 			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
319 			ch->stats.xdp_tx++;
320 		}
321 		break;
322 	default:
323 		bpf_warn_invalid_xdp_action(xdp_act);
324 		/* fall through */
325 	case XDP_ABORTED:
326 		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
327 		/* fall through */
328 	case XDP_DROP:
329 		xdp_release_buf(priv, ch, addr);
330 		ch->stats.xdp_drop++;
331 		break;
332 	case XDP_REDIRECT:
333 		dma_unmap_page(priv->net_dev->dev.parent, addr,
334 			       DPAA2_ETH_RX_BUF_SIZE, DMA_BIDIRECTIONAL);
335 		ch->buf_count--;
336 		xdp.data_hard_start = vaddr;
337 		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
338 		if (unlikely(err))
339 			ch->stats.xdp_drop++;
340 		else
341 			ch->stats.xdp_redirect++;
342 		break;
343 	}
344 
345 	ch->xdp.res |= xdp_act;
346 out:
347 	rcu_read_unlock();
348 	return xdp_act;
349 }
350 
351 /* Main Rx frame processing routine */
352 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
353 			 struct dpaa2_eth_channel *ch,
354 			 const struct dpaa2_fd *fd,
355 			 struct dpaa2_eth_fq *fq)
356 {
357 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
358 	u8 fd_format = dpaa2_fd_get_format(fd);
359 	void *vaddr;
360 	struct sk_buff *skb;
361 	struct rtnl_link_stats64 *percpu_stats;
362 	struct dpaa2_eth_drv_stats *percpu_extras;
363 	struct device *dev = priv->net_dev->dev.parent;
364 	struct dpaa2_fas *fas;
365 	void *buf_data;
366 	u32 status = 0;
367 	u32 xdp_act;
368 
369 	/* Tracing point */
370 	trace_dpaa2_rx_fd(priv->net_dev, fd);
371 
372 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
373 	dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
374 				DMA_BIDIRECTIONAL);
375 
376 	fas = dpaa2_get_fas(vaddr, false);
377 	prefetch(fas);
378 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
379 	prefetch(buf_data);
380 
381 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
382 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
383 
384 	if (fd_format == dpaa2_fd_single) {
385 		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
386 		if (xdp_act != XDP_PASS) {
387 			percpu_stats->rx_packets++;
388 			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
389 			return;
390 		}
391 
392 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
393 			       DMA_BIDIRECTIONAL);
394 		skb = build_linear_skb(ch, fd, vaddr);
395 	} else if (fd_format == dpaa2_fd_sg) {
396 		WARN_ON(priv->xdp_prog);
397 
398 		dma_unmap_page(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
399 			       DMA_BIDIRECTIONAL);
400 		skb = build_frag_skb(priv, ch, buf_data);
401 		free_pages((unsigned long)vaddr, 0);
402 		percpu_extras->rx_sg_frames++;
403 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
404 	} else {
405 		/* We don't support any other format */
406 		goto err_frame_format;
407 	}
408 
409 	if (unlikely(!skb))
410 		goto err_build_skb;
411 
412 	prefetch(skb->data);
413 
414 	/* Get the timestamp value */
415 	if (priv->rx_tstamp) {
416 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
417 		__le64 *ts = dpaa2_get_ts(vaddr, false);
418 		u64 ns;
419 
420 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
421 
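		/* The annotation area holds a raw timestamp counter; scaling
		 * it by the PTP clock period converts it to nanoseconds
		 */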
422 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
423 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
424 	}
425 
426 	/* Check if we need to validate the L4 csum */
427 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
428 		status = le32_to_cpu(fas->status);
429 		validate_rx_csum(priv, status, skb);
430 	}
431 
432 	skb->protocol = eth_type_trans(skb, priv->net_dev);
433 	skb_record_rx_queue(skb, fq->flowid);
434 
435 	percpu_stats->rx_packets++;
436 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
437 
438 	list_add_tail(&skb->list, ch->rx_list);
439 
440 	return;
441 
442 err_build_skb:
443 	free_rx_fd(priv, fd, vaddr);
444 err_frame_format:
445 	percpu_stats->rx_dropped++;
446 }
447 
448 /* Consume all frames pull-dequeued into the store. This is the simplest way to
449  * make sure we don't accidentally issue another volatile dequeue which would
450  * overwrite (leak) frames already in the store.
451  *
452  * Observance of NAPI budget is not our concern, leaving that to the caller.
453  */
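/* The FQD context of each dequeue entry carries a pointer to our
 * dpaa2_eth_fq structure, which tells us both the queue the frame came from
 * and the consume callback (Rx or Tx conf) to run on it.
 */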
454 static int consume_frames(struct dpaa2_eth_channel *ch,
455 			  struct dpaa2_eth_fq **src)
456 {
457 	struct dpaa2_eth_priv *priv = ch->priv;
458 	struct dpaa2_eth_fq *fq = NULL;
459 	struct dpaa2_dq *dq;
460 	const struct dpaa2_fd *fd;
461 	int cleaned = 0;
462 	int is_last;
463 
464 	do {
465 		dq = dpaa2_io_store_next(ch->store, &is_last);
466 		if (unlikely(!dq)) {
467 			/* If we're here, we *must* have placed a
468 		 * volatile dequeue command, so keep reading through
469 			 * the store until we get some sort of valid response
470 			 * token (either a valid frame or an "empty dequeue")
471 			 */
472 			continue;
473 		}
474 
475 		fd = dpaa2_dq_fd(dq);
476 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
477 
478 		fq->consume(priv, ch, fd, fq);
479 		cleaned++;
480 	} while (!is_last);
481 
482 	if (!cleaned)
483 		return 0;
484 
485 	fq->stats.frames += cleaned;
486 
487 	/* A dequeue operation only pulls frames from a single queue
488 	 * into the store. Return the frame queue as an out param.
489 	 */
490 	if (src)
491 		*src = fq;
492 
493 	return cleaned;
494 }
495 
496 /* Configure the egress frame annotation for timestamp update */
497 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
498 {
499 	struct dpaa2_faead *faead;
500 	u32 ctrl, frc;
501 
502 	/* Mark the egress frame annotation area as valid */
503 	frc = dpaa2_fd_get_frc(fd);
504 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
505 
506 	/* Set hardware annotation size */
507 	ctrl = dpaa2_fd_get_ctrl(fd);
508 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
509 
510 	/* Enable the UPD (update prepended data) bit in the FAEAD field of
511 	 * the hardware frame annotation area
512 	 */
513 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
514 	faead = dpaa2_get_faead(buf_start, true);
515 	faead->ctrl = cpu_to_le32(ctrl);
516 }
517 
518 /* Create a frame descriptor based on a fragmented skb */
519 static int build_sg_fd(struct dpaa2_eth_priv *priv,
520 		       struct sk_buff *skb,
521 		       struct dpaa2_fd *fd)
522 {
523 	struct device *dev = priv->net_dev->dev.parent;
524 	void *sgt_buf = NULL;
525 	dma_addr_t addr;
526 	int nr_frags = skb_shinfo(skb)->nr_frags;
527 	struct dpaa2_sg_entry *sgt;
528 	int i, err;
529 	int sgt_buf_size;
530 	struct scatterlist *scl, *crt_scl;
531 	int num_sg;
532 	int num_dma_bufs;
533 	struct dpaa2_eth_swa *swa;
534 
535 	/* Create and map scatterlist.
536 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
537 	 * to go beyond nr_frags+1.
538 	 * Note: We don't support chained scatterlists
539 	 */
540 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
541 		return -EINVAL;
542 
543 	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
544 	if (unlikely(!scl))
545 		return -ENOMEM;
546 
547 	sg_init_table(scl, nr_frags + 1);
548 	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
549 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
550 	if (unlikely(!num_dma_bufs)) {
551 		err = -ENOMEM;
552 		goto dma_map_sg_failed;
553 	}
554 
555 	/* Prepare the HW SGT structure */
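	/* The first tx_data_offset bytes of this buffer are reserved for the
	 * frame annotations (including the software annotation filled in
	 * below); the hardware SG entries start right after them.
	 */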
556 	sgt_buf_size = priv->tx_data_offset +
557 		       sizeof(struct dpaa2_sg_entry) *  num_dma_bufs;
558 	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
559 	if (unlikely(!sgt_buf)) {
560 		err = -ENOMEM;
561 		goto sgt_buf_alloc_failed;
562 	}
563 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
564 	memset(sgt_buf, 0, sgt_buf_size);
565 
566 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
567 
568 	/* Fill in the HW SGT structure.
569 	 *
570 	 * sgt_buf is zeroed out, so the following fields are implicit
571 	 * in all sgt entries:
572 	 *   - offset is 0
573 	 *   - format is 'dpaa2_sg_single'
574 	 */
575 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
576 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
577 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
578 	}
579 	dpaa2_sg_set_final(&sgt[i - 1], true);
580 
581 	/* Store the skb backpointer in the SGT buffer.
582 	 * Fit the scatterlist and the number of buffers alongside the
583 	 * skb backpointer in the software annotation area. We'll need
584 	 * all of them on Tx Conf.
585 	 */
586 	swa = (struct dpaa2_eth_swa *)sgt_buf;
587 	swa->type = DPAA2_ETH_SWA_SG;
588 	swa->sg.skb = skb;
589 	swa->sg.scl = scl;
590 	swa->sg.num_sg = num_sg;
591 	swa->sg.sgt_size = sgt_buf_size;
592 
593 	/* Separately map the SGT buffer */
594 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
595 	if (unlikely(dma_mapping_error(dev, addr))) {
596 		err = -ENOMEM;
597 		goto dma_map_single_failed;
598 	}
599 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
600 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
601 	dpaa2_fd_set_addr(fd, addr);
602 	dpaa2_fd_set_len(fd, skb->len);
603 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
604 
605 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
606 		enable_tx_tstamp(fd, sgt_buf);
607 
608 	return 0;
609 
610 dma_map_single_failed:
611 	skb_free_frag(sgt_buf);
612 sgt_buf_alloc_failed:
613 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
614 dma_map_sg_failed:
615 	kfree(scl);
616 	return err;
617 }
618 
619 /* Create a frame descriptor based on a linear skb */
620 static int build_single_fd(struct dpaa2_eth_priv *priv,
621 			   struct sk_buff *skb,
622 			   struct dpaa2_fd *fd)
623 {
624 	struct device *dev = priv->net_dev->dev.parent;
625 	u8 *buffer_start, *aligned_start;
626 	struct dpaa2_eth_swa *swa;
627 	dma_addr_t addr;
628 
629 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
630 
631 	/* If there's enough room to align the FD address, do it.
632 	 * It will help hardware optimize accesses.
633 	 */
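	/* PTR_ALIGN() rounds up, so subtracting the alignment first yields
	 * the closest aligned address at or below buffer_start; only use it
	 * if it still falls within the skb's allocated headroom.
	 */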
634 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
635 				  DPAA2_ETH_TX_BUF_ALIGN);
636 	if (aligned_start >= skb->head)
637 		buffer_start = aligned_start;
638 
639 	/* Store a backpointer to the skb at the beginning of the buffer
640 	 * (in the private data area) such that we can release it
641 	 * on Tx confirm
642 	 */
643 	swa = (struct dpaa2_eth_swa *)buffer_start;
644 	swa->type = DPAA2_ETH_SWA_SINGLE;
645 	swa->single.skb = skb;
646 
647 	addr = dma_map_single(dev, buffer_start,
648 			      skb_tail_pointer(skb) - buffer_start,
649 			      DMA_BIDIRECTIONAL);
650 	if (unlikely(dma_mapping_error(dev, addr)))
651 		return -ENOMEM;
652 
653 	dpaa2_fd_set_addr(fd, addr);
654 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
655 	dpaa2_fd_set_len(fd, skb->len);
656 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
657 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
658 
659 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
660 		enable_tx_tstamp(fd, buffer_start);
661 
662 	return 0;
663 }
664 
665 /* FD freeing routine on the Tx path
666  *
667  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
668  * back-pointed to is also freed.
669  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
670  * dpaa2_eth_tx().
671  */
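/* The software annotation written when the FD was built tells us which Tx
 * path the frame came from (linear skb, S/G skb or an xdp_frame sent via
 * XDP_REDIRECT), and therefore how to unmap and release it.
 */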
672 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
673 		       struct dpaa2_eth_fq *fq,
674 		       const struct dpaa2_fd *fd, bool in_napi)
675 {
676 	struct device *dev = priv->net_dev->dev.parent;
677 	dma_addr_t fd_addr;
678 	struct sk_buff *skb = NULL;
679 	unsigned char *buffer_start;
680 	struct dpaa2_eth_swa *swa;
681 	u8 fd_format = dpaa2_fd_get_format(fd);
682 	u32 fd_len = dpaa2_fd_get_len(fd);
683 
684 	fd_addr = dpaa2_fd_get_addr(fd);
685 	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
686 	swa = (struct dpaa2_eth_swa *)buffer_start;
687 
688 	if (fd_format == dpaa2_fd_single) {
689 		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
690 			skb = swa->single.skb;
691 			/* Accessing the skb buffer is safe before dma unmap,
692 			 * because we didn't map the actual skb shell.
693 			 */
694 			dma_unmap_single(dev, fd_addr,
695 					 skb_tail_pointer(skb) - buffer_start,
696 					 DMA_BIDIRECTIONAL);
697 		} else {
698 			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
699 			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
700 					 DMA_BIDIRECTIONAL);
701 		}
702 	} else if (fd_format == dpaa2_fd_sg) {
703 		skb = swa->sg.skb;
704 
705 		/* Unmap the scatterlist */
706 		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
707 			     DMA_BIDIRECTIONAL);
708 		kfree(swa->sg.scl);
709 
710 		/* Unmap the SGT buffer */
711 		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
712 				 DMA_BIDIRECTIONAL);
713 	} else {
714 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
715 		return;
716 	}
717 
718 	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
719 		fq->dq_frames++;
720 		fq->dq_bytes += fd_len;
721 	}
722 
723 	if (swa->type == DPAA2_ETH_SWA_XDP) {
724 		xdp_return_frame(swa->xdp.xdpf);
725 		return;
726 	}
727 
728 	/* Get the timestamp value */
729 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
730 		struct skb_shared_hwtstamps shhwtstamps;
731 		__le64 *ts = dpaa2_get_ts(buffer_start, true);
732 		u64 ns;
733 
734 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
735 
736 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
737 		shhwtstamps.hwtstamp = ns_to_ktime(ns);
738 		skb_tstamp_tx(skb, &shhwtstamps);
739 	}
740 
741 	/* Free SGT buffer allocated on tx */
742 	if (fd_format != dpaa2_fd_single)
743 		skb_free_frag(buffer_start);
744 
745 	/* Move on with skb release */
746 	napi_consume_skb(skb, in_napi);
747 }
748 
749 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
750 {
751 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
752 	struct dpaa2_fd fd;
753 	struct rtnl_link_stats64 *percpu_stats;
754 	struct dpaa2_eth_drv_stats *percpu_extras;
755 	struct dpaa2_eth_fq *fq;
756 	struct netdev_queue *nq;
757 	u16 queue_mapping;
758 	unsigned int needed_headroom;
759 	u32 fd_len;
760 	u8 prio = 0;
761 	int err, i;
762 
763 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
764 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
765 
766 	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
767 	if (skb_headroom(skb) < needed_headroom) {
768 		struct sk_buff *ns;
769 
770 		ns = skb_realloc_headroom(skb, needed_headroom);
771 		if (unlikely(!ns)) {
772 			percpu_stats->tx_dropped++;
773 			goto err_alloc_headroom;
774 		}
775 		percpu_extras->tx_reallocs++;
776 
777 		if (skb->sk)
778 			skb_set_owner_w(ns, skb->sk);
779 
780 		dev_kfree_skb(skb);
781 		skb = ns;
782 	}
783 
784 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
785 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
786 	 */
787 	skb = skb_unshare(skb, GFP_ATOMIC);
788 	if (unlikely(!skb)) {
789 		/* skb_unshare() has already freed the skb */
790 		percpu_stats->tx_dropped++;
791 		return NETDEV_TX_OK;
792 	}
793 
794 	/* Setup the FD fields */
795 	memset(&fd, 0, sizeof(fd));
796 
797 	if (skb_is_nonlinear(skb)) {
798 		err = build_sg_fd(priv, skb, &fd);
799 		percpu_extras->tx_sg_frames++;
800 		percpu_extras->tx_sg_bytes += skb->len;
801 	} else {
802 		err = build_single_fd(priv, skb, &fd);
803 	}
804 
805 	if (unlikely(err)) {
806 		percpu_stats->tx_dropped++;
807 		goto err_build_fd;
808 	}
809 
810 	/* Tracing point */
811 	trace_dpaa2_tx_fd(net_dev, &fd);
812 
813 	/* TxConf FQ selection relies on queue id from the stack.
814 	 * In case of a forwarded frame from another DPNI interface, we choose
815 	 * a queue affined to the same core that processed the Rx frame
816 	 */
817 	queue_mapping = skb_get_queue_mapping(skb);
818 
819 	if (net_dev->num_tc) {
820 		prio = netdev_txq_to_tc(net_dev, queue_mapping);
821 		/* Hardware interprets priority level 0 as being the highest,
822 		 * so we need to do a reverse mapping to the netdev tc index
823 		 */
824 		prio = net_dev->num_tc - prio - 1;
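		/* e.g. with 4 traffic classes, netdev tc 3 becomes hardware
		 * priority 0 (the highest) and tc 0 becomes priority 3
		 */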
825 		/* We have only one FQ array entry for all Tx hardware queues
826 		 * with the same flow id (but different priority levels)
827 		 */
828 		queue_mapping %= dpaa2_eth_queue_count(priv);
829 	}
830 	fq = &priv->fq[queue_mapping];
831 
832 	fd_len = dpaa2_fd_get_len(&fd);
833 	nq = netdev_get_tx_queue(net_dev, queue_mapping);
834 	netdev_tx_sent_queue(nq, fd_len);
835 
836 	/* Everything that happens after this enqueue might race with
837 	 * the Tx confirmation callback for this frame
838 	 */
839 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
840 		err = priv->enqueue(priv, fq, &fd, prio);
841 		if (err != -EBUSY)
842 			break;
843 	}
844 	percpu_extras->tx_portal_busy += i;
845 	if (unlikely(err < 0)) {
846 		percpu_stats->tx_errors++;
847 		/* Clean up everything, including freeing the skb */
848 		free_tx_fd(priv, fq, &fd, false);
849 		netdev_tx_completed_queue(nq, 1, fd_len);
850 	} else {
851 		percpu_stats->tx_packets++;
852 		percpu_stats->tx_bytes += fd_len;
853 	}
854 
855 	return NETDEV_TX_OK;
856 
857 err_build_fd:
858 err_alloc_headroom:
859 	dev_kfree_skb(skb);
860 
861 	return NETDEV_TX_OK;
862 }
863 
864 /* Tx confirmation frame processing routine */
865 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
866 			      struct dpaa2_eth_channel *ch __always_unused,
867 			      const struct dpaa2_fd *fd,
868 			      struct dpaa2_eth_fq *fq)
869 {
870 	struct rtnl_link_stats64 *percpu_stats;
871 	struct dpaa2_eth_drv_stats *percpu_extras;
872 	u32 fd_len = dpaa2_fd_get_len(fd);
873 	u32 fd_errors;
874 
875 	/* Tracing point */
876 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
877 
878 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
879 	percpu_extras->tx_conf_frames++;
880 	percpu_extras->tx_conf_bytes += fd_len;
881 
882 	/* Check frame errors in the FD field */
883 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
884 	free_tx_fd(priv, fq, fd, true);
885 
886 	if (likely(!fd_errors))
887 		return;
888 
889 	if (net_ratelimit())
890 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
891 			   fd_errors);
892 
893 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
894 	/* Tx-conf logically pertains to the egress path. */
895 	percpu_stats->tx_errors++;
896 }
897 
898 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
899 {
900 	int err;
901 
902 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
903 			       DPNI_OFF_RX_L3_CSUM, enable);
904 	if (err) {
905 		netdev_err(priv->net_dev,
906 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
907 		return err;
908 	}
909 
910 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
911 			       DPNI_OFF_RX_L4_CSUM, enable);
912 	if (err) {
913 		netdev_err(priv->net_dev,
914 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
915 		return err;
916 	}
917 
918 	return 0;
919 }
920 
921 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
922 {
923 	int err;
924 
925 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
926 			       DPNI_OFF_TX_L3_CSUM, enable);
927 	if (err) {
928 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
929 		return err;
930 	}
931 
932 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
933 			       DPNI_OFF_TX_L4_CSUM, enable);
934 	if (err) {
935 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
936 		return err;
937 	}
938 
939 	return 0;
940 }
941 
942 /* Perform a single release command to add buffers
943  * to the specified buffer pool
944  */
945 static int add_bufs(struct dpaa2_eth_priv *priv,
946 		    struct dpaa2_eth_channel *ch, u16 bpid)
947 {
948 	struct device *dev = priv->net_dev->dev.parent;
949 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
950 	struct page *page;
951 	dma_addr_t addr;
952 	int i, err;
953 
954 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
958 		/* Allocate one page for each Rx buffer. WRIOP sees
959 		 * the entire page except for a tailroom reserved for
960 		 * skb shared info
961 		 */
962 		page = dev_alloc_pages(0);
963 		if (!page)
964 			goto err_alloc;
965 
966 		addr = dma_map_page(dev, page, 0, DPAA2_ETH_RX_BUF_SIZE,
967 				    DMA_BIDIRECTIONAL);
968 		if (unlikely(dma_mapping_error(dev, addr)))
969 			goto err_map;
970 
971 		buf_array[i] = addr;
972 
973 		/* tracing point */
974 		trace_dpaa2_eth_buf_seed(priv->net_dev,
975 					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
976 					 addr, DPAA2_ETH_RX_BUF_SIZE,
977 					 bpid);
978 	}
979 
980 release_bufs:
981 	/* In case the portal is busy, retry until successful */
982 	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
983 					       buf_array, i)) == -EBUSY)
984 		cpu_relax();
985 
986 	/* If release command failed, clean up and bail out;
987 	 * not much else we can do about it
988 	 */
989 	if (err) {
990 		free_bufs(priv, buf_array, i);
991 		return 0;
992 	}
993 
994 	return i;
995 
996 err_map:
997 	__free_pages(page, 0);
998 err_alloc:
999 	/* If we managed to allocate at least some buffers,
1000 	 * release them to hardware
1001 	 */
1002 	if (i)
1003 		goto release_bufs;
1004 
1005 	return 0;
1006 }
1007 
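/* Seed the buffer pool with up to DPAA2_ETH_NUM_BUFS buffers per channel,
 * added in batches of DPAA2_ETH_BUFS_PER_CMD (the most a single release
 * command can carry).
 */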
1008 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
1009 {
1010 	int i, j;
1011 	int new_count;
1012 
1013 	for (j = 0; j < priv->num_channels; j++) {
1014 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
1015 		     i += DPAA2_ETH_BUFS_PER_CMD) {
1016 			new_count = add_bufs(priv, priv->channel[j], bpid);
1017 			priv->channel[j]->buf_count += new_count;
1018 
1019 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
1020 				return -ENOMEM;
1021 			}
1022 		}
1023 	}
1024 
1025 	return 0;
1026 }
1027 
1028 /* Drain the specified number of buffers from the DPNI's private
1029  * buffer pool.
1030  * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1031  */
1032 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
1033 {
1034 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1035 	int ret;
1036 
1037 	do {
1038 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
1039 					       buf_array, count);
1040 		if (ret < 0) {
1041 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1042 			return;
1043 		}
1044 		free_bufs(priv, buf_array, ret);
1045 	} while (ret);
1046 }
1047 
1048 static void drain_pool(struct dpaa2_eth_priv *priv)
1049 {
1050 	int i;
1051 
1052 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
1053 	drain_bufs(priv, 1);
1054 
1055 	for (i = 0; i < priv->num_channels; i++)
1056 		priv->channel[i]->buf_count = 0;
1057 }
1058 
1059 /* Function is called from softirq context only, so we don't need to guard
1060  * the access to percpu count
1061  */
1062 static int refill_pool(struct dpaa2_eth_priv *priv,
1063 		       struct dpaa2_eth_channel *ch,
1064 		       u16 bpid)
1065 {
1066 	int new_count;
1067 
1068 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1069 		return 0;
1070 
1071 	do {
1072 		new_count = add_bufs(priv, ch, bpid);
1073 		if (unlikely(!new_count)) {
1074 			/* Out of memory; abort for now, we'll try later on */
1075 			break;
1076 		}
1077 		ch->buf_count += new_count;
1078 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1079 
1080 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1081 		return -ENOMEM;
1082 
1083 	return 0;
1084 }
1085 
1086 static int pull_channel(struct dpaa2_eth_channel *ch)
1087 {
1088 	int err;
1089 	int dequeues = -1;
1090 
1091 	/* Retry while portal is busy */
1092 	do {
1093 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1094 						    ch->store);
1095 		dequeues++;
1096 		cpu_relax();
1097 	} while (err == -EBUSY);
1098 
1099 	ch->stats.dequeue_portal_busy += dequeues;
1100 	if (unlikely(err))
1101 		ch->stats.pull_err++;
1102 
1103 	return err;
1104 }
1105 
1106 /* NAPI poll routine
1107  *
1108  * Frames are dequeued from the QMan channel associated with this NAPI context.
1109  * Rx, Tx confirmation and (if configured) Rx error frames all count
1110  * towards the NAPI budget.
1111  */
1112 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1113 {
1114 	struct dpaa2_eth_channel *ch;
1115 	struct dpaa2_eth_priv *priv;
1116 	int rx_cleaned = 0, txconf_cleaned = 0;
1117 	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1118 	struct netdev_queue *nq;
1119 	int store_cleaned, work_done;
1120 	struct list_head rx_list;
1121 	int err;
1122 
1123 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
1124 	ch->xdp.res = 0;
1125 	priv = ch->priv;
1126 
1127 	INIT_LIST_HEAD(&rx_list);
1128 	ch->rx_list = &rx_list;
1129 
1130 	do {
1131 		err = pull_channel(ch);
1132 		if (unlikely(err))
1133 			break;
1134 
1135 		/* Refill pool if appropriate */
1136 		refill_pool(priv, ch, priv->bpid);
1137 
1138 		store_cleaned = consume_frames(ch, &fq);
1139 		if (!store_cleaned)
1140 			break;
1141 		if (fq->type == DPAA2_RX_FQ) {
1142 			rx_cleaned += store_cleaned;
1143 		} else {
1144 			txconf_cleaned += store_cleaned;
1145 			/* We have a single Tx conf FQ on this channel */
1146 			txc_fq = fq;
1147 		}
1148 
1149 		/* If we either consumed the whole NAPI budget with Rx frames
1150 		 * or we reached the Tx confirmations threshold, we're done.
1151 		 */
1152 		if (rx_cleaned >= budget ||
1153 		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1154 			work_done = budget;
1155 			goto out;
1156 		}
1157 	} while (store_cleaned);
1158 
1159 	/* We didn't consume the entire budget, so finish napi and
1160 	 * re-enable data availability notifications
1161 	 */
1162 	napi_complete_done(napi, rx_cleaned);
1163 	do {
1164 		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
1165 		cpu_relax();
1166 	} while (err == -EBUSY);
1167 	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
1168 		  ch->nctx.desired_cpu);
1169 
1170 	work_done = max(rx_cleaned, 1);
1171 
1172 out:
1173 	netif_receive_skb_list(ch->rx_list);
1174 
1175 	if (txc_fq && txc_fq->dq_frames) {
1176 		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
1177 		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
1178 					  txc_fq->dq_bytes);
1179 		txc_fq->dq_frames = 0;
1180 		txc_fq->dq_bytes = 0;
1181 	}
1182 
1183 	if (ch->xdp.res & XDP_REDIRECT)
1184 		xdp_do_flush_map();
1185 
1186 	return work_done;
1187 }
1188 
1189 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
1190 {
1191 	struct dpaa2_eth_channel *ch;
1192 	int i;
1193 
1194 	for (i = 0; i < priv->num_channels; i++) {
1195 		ch = priv->channel[i];
1196 		napi_enable(&ch->napi);
1197 	}
1198 }
1199 
1200 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
1201 {
1202 	struct dpaa2_eth_channel *ch;
1203 	int i;
1204 
1205 	for (i = 0; i < priv->num_channels; i++) {
1206 		ch = priv->channel[i];
1207 		napi_disable(&ch->napi);
1208 	}
1209 }
1210 
1211 static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
1212 {
1213 	struct dpni_taildrop td = {0};
1214 	int i, err;
1215 
1216 	if (priv->rx_td_enabled == enable)
1217 		return;
1218 
1219 	td.enable = enable;
1220 	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1221 
1222 	for (i = 0; i < priv->num_fqs; i++) {
1223 		if (priv->fq[i].type != DPAA2_RX_FQ)
1224 			continue;
1225 		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
1226 					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
1227 					priv->fq[i].flowid, &td);
1228 		if (err) {
1229 			netdev_err(priv->net_dev,
1230 				   "dpni_set_taildrop() failed\n");
1231 			break;
1232 		}
1233 	}
1234 
1235 	priv->rx_td_enabled = enable;
1236 }
1237 
1238 static int link_state_update(struct dpaa2_eth_priv *priv)
1239 {
1240 	struct dpni_link_state state = {0};
1241 	bool tx_pause;
1242 	int err;
1243 
1244 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
1245 	if (unlikely(err)) {
1246 		netdev_err(priv->net_dev,
1247 			   "dpni_get_link_state() failed\n");
1248 		return err;
1249 	}
1250 
1251 	/* If Tx pause frame settings have changed, we need to update
1252 	 * Rx FQ taildrop configuration as well. We configure taildrop
1253 	 * only when pause frame generation is disabled.
1254 	 */
1255 	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
1256 		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
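	/* Per the XOR above, Tx pause generation is considered enabled when
	 * exactly one of PAUSE and ASYM_PAUSE is set, and disabled when both
	 * are set or both are clear.
	 */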
1257 	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
1258 
1259 	/* Check link state; speed/duplex changes are not handled yet */
1260 	if (priv->link_state.up == state.up)
1261 		goto out;
1262 
1263 	if (state.up) {
1264 		netif_carrier_on(priv->net_dev);
1265 		netif_tx_start_all_queues(priv->net_dev);
1266 	} else {
1267 		netif_tx_stop_all_queues(priv->net_dev);
1268 		netif_carrier_off(priv->net_dev);
1269 	}
1270 
1271 	netdev_info(priv->net_dev, "Link Event: state %s\n",
1272 		    state.up ? "up" : "down");
1273 
1274 out:
1275 	priv->link_state = state;
1276 
1277 	return 0;
1278 }
1279 
1280 static int dpaa2_eth_open(struct net_device *net_dev)
1281 {
1282 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1283 	int err;
1284 
1285 	err = seed_pool(priv, priv->bpid);
1286 	if (err) {
1287 		/* Not much to do; the buffer pool, though not filled up,
1288 		 * may still contain some buffers which would enable us
1289 		 * to limp on.
1290 		 */
1291 		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1292 			   priv->dpbp_dev->obj_desc.id, priv->bpid);
1293 	}
1294 
1295 	/* We'll only start the txqs when the link is actually ready; make sure
1296 	 * we don't race against the link up notification, which may come
1297 	 * immediately after dpni_enable();
1298 	 */
1299 	netif_tx_stop_all_queues(net_dev);
1300 	enable_ch_napi(priv);
1301 	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1302 	 * return true and cause 'ip link show' to report the LOWER_UP flag,
1303 	 * even though the link notification wasn't even received.
1304 	 */
1305 	netif_carrier_off(net_dev);
1306 
1307 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1308 	if (err < 0) {
1309 		netdev_err(net_dev, "dpni_enable() failed\n");
1310 		goto enable_err;
1311 	}
1312 
1313 	/* If the DPMAC object has already processed the link up interrupt,
1314 	 * we have to learn the link state ourselves.
1315 	 */
1316 	err = link_state_update(priv);
1317 	if (err < 0) {
1318 		netdev_err(net_dev, "Can't update link state\n");
1319 		goto link_state_err;
1320 	}
1321 
1322 	return 0;
1323 
1324 link_state_err:
1325 enable_err:
1326 	disable_ch_napi(priv);
1327 	drain_pool(priv);
1328 	return err;
1329 }
1330 
1331 /* Total number of in-flight frames on ingress queues */
1332 static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
1333 {
1334 	struct dpaa2_eth_fq *fq;
1335 	u32 fcnt = 0, bcnt = 0, total = 0;
1336 	int i, err;
1337 
1338 	for (i = 0; i < priv->num_fqs; i++) {
1339 		fq = &priv->fq[i];
1340 		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
1341 		if (err) {
1342 			netdev_warn(priv->net_dev, "query_fq_count failed");
1343 			break;
1344 		}
1345 		total += fcnt;
1346 	}
1347 
1348 	return total;
1349 }
1350 
1351 static void wait_for_fq_empty(struct dpaa2_eth_priv *priv)
1352 {
1353 	int retries = 10;
1354 	u32 pending;
1355 
1356 	do {
1357 		pending = ingress_fq_count(priv);
1358 		if (pending)
1359 			msleep(100);
1360 	} while (pending && --retries);
1361 }
1362 
1363 static int dpaa2_eth_stop(struct net_device *net_dev)
1364 {
1365 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1366 	int dpni_enabled = 0;
1367 	int retries = 10;
1368 
1369 	netif_tx_stop_all_queues(net_dev);
1370 	netif_carrier_off(net_dev);
1371 
1372 	/* On dpni_disable(), the MC firmware will:
1373 	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
1374 	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
1375 	 * of all in flight Tx frames is finished (and corresponding Tx conf
1376 	 * frames are enqueued back to software)
1377 	 *
1378 	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
1379 	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
1380 	 * and Tx conf queues are consumed on NAPI poll.
1381 	 */
1382 	msleep(500);
1383 
1384 	do {
1385 		dpni_disable(priv->mc_io, 0, priv->mc_token);
1386 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1387 		if (dpni_enabled)
1388 			/* Allow the hardware some slack */
1389 			msleep(100);
1390 	} while (dpni_enabled && --retries);
1391 	if (!retries) {
1392 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1393 		/* Must go on and disable NAPI nonetheless, so we don't crash at
1394 		 * the next "ifconfig up"
1395 		 */
1396 	}
1397 
1398 	wait_for_fq_empty(priv);
1399 	disable_ch_napi(priv);
1400 
1401 	/* Empty the buffer pool */
1402 	drain_pool(priv);
1403 
1404 	return 0;
1405 }
1406 
1407 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1408 {
1409 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1410 	struct device *dev = net_dev->dev.parent;
1411 	int err;
1412 
1413 	err = eth_mac_addr(net_dev, addr);
1414 	if (err < 0) {
1415 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1416 		return err;
1417 	}
1418 
1419 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1420 					net_dev->dev_addr);
1421 	if (err) {
1422 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1423 		return err;
1424 	}
1425 
1426 	return 0;
1427 }
1428 
1429 /* Fill in counters maintained by the GPP driver. These may be different from
1430  * the hardware counters obtained by ethtool.
1431  */
1432 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1433 				struct rtnl_link_stats64 *stats)
1434 {
1435 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1436 	struct rtnl_link_stats64 *percpu_stats;
1437 	u64 *cpustats;
1438 	u64 *netstats = (u64 *)stats;
1439 	int i, j;
1440 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1441 
1442 	for_each_possible_cpu(i) {
1443 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1444 		cpustats = (u64 *)percpu_stats;
1445 		for (j = 0; j < num; j++)
1446 			netstats[j] += cpustats[j];
1447 	}
1448 }
1449 
1450 /* Copy mac unicast addresses from @net_dev to @priv.
1451  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1452  */
1453 static void add_uc_hw_addr(const struct net_device *net_dev,
1454 			   struct dpaa2_eth_priv *priv)
1455 {
1456 	struct netdev_hw_addr *ha;
1457 	int err;
1458 
1459 	netdev_for_each_uc_addr(ha, net_dev) {
1460 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1461 					ha->addr);
1462 		if (err)
1463 			netdev_warn(priv->net_dev,
1464 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1465 				    ha->addr, err);
1466 	}
1467 }
1468 
1469 /* Copy mac multicast addresses from @net_dev to @priv
1470  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1471  */
1472 static void add_mc_hw_addr(const struct net_device *net_dev,
1473 			   struct dpaa2_eth_priv *priv)
1474 {
1475 	struct netdev_hw_addr *ha;
1476 	int err;
1477 
1478 	netdev_for_each_mc_addr(ha, net_dev) {
1479 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1480 					ha->addr);
1481 		if (err)
1482 			netdev_warn(priv->net_dev,
1483 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1484 				    ha->addr, err);
1485 	}
1486 }
1487 
1488 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1489 {
1490 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1491 	int uc_count = netdev_uc_count(net_dev);
1492 	int mc_count = netdev_mc_count(net_dev);
1493 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1494 	u32 options = priv->dpni_attrs.options;
1495 	u16 mc_token = priv->mc_token;
1496 	struct fsl_mc_io *mc_io = priv->mc_io;
1497 	int err;
1498 
1499 	/* Basic sanity checks; these probably indicate a misconfiguration */
1500 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1501 		netdev_info(net_dev,
1502 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1503 			    max_mac);
1504 
1505 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
1506 	if (uc_count > max_mac) {
1507 		netdev_info(net_dev,
1508 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1509 			    uc_count, max_mac);
1510 		goto force_promisc;
1511 	}
1512 	if (mc_count + uc_count > max_mac) {
1513 		netdev_info(net_dev,
1514 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1515 			    uc_count + mc_count, max_mac);
1516 		goto force_mc_promisc;
1517 	}
1518 
1519 	/* Adjust promisc settings due to flag combinations */
1520 	if (net_dev->flags & IFF_PROMISC)
1521 		goto force_promisc;
1522 	if (net_dev->flags & IFF_ALLMULTI) {
1523 		/* First, rebuild unicast filtering table. This should be done
1524 		 * in promisc mode, in order to avoid frame loss while we
1525 		 * progressively add entries to the table.
1526 		 * We don't know whether we had been in promisc already, and
1527 		 * making an MC call to find out is expensive; so set uc promisc
1528 		 * nonetheless.
1529 		 */
1530 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1531 		if (err)
1532 			netdev_warn(net_dev, "Can't set uc promisc\n");
1533 
1534 		/* Actual uc table reconstruction. */
1535 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1536 		if (err)
1537 			netdev_warn(net_dev, "Can't clear uc filters\n");
1538 		add_uc_hw_addr(net_dev, priv);
1539 
1540 		/* Finally, clear uc promisc and set mc promisc as requested. */
1541 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1542 		if (err)
1543 			netdev_warn(net_dev, "Can't clear uc promisc\n");
1544 		goto force_mc_promisc;
1545 	}
1546 
1547 	/* Neither unicast nor multicast promisc will be on... eventually.
1548 	 * For now, rebuild mac filtering tables while forcing both of them on.
1549 	 */
1550 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1551 	if (err)
1552 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1553 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1554 	if (err)
1555 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1556 
1557 	/* Actual mac filtering tables reconstruction */
1558 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1559 	if (err)
1560 		netdev_warn(net_dev, "Can't clear mac filters\n");
1561 	add_mc_hw_addr(net_dev, priv);
1562 	add_uc_hw_addr(net_dev, priv);
1563 
1564 	/* Now we can clear both ucast and mcast promisc, without risking
1565 	 * to drop legitimate frames anymore.
1566 	 */
1567 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1568 	if (err)
1569 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
1570 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1571 	if (err)
1572 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
1573 
1574 	return;
1575 
1576 force_promisc:
1577 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1578 	if (err)
1579 		netdev_warn(net_dev, "Can't set ucast promisc\n");
1580 force_mc_promisc:
1581 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1582 	if (err)
1583 		netdev_warn(net_dev, "Can't set mcast promisc\n");
1584 }
1585 
1586 static int dpaa2_eth_set_features(struct net_device *net_dev,
1587 				  netdev_features_t features)
1588 {
1589 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1590 	netdev_features_t changed = features ^ net_dev->features;
1591 	bool enable;
1592 	int err;
1593 
1594 	if (changed & NETIF_F_RXCSUM) {
1595 		enable = !!(features & NETIF_F_RXCSUM);
1596 		err = set_rx_csum(priv, enable);
1597 		if (err)
1598 			return err;
1599 	}
1600 
1601 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1602 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1603 		err = set_tx_csum(priv, enable);
1604 		if (err)
1605 			return err;
1606 	}
1607 
1608 	return 0;
1609 }
1610 
1611 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1612 {
1613 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1614 	struct hwtstamp_config config;
1615 
1616 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1617 		return -EFAULT;
1618 
1619 	switch (config.tx_type) {
1620 	case HWTSTAMP_TX_OFF:
1621 		priv->tx_tstamp = false;
1622 		break;
1623 	case HWTSTAMP_TX_ON:
1624 		priv->tx_tstamp = true;
1625 		break;
1626 	default:
1627 		return -ERANGE;
1628 	}
1629 
1630 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1631 		priv->rx_tstamp = false;
1632 	} else {
1633 		priv->rx_tstamp = true;
1634 		/* TS is set for all frame types, not only those requested */
1635 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1636 	}
1637 
1638 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1639 			-EFAULT : 0;
1640 }
1641 
1642 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1643 {
1644 	if (cmd == SIOCSHWTSTAMP)
1645 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1646 
1647 	return -EINVAL;
1648 }
1649 
1650 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
1651 {
1652 	int mfl, linear_mfl;
1653 
1654 	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1655 	linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE -
1656 		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
1657 
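	/* XDP requires every frame to fit in a single Rx buffer, so an MTU is
	 * only acceptable if the resulting maximum frame length fits in what
	 * is left of the buffer after the hardware annotation, Rx headroom
	 * and XDP headroom are reserved.
	 */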
1658 	if (mfl > linear_mfl) {
1659 		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
1660 			    linear_mfl - VLAN_ETH_HLEN);
1661 		return false;
1662 	}
1663 
1664 	return true;
1665 }
1666 
1667 static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
1668 {
1669 	int mfl, err;
1670 
1671 	/* We enforce a maximum Rx frame length based on MTU only if we have
1672 	 * an XDP program attached (in order to avoid Rx S/G frames).
1673 	 * Otherwise, we accept all incoming frames as long as they are not
1674 	 * larger than maximum size supported in hardware
1675 	 */
1676 	if (has_xdp)
1677 		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
1678 	else
1679 		mfl = DPAA2_ETH_MFL;
1680 
1681 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
1682 	if (err) {
1683 		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
1684 		return err;
1685 	}
1686 
1687 	return 0;
1688 }
1689 
1690 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
1691 {
1692 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1693 	int err;
1694 
1695 	if (!priv->xdp_prog)
1696 		goto out;
1697 
1698 	if (!xdp_mtu_valid(priv, new_mtu))
1699 		return -EINVAL;
1700 
1701 	err = set_rx_mfl(priv, new_mtu, true);
1702 	if (err)
1703 		return err;
1704 
1705 out:
1706 	dev->mtu = new_mtu;
1707 	return 0;
1708 }
1709 
1710 static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
1711 {
1712 	struct dpni_buffer_layout buf_layout = {0};
1713 	int err;
1714 
1715 	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
1716 				     DPNI_QUEUE_RX, &buf_layout);
1717 	if (err) {
1718 		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
1719 		return err;
1720 	}
1721 
1722 	/* Reserve extra headroom for XDP header size changes */
1723 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
1724 				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
1725 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
1726 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1727 				     DPNI_QUEUE_RX, &buf_layout);
1728 	if (err) {
1729 		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
1730 		return err;
1731 	}
1732 
1733 	return 0;
1734 }
1735 
1736 static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
1737 {
1738 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1739 	struct dpaa2_eth_channel *ch;
1740 	struct bpf_prog *old;
1741 	bool up, need_update;
1742 	int i, err;
1743 
1744 	if (prog && !xdp_mtu_valid(priv, dev->mtu))
1745 		return -EINVAL;
1746 
1747 	if (prog) {
1748 		prog = bpf_prog_add(prog, priv->num_channels);
1749 		if (IS_ERR(prog))
1750 			return PTR_ERR(prog);
1751 	}
1752 
1753 	up = netif_running(dev);
1754 	need_update = (!!priv->xdp_prog != !!prog);
1755 
1756 	if (up)
1757 		dpaa2_eth_stop(dev);
1758 
1759 	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
1760 	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
1761 	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
1762 	 * so we are sure no old format buffers will be used from now on.
1763 	 */
1764 	if (need_update) {
1765 		err = set_rx_mfl(priv, dev->mtu, !!prog);
1766 		if (err)
1767 			goto out_err;
1768 		err = update_rx_buffer_headroom(priv, !!prog);
1769 		if (err)
1770 			goto out_err;
1771 	}
1772 
1773 	old = xchg(&priv->xdp_prog, prog);
1774 	if (old)
1775 		bpf_prog_put(old);
1776 
1777 	for (i = 0; i < priv->num_channels; i++) {
1778 		ch = priv->channel[i];
1779 		old = xchg(&ch->xdp.prog, prog);
1780 		if (old)
1781 			bpf_prog_put(old);
1782 	}
1783 
1784 	if (up) {
1785 		err = dpaa2_eth_open(dev);
1786 		if (err)
1787 			return err;
1788 	}
1789 
1790 	return 0;
1791 
1792 out_err:
1793 	if (prog)
1794 		bpf_prog_sub(prog, priv->num_channels);
1795 	if (up)
1796 		dpaa2_eth_open(dev);
1797 
1798 	return err;
1799 }
1800 
1801 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1802 {
1803 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1804 
1805 	switch (xdp->command) {
1806 	case XDP_SETUP_PROG:
1807 		return setup_xdp(dev, xdp->prog);
1808 	case XDP_QUERY_PROG:
1809 		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
1810 		break;
1811 	default:
1812 		return -EINVAL;
1813 	}
1814 
1815 	return 0;
1816 }
1817 
1818 static int dpaa2_eth_xdp_xmit_frame(struct net_device *net_dev,
1819 				    struct xdp_frame *xdpf)
1820 {
1821 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1822 	struct device *dev = net_dev->dev.parent;
1823 	struct rtnl_link_stats64 *percpu_stats;
1824 	struct dpaa2_eth_drv_stats *percpu_extras;
1825 	unsigned int needed_headroom;
1826 	struct dpaa2_eth_swa *swa;
1827 	struct dpaa2_eth_fq *fq;
1828 	struct dpaa2_fd fd;
1829 	void *buffer_start, *aligned_start;
1830 	dma_addr_t addr;
1831 	int err, i;
1832 
1833 	/* We require a minimum headroom to be able to transmit the frame.
1834 	 * Otherwise return an error and let the original net_device handle it
1835 	 */
1836 	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
1837 	if (xdpf->headroom < needed_headroom)
1838 		return -EINVAL;
1839 
1840 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
1841 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
1842 
1843 	/* Setup the FD fields */
1844 	memset(&fd, 0, sizeof(fd));
1845 
1846 	/* Align FD address, if possible */
1847 	buffer_start = xdpf->data - needed_headroom;
1848 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1849 				  DPAA2_ETH_TX_BUF_ALIGN);
1850 	if (aligned_start >= xdpf->data - xdpf->headroom)
1851 		buffer_start = aligned_start;
1852 
1853 	swa = (struct dpaa2_eth_swa *)buffer_start;
1854 	/* fill in necessary fields here */
1855 	swa->type = DPAA2_ETH_SWA_XDP;
1856 	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
1857 	swa->xdp.xdpf = xdpf;
1858 
1859 	addr = dma_map_single(dev, buffer_start,
1860 			      swa->xdp.dma_size,
1861 			      DMA_BIDIRECTIONAL);
1862 	if (unlikely(dma_mapping_error(dev, addr))) {
1863 		percpu_stats->tx_dropped++;
1864 		return -ENOMEM;
1865 	}
1866 
1867 	dpaa2_fd_set_addr(&fd, addr);
1868 	dpaa2_fd_set_offset(&fd, xdpf->data - buffer_start);
1869 	dpaa2_fd_set_len(&fd, xdpf->len);
1870 	dpaa2_fd_set_format(&fd, dpaa2_fd_single);
1871 	dpaa2_fd_set_ctrl(&fd, FD_CTRL_PTA);
1872 
1873 	fq = &priv->fq[smp_processor_id() % dpaa2_eth_queue_count(priv)];
1874 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
1875 		err = priv->enqueue(priv, fq, &fd, 0);
1876 		if (err != -EBUSY)
1877 			break;
1878 	}
1879 	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Undo the DMA mapping here; the caller returns the frame
		 * to its Rx originator
		 */
		dma_unmap_single(dev, addr, swa->xdp.dma_size,
				 DMA_BIDIRECTIONAL);
		return err;
	}
1885 
1886 	percpu_stats->tx_packets++;
1887 	percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
1888 
1889 	return 0;
1890 }
1891 
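/* ndo_xdp_xmit callback: transmit a batch of XDP frames, returning the number
 * successfully enqueued; frames we can't send are handed back to their Rx
 * originator
 */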
1892 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
1893 			      struct xdp_frame **frames, u32 flags)
1894 {
1895 	int drops = 0;
1896 	int i, err;
1897 
1898 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1899 		return -EINVAL;
1900 
1901 	if (!netif_running(net_dev))
1902 		return -ENETDOWN;
1903 
1904 	for (i = 0; i < n; i++) {
1905 		struct xdp_frame *xdpf = frames[i];
1906 
1907 		err = dpaa2_eth_xdp_xmit_frame(net_dev, xdpf);
1908 		if (err) {
1909 			xdp_return_frame_rx_napi(xdpf);
1910 			drops++;
1911 		}
1912 	}
1913 
1914 	return n - drops;
1915 }
1916 
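/* Configure the XPS maps so each Tx queue is preferably used from the cpu
 * affine to its Tx confirmation queue
 */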
1917 static int update_xps(struct dpaa2_eth_priv *priv)
1918 {
1919 	struct net_device *net_dev = priv->net_dev;
1920 	struct cpumask xps_mask;
1921 	struct dpaa2_eth_fq *fq;
1922 	int i, num_queues, netdev_queues;
1923 	int err = 0;
1924 
1925 	num_queues = dpaa2_eth_queue_count(priv);
1926 	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
1927 
1928 	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
1929 	 * queues, so only process those
1930 	 */
1931 	for (i = 0; i < netdev_queues; i++) {
1932 		fq = &priv->fq[i % num_queues];
1933 
1934 		cpumask_clear(&xps_mask);
1935 		cpumask_set_cpu(fq->target_cpu, &xps_mask);
1936 
1937 		err = netif_set_xps_queue(net_dev, &xps_mask, i);
1938 		if (err) {
1939 			netdev_warn_once(net_dev, "Error setting XPS queue\n");
1940 			break;
1941 		}
1942 	}
1943 
1944 	return err;
1945 }
1946 
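/* ndo_setup_tc callback: offload mqprio by mapping each traffic class to its
 * own group of hardware Tx queues
 */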
1947 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
1948 			      enum tc_setup_type type, void *type_data)
1949 {
1950 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1951 	struct tc_mqprio_qopt *mqprio = type_data;
1952 	u8 num_tc, num_queues;
1953 	int i;
1954 
1955 	if (type != TC_SETUP_QDISC_MQPRIO)
1956 		return -EINVAL;
1957 
1958 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1959 	num_queues = dpaa2_eth_queue_count(priv);
1960 	num_tc = mqprio->num_tc;
1961 
1962 	if (num_tc == net_dev->num_tc)
1963 		return 0;
1964 
	if (num_tc > dpaa2_eth_tc_count(priv)) {
1966 		netdev_err(net_dev, "Max %d traffic classes supported\n",
1967 			   dpaa2_eth_tc_count(priv));
1968 		return -EINVAL;
1969 	}
1970 
1971 	if (!num_tc) {
1972 		netdev_reset_tc(net_dev);
1973 		netif_set_real_num_tx_queues(net_dev, num_queues);
1974 		goto out;
1975 	}
1976 
1977 	netdev_set_num_tc(net_dev, num_tc);
1978 	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
1979 
1980 	for (i = 0; i < num_tc; i++)
1981 		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
1982 
1983 out:
1984 	update_xps(priv);
1985 
1986 	return 0;
1987 }
1988 
1989 static const struct net_device_ops dpaa2_eth_ops = {
1990 	.ndo_open = dpaa2_eth_open,
1991 	.ndo_start_xmit = dpaa2_eth_tx,
1992 	.ndo_stop = dpaa2_eth_stop,
1993 	.ndo_set_mac_address = dpaa2_eth_set_addr,
1994 	.ndo_get_stats64 = dpaa2_eth_get_stats,
1995 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1996 	.ndo_set_features = dpaa2_eth_set_features,
1997 	.ndo_do_ioctl = dpaa2_eth_ioctl,
1998 	.ndo_change_mtu = dpaa2_eth_change_mtu,
1999 	.ndo_bpf = dpaa2_eth_xdp,
2000 	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
2001 	.ndo_setup_tc = dpaa2_eth_setup_tc,
2002 };
2003 
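/* Channel Data Availability Notification (CDAN) callback: frames are pending
 * on this channel, so schedule the NAPI instance that will dequeue them
 */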
2004 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
2005 {
2006 	struct dpaa2_eth_channel *ch;
2007 
2008 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
2009 
2010 	/* Update NAPI statistics */
2011 	ch->stats.cdan++;
2012 
2013 	napi_schedule_irqoff(&ch->napi);
2014 }
2015 
2016 /* Allocate and configure a DPCON object */
2017 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
2018 {
2019 	struct fsl_mc_device *dpcon;
2020 	struct device *dev = priv->net_dev->dev.parent;
2021 	struct dpcon_attr attrs;
2022 	int err;
2023 
2024 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
2025 				     FSL_MC_POOL_DPCON, &dpcon);
2026 	if (err) {
2027 		if (err == -ENXIO)
2028 			err = -EPROBE_DEFER;
2029 		else
2030 			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
2031 		return ERR_PTR(err);
2032 	}
2033 
2034 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
2035 	if (err) {
2036 		dev_err(dev, "dpcon_open() failed\n");
2037 		goto free;
2038 	}
2039 
2040 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
2041 	if (err) {
2042 		dev_err(dev, "dpcon_reset() failed\n");
2043 		goto close;
2044 	}
2045 
2046 	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
2047 	if (err) {
2048 		dev_err(dev, "dpcon_get_attributes() failed\n");
2049 		goto close;
2050 	}
2051 
2052 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
2053 	if (err) {
2054 		dev_err(dev, "dpcon_enable() failed\n");
2055 		goto close;
2056 	}
2057 
2058 	return dpcon;
2059 
2060 close:
2061 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2062 free:
2063 	fsl_mc_object_free(dpcon);
2064 
	return ERR_PTR(err);
2066 }
2067 
2068 static void free_dpcon(struct dpaa2_eth_priv *priv,
2069 		       struct fsl_mc_device *dpcon)
2070 {
2071 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
2072 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
2073 	fsl_mc_object_free(dpcon);
2074 }
2075 
2076 static struct dpaa2_eth_channel *
2077 alloc_channel(struct dpaa2_eth_priv *priv)
2078 {
2079 	struct dpaa2_eth_channel *channel;
2080 	struct dpcon_attr attr;
2081 	struct device *dev = priv->net_dev->dev.parent;
2082 	int err;
2083 
2084 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
2085 	if (!channel)
2086 		return NULL;
2087 
2088 	channel->dpcon = setup_dpcon(priv);
2089 	if (IS_ERR_OR_NULL(channel->dpcon)) {
2090 		err = PTR_ERR_OR_ZERO(channel->dpcon);
2091 		goto err_setup;
2092 	}
2093 
2094 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
2095 				   &attr);
2096 	if (err) {
2097 		dev_err(dev, "dpcon_get_attributes() failed\n");
2098 		goto err_get_attr;
2099 	}
2100 
2101 	channel->dpcon_id = attr.id;
2102 	channel->ch_id = attr.qbman_ch_id;
2103 	channel->priv = priv;
2104 
2105 	return channel;
2106 
2107 err_get_attr:
2108 	free_dpcon(priv, channel->dpcon);
2109 err_setup:
2110 	kfree(channel);
2111 	return ERR_PTR(err);
2112 }
2113 
2114 static void free_channel(struct dpaa2_eth_priv *priv,
2115 			 struct dpaa2_eth_channel *channel)
2116 {
2117 	free_dpcon(priv, channel->dpcon);
2118 	kfree(channel);
2119 }
2120 
2121 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
2122  * and register data availability notifications
2123  */
2124 static int setup_dpio(struct dpaa2_eth_priv *priv)
2125 {
2126 	struct dpaa2_io_notification_ctx *nctx;
2127 	struct dpaa2_eth_channel *channel;
2128 	struct dpcon_notification_cfg dpcon_notif_cfg;
2129 	struct device *dev = priv->net_dev->dev.parent;
2130 	int i, err;
2131 
2132 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
2133 	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
2135 	 * channels would be wasted).
2136 	 * Allocate one channel per core and register it to the core's
2137 	 * affine DPIO. If not enough channels are available for all cores
2138 	 * or if some cores don't have an affine DPIO, there will be no
2139 	 * ingress frame processing on those cores.
2140 	 */
2141 	cpumask_clear(&priv->dpio_cpumask);
2142 	for_each_online_cpu(i) {
2143 		/* Try to allocate a channel */
2144 		channel = alloc_channel(priv);
2145 		if (IS_ERR_OR_NULL(channel)) {
2146 			err = PTR_ERR_OR_ZERO(channel);
2147 			if (err != -EPROBE_DEFER)
2148 				dev_info(dev,
2149 					 "No affine channel for cpu %d and above\n", i);
2150 			goto err_alloc_ch;
2151 		}
2152 
2153 		priv->channel[priv->num_channels] = channel;
2154 
2155 		nctx = &channel->nctx;
2156 		nctx->is_cdan = 1;
2157 		nctx->cb = cdan_cb;
2158 		nctx->id = channel->ch_id;
2159 		nctx->desired_cpu = i;
2160 
2161 		/* Register the new context */
2162 		channel->dpio = dpaa2_io_service_select(i);
2163 		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
2164 		if (err) {
2165 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
2166 			/* If no affine DPIO for this core, there's probably
			 * none available for the next cores either. Signal we want
2168 			 * to retry later, in case the DPIO devices weren't
2169 			 * probed yet.
2170 			 */
2171 			err = -EPROBE_DEFER;
2172 			goto err_service_reg;
2173 		}
2174 
2175 		/* Register DPCON notification with MC */
2176 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
2177 		dpcon_notif_cfg.priority = 0;
2178 		dpcon_notif_cfg.user_ctx = nctx->qman64;
2179 		err = dpcon_set_notification(priv->mc_io, 0,
2180 					     channel->dpcon->mc_handle,
2181 					     &dpcon_notif_cfg);
2182 		if (err) {
2183 			dev_err(dev, "dpcon_set_notification failed()\n");
2184 			goto err_set_cdan;
2185 		}
2186 
2187 		/* If we managed to allocate a channel and also found an affine
2188 		 * DPIO for this core, add it to the final mask
2189 		 */
2190 		cpumask_set_cpu(i, &priv->dpio_cpumask);
2191 		priv->num_channels++;
2192 
2193 		/* Stop if we already have enough channels to accommodate all
2194 		 * RX and TX conf queues
2195 		 */
2196 		if (priv->num_channels == priv->dpni_attrs.num_queues)
2197 			break;
2198 	}
2199 
2200 	return 0;
2201 
2202 err_set_cdan:
2203 	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
2204 err_service_reg:
2205 	free_channel(priv, channel);
2206 err_alloc_ch:
2207 	if (err == -EPROBE_DEFER)
2208 		return err;
2209 
2210 	if (cpumask_empty(&priv->dpio_cpumask)) {
2211 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
2212 		return -ENODEV;
2213 	}
2214 
2215 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
2216 		 cpumask_pr_args(&priv->dpio_cpumask));
2217 
2218 	return 0;
2219 }
2220 
2221 static void free_dpio(struct dpaa2_eth_priv *priv)
2222 {
2223 	struct device *dev = priv->net_dev->dev.parent;
2224 	struct dpaa2_eth_channel *ch;
2225 	int i;
2226 
2227 	/* deregister CDAN notifications and free channels */
2228 	for (i = 0; i < priv->num_channels; i++) {
2229 		ch = priv->channel[i];
2230 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
2231 		free_channel(priv, ch);
2232 	}
2233 }
2234 
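/* Find the channel whose notifications were requested for the given cpu */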
2235 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
2236 						    int cpu)
2237 {
2238 	struct device *dev = priv->net_dev->dev.parent;
2239 	int i;
2240 
2241 	for (i = 0; i < priv->num_channels; i++)
2242 		if (priv->channel[i]->nctx.desired_cpu == cpu)
2243 			return priv->channel[i];
2244 
2245 	/* We should never get here. Issue a warning and return
2246 	 * the first channel, because it's still better than nothing
2247 	 */
2248 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
2249 
2250 	return priv->channel[0];
2251 }
2252 
2253 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
2254 {
2255 	struct device *dev = priv->net_dev->dev.parent;
2256 	struct dpaa2_eth_fq *fq;
2257 	int rx_cpu, txc_cpu;
2258 	int i;
2259 
2260 	/* For each FQ, pick one channel/CPU to deliver frames to.
2261 	 * This may well change at runtime, either through irqbalance or
2262 	 * through direct user intervention.
2263 	 */
2264 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
2265 
2266 	for (i = 0; i < priv->num_fqs; i++) {
2267 		fq = &priv->fq[i];
2268 		switch (fq->type) {
2269 		case DPAA2_RX_FQ:
2270 			fq->target_cpu = rx_cpu;
2271 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
2272 			if (rx_cpu >= nr_cpu_ids)
2273 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
2274 			break;
2275 		case DPAA2_TX_CONF_FQ:
2276 			fq->target_cpu = txc_cpu;
2277 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
2278 			if (txc_cpu >= nr_cpu_ids)
2279 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
2280 			break;
2281 		default:
2282 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
2283 		}
2284 		fq->channel = get_affine_channel(priv, fq->target_cpu);
2285 	}
2286 
2287 	update_xps(priv);
2288 }
2289 
2290 static void setup_fqs(struct dpaa2_eth_priv *priv)
2291 {
2292 	int i;
2293 
2294 	/* We have one TxConf FQ per Tx flow.
2295 	 * The number of Tx and Rx queues is the same.
	 * Tx confirmation queues come first in the fq array.
2297 	 */
2298 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2299 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
2300 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
2301 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2302 	}
2303 
2304 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
2305 		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
2306 		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
2307 		priv->fq[priv->num_fqs++].flowid = (u16)i;
2308 	}
2309 
2310 	/* For each FQ, decide on which core to process incoming frames */
2311 	set_fq_affinity(priv);
2312 }
2313 
2314 /* Allocate and configure one buffer pool for each interface */
2315 static int setup_dpbp(struct dpaa2_eth_priv *priv)
2316 {
2317 	int err;
2318 	struct fsl_mc_device *dpbp_dev;
2319 	struct device *dev = priv->net_dev->dev.parent;
2320 	struct dpbp_attr dpbp_attrs;
2321 
2322 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
2323 				     &dpbp_dev);
2324 	if (err) {
2325 		if (err == -ENXIO)
2326 			err = -EPROBE_DEFER;
2327 		else
2328 			dev_err(dev, "DPBP device allocation failed\n");
2329 		return err;
2330 	}
2331 
2332 	priv->dpbp_dev = dpbp_dev;
2333 
2334 	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
2335 			&dpbp_dev->mc_handle);
2336 	if (err) {
2337 		dev_err(dev, "dpbp_open() failed\n");
2338 		goto err_open;
2339 	}
2340 
2341 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
2342 	if (err) {
2343 		dev_err(dev, "dpbp_reset() failed\n");
2344 		goto err_reset;
2345 	}
2346 
2347 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
2348 	if (err) {
2349 		dev_err(dev, "dpbp_enable() failed\n");
2350 		goto err_enable;
2351 	}
2352 
2353 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
2354 				  &dpbp_attrs);
2355 	if (err) {
2356 		dev_err(dev, "dpbp_get_attributes() failed\n");
2357 		goto err_get_attr;
2358 	}
2359 	priv->bpid = dpbp_attrs.bpid;
2360 
2361 	return 0;
2362 
2363 err_get_attr:
2364 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
2365 err_enable:
2366 err_reset:
2367 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
2368 err_open:
2369 	fsl_mc_object_free(dpbp_dev);
2370 
2371 	return err;
2372 }
2373 
2374 static void free_dpbp(struct dpaa2_eth_priv *priv)
2375 {
2376 	drain_pool(priv);
2377 	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2378 	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
2379 	fsl_mc_object_free(priv->dpbp_dev);
2380 }
2381 
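/* Configure the Tx, Tx confirmation and Rx buffer layouts (software
 * annotation size, alignment, headroom) and read back the resulting Tx data
 * offset
 */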
2382 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
2383 {
2384 	struct device *dev = priv->net_dev->dev.parent;
2385 	struct dpni_buffer_layout buf_layout = {0};
2386 	u16 rx_buf_align;
2387 	int err;
2388 
	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always reported correctly on rev1
	 * hardware, so accept both alternatives here.
	 */
2393 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
2394 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
2395 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
2396 	else
2397 		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
2398 
2399 	/* tx buffer */
2400 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
2401 	buf_layout.pass_timestamp = true;
2402 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
2403 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2404 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2405 				     DPNI_QUEUE_TX, &buf_layout);
2406 	if (err) {
2407 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
2408 		return err;
2409 	}
2410 
2411 	/* tx-confirm buffer */
2412 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2413 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2414 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
2415 	if (err) {
2416 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
2417 		return err;
2418 	}
2419 
2420 	/* Now that we've set our tx buffer layout, retrieve the minimum
2421 	 * required tx data offset.
2422 	 */
2423 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
2424 				      &priv->tx_data_offset);
2425 	if (err) {
2426 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
2427 		return err;
2428 	}
2429 
2430 	if ((priv->tx_data_offset % 64) != 0)
2431 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
2432 			 priv->tx_data_offset);
2433 
2434 	/* rx buffer */
2435 	buf_layout.pass_frame_status = true;
2436 	buf_layout.pass_parser_result = true;
2437 	buf_layout.data_align = rx_buf_align;
2438 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
2439 	buf_layout.private_data_size = 0;
2440 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
2441 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
2442 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
2443 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
2444 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
2445 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2446 				     DPNI_QUEUE_RX, &buf_layout);
2447 	if (err) {
2448 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
2449 		return err;
2450 	}
2451 
2452 	return 0;
2453 }
2454 
2455 #define DPNI_ENQUEUE_FQID_VER_MAJOR	7
2456 #define DPNI_ENQUEUE_FQID_VER_MINOR	9
2457 
2458 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
2459 				       struct dpaa2_eth_fq *fq,
2460 				       struct dpaa2_fd *fd, u8 prio)
2461 {
2462 	return dpaa2_io_service_enqueue_qd(fq->channel->dpio,
2463 					   priv->tx_qdid, prio,
2464 					   fq->tx_qdbin, fd);
2465 }
2466 
2467 static inline int dpaa2_eth_enqueue_fq(struct dpaa2_eth_priv *priv,
2468 				       struct dpaa2_eth_fq *fq,
2469 				       struct dpaa2_fd *fd, u8 prio)
2470 {
2471 	return dpaa2_io_service_enqueue_fq(fq->channel->dpio,
2472 					   fq->tx_fqid[prio], fd);
2473 }
2474 
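/* Older DPNI versions only let us enqueue Tx frames through the queuing
 * destination (QDID/qdbin); starting with DPNI API version 7.9 we can enqueue
 * directly by FQID
 */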
2475 static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
2476 {
2477 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
2478 				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
2479 		priv->enqueue = dpaa2_eth_enqueue_qd;
2480 	else
2481 		priv->enqueue = dpaa2_eth_enqueue_fq;
2482 }
2483 
2484 static int set_pause(struct dpaa2_eth_priv *priv)
2485 {
2486 	struct device *dev = priv->net_dev->dev.parent;
2487 	struct dpni_link_cfg link_cfg = {0};
2488 	int err;
2489 
2490 	/* Get the default link options so we don't override other flags */
2491 	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2492 	if (err) {
2493 		dev_err(dev, "dpni_get_link_cfg() failed\n");
2494 		return err;
2495 	}
2496 
2497 	/* By default, enable both Rx and Tx pause frames */
2498 	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
2499 	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
2500 	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
2501 	if (err) {
2502 		dev_err(dev, "dpni_set_link_cfg() failed\n");
2503 		return err;
2504 	}
2505 
2506 	priv->link_state.options = link_cfg.options;
2507 
2508 	return 0;
2509 }
2510 
2511 /* Configure the DPNI object this interface is associated with */
2512 static int setup_dpni(struct fsl_mc_device *ls_dev)
2513 {
2514 	struct device *dev = &ls_dev->dev;
2515 	struct dpaa2_eth_priv *priv;
2516 	struct net_device *net_dev;
2517 	int err;
2518 
2519 	net_dev = dev_get_drvdata(dev);
2520 	priv = netdev_priv(net_dev);
2521 
2522 	/* get a handle for the DPNI object */
2523 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
2524 	if (err) {
2525 		dev_err(dev, "dpni_open() failed\n");
2526 		return err;
2527 	}
2528 
2529 	/* Check if we can work with this DPNI object */
2530 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
2531 				   &priv->dpni_ver_minor);
2532 	if (err) {
2533 		dev_err(dev, "dpni_get_api_version() failed\n");
2534 		goto close;
2535 	}
2536 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
2537 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
2538 			priv->dpni_ver_major, priv->dpni_ver_minor,
2539 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
2540 		err = -ENOTSUPP;
2541 		goto close;
2542 	}
2543 
2544 	ls_dev->mc_io = priv->mc_io;
2545 	ls_dev->mc_handle = priv->mc_token;
2546 
2547 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2548 	if (err) {
2549 		dev_err(dev, "dpni_reset() failed\n");
2550 		goto close;
2551 	}
2552 
2553 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
2554 				  &priv->dpni_attrs);
2555 	if (err) {
2556 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
2557 		goto close;
2558 	}
2559 
2560 	err = set_buffer_layout(priv);
2561 	if (err)
2562 		goto close;
2563 
2564 	set_enqueue_mode(priv);
2565 
2566 	/* Enable pause frame support */
2567 	if (dpaa2_eth_has_pause_support(priv)) {
2568 		err = set_pause(priv);
2569 		if (err)
2570 			goto close;
2571 	}
2572 
	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}
2577 
2578 	return 0;
2579 
2580 close:
2581 	dpni_close(priv->mc_io, 0, priv->mc_token);
2582 
2583 	return err;
2584 }
2585 
2586 static void free_dpni(struct dpaa2_eth_priv *priv)
2587 {
2588 	int err;
2589 
2590 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
2591 	if (err)
2592 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
2593 			    err);
2594 
2595 	dpni_close(priv->mc_io, 0, priv->mc_token);
2596 }
2597 
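/* Have an Rx frame queue deliver its frames to the affine channel (DPCON)
 * and register the xdp_rxq info used by the XDP infrastructure
 */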
2598 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
2599 			 struct dpaa2_eth_fq *fq)
2600 {
2601 	struct device *dev = priv->net_dev->dev.parent;
2602 	struct dpni_queue queue;
2603 	struct dpni_queue_id qid;
2604 	int err;
2605 
2606 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2607 			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
2608 	if (err) {
2609 		dev_err(dev, "dpni_get_queue(RX) failed\n");
2610 		return err;
2611 	}
2612 
2613 	fq->fqid = qid.fqid;
2614 
2615 	queue.destination.id = fq->channel->dpcon_id;
2616 	queue.destination.type = DPNI_DEST_DPCON;
2617 	queue.destination.priority = 1;
2618 	queue.user_context = (u64)(uintptr_t)fq;
2619 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2620 			     DPNI_QUEUE_RX, 0, fq->flowid,
2621 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2622 			     &queue);
2623 	if (err) {
2624 		dev_err(dev, "dpni_set_queue(RX) failed\n");
2625 		return err;
2626 	}
2627 
2628 	/* xdp_rxq setup */
2629 	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
2630 			       fq->flowid);
2631 	if (err) {
2632 		dev_err(dev, "xdp_rxq_info_reg failed\n");
2633 		return err;
2634 	}
2635 
2636 	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
2637 					 MEM_TYPE_PAGE_ORDER0, NULL);
2638 	if (err) {
2639 		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
2640 		return err;
2641 	}
2642 
2643 	return 0;
2644 }
2645 
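/* Read back the FQIDs and qdbin of this flow's Tx queues and configure the
 * Tx confirmation queue to notify the affine channel
 */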
2646 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
2647 			 struct dpaa2_eth_fq *fq)
2648 {
2649 	struct device *dev = priv->net_dev->dev.parent;
2650 	struct dpni_queue queue;
2651 	struct dpni_queue_id qid;
2652 	int i, err;
2653 
2654 	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2655 		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2656 				     DPNI_QUEUE_TX, i, fq->flowid,
2657 				     &queue, &qid);
2658 		if (err) {
2659 			dev_err(dev, "dpni_get_queue(TX) failed\n");
2660 			return err;
2661 		}
2662 		fq->tx_fqid[i] = qid.fqid;
2663 	}
2664 
2665 	/* All Tx queues belonging to the same flowid have the same qdbin */
2666 	fq->tx_qdbin = qid.qdbin;
2667 
2668 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
2669 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2670 			     &queue, &qid);
2671 	if (err) {
2672 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
2673 		return err;
2674 	}
2675 
2676 	fq->fqid = qid.fqid;
2677 
2678 	queue.destination.id = fq->channel->dpcon_id;
2679 	queue.destination.type = DPNI_DEST_DPCON;
2680 	queue.destination.priority = 0;
2681 	queue.user_context = (u64)(uintptr_t)fq;
2682 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2683 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2684 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2685 			     &queue);
2686 	if (err) {
2687 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2688 		return err;
2689 	}
2690 
2691 	return 0;
2692 }
2693 
2694 /* Supported header fields for Rx hash distribution key */
2695 static const struct dpaa2_eth_dist_fields dist_fields[] = {
2696 	{
2697 		/* L2 header */
2698 		.rxnfc_field = RXH_L2DA,
2699 		.cls_prot = NET_PROT_ETH,
2700 		.cls_field = NH_FLD_ETH_DA,
2701 		.id = DPAA2_ETH_DIST_ETHDST,
2702 		.size = 6,
2703 	}, {
2704 		.cls_prot = NET_PROT_ETH,
2705 		.cls_field = NH_FLD_ETH_SA,
2706 		.id = DPAA2_ETH_DIST_ETHSRC,
2707 		.size = 6,
2708 	}, {
2709 		/* This is the last ethertype field parsed:
2710 		 * depending on frame format, it can be the MAC ethertype
2711 		 * or the VLAN etype.
2712 		 */
2713 		.cls_prot = NET_PROT_ETH,
2714 		.cls_field = NH_FLD_ETH_TYPE,
2715 		.id = DPAA2_ETH_DIST_ETHTYPE,
2716 		.size = 2,
2717 	}, {
2718 		/* VLAN header */
2719 		.rxnfc_field = RXH_VLAN,
2720 		.cls_prot = NET_PROT_VLAN,
2721 		.cls_field = NH_FLD_VLAN_TCI,
2722 		.id = DPAA2_ETH_DIST_VLAN,
2723 		.size = 2,
2724 	}, {
2725 		/* IP header */
2726 		.rxnfc_field = RXH_IP_SRC,
2727 		.cls_prot = NET_PROT_IP,
2728 		.cls_field = NH_FLD_IP_SRC,
2729 		.id = DPAA2_ETH_DIST_IPSRC,
2730 		.size = 4,
2731 	}, {
2732 		.rxnfc_field = RXH_IP_DST,
2733 		.cls_prot = NET_PROT_IP,
2734 		.cls_field = NH_FLD_IP_DST,
2735 		.id = DPAA2_ETH_DIST_IPDST,
2736 		.size = 4,
2737 	}, {
2738 		.rxnfc_field = RXH_L3_PROTO,
2739 		.cls_prot = NET_PROT_IP,
2740 		.cls_field = NH_FLD_IP_PROTO,
2741 		.id = DPAA2_ETH_DIST_IPPROTO,
2742 		.size = 1,
2743 	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from the L4 header.
		 */
2747 		.rxnfc_field = RXH_L4_B_0_1,
2748 		.cls_prot = NET_PROT_UDP,
2749 		.cls_field = NH_FLD_UDP_PORT_SRC,
2750 		.id = DPAA2_ETH_DIST_L4SRC,
2751 		.size = 2,
2752 	}, {
2753 		.rxnfc_field = RXH_L4_B_2_3,
2754 		.cls_prot = NET_PROT_UDP,
2755 		.cls_field = NH_FLD_UDP_PORT_DST,
2756 		.id = DPAA2_ETH_DIST_L4DST,
2757 		.size = 2,
2758 	},
2759 };
2760 
2761 /* Configure the Rx hash key using the legacy API */
2762 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2763 {
2764 	struct device *dev = priv->net_dev->dev.parent;
2765 	struct dpni_rx_tc_dist_cfg dist_cfg;
2766 	int err;
2767 
2768 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2769 
2770 	dist_cfg.key_cfg_iova = key;
2771 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2772 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2773 
2774 	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2775 	if (err)
2776 		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2777 
2778 	return err;
2779 }
2780 
2781 /* Configure the Rx hash key using the new API */
2782 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2783 {
2784 	struct device *dev = priv->net_dev->dev.parent;
2785 	struct dpni_rx_dist_cfg dist_cfg;
2786 	int err;
2787 
2788 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2789 
2790 	dist_cfg.key_cfg_iova = key;
2791 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2792 	dist_cfg.enable = 1;
2793 
2794 	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2795 	if (err)
2796 		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2797 
2798 	return err;
2799 }
2800 
2801 /* Configure the Rx flow classification key */
2802 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2803 {
2804 	struct device *dev = priv->net_dev->dev.parent;
2805 	struct dpni_rx_dist_cfg dist_cfg;
2806 	int err;
2807 
2808 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2809 
2810 	dist_cfg.key_cfg_iova = key;
2811 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2812 	dist_cfg.enable = 1;
2813 
2814 	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2815 	if (err)
2816 		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2817 
2818 	return err;
2819 }
2820 
2821 /* Size of the Rx flow classification key */
2822 int dpaa2_eth_cls_key_size(u64 fields)
2823 {
2824 	int i, size = 0;
2825 
2826 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2827 		if (!(fields & dist_fields[i].id))
2828 			continue;
2829 		size += dist_fields[i].size;
2830 	}
2831 
2832 	return size;
2833 }
2834 
2835 /* Offset of header field in Rx classification key */
2836 int dpaa2_eth_cls_fld_off(int prot, int field)
2837 {
2838 	int i, off = 0;
2839 
2840 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2841 		if (dist_fields[i].cls_prot == prot &&
2842 		    dist_fields[i].cls_field == field)
2843 			return off;
2844 		off += dist_fields[i].size;
2845 	}
2846 
2847 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2848 	return 0;
2849 }
2850 
2851 /* Prune unused fields from the classification rule.
2852  * Used when masking is not supported
2853  */
2854 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
2855 {
2856 	int off = 0, new_off = 0;
2857 	int i, size;
2858 
2859 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2860 		size = dist_fields[i].size;
2861 		if (dist_fields[i].id & fields) {
2862 			memcpy(key_mem + new_off, key_mem + off, size);
2863 			new_off += size;
2864 		}
2865 		off += size;
2866 	}
2867 }
2868 
2869 /* Set Rx distribution (hash or flow classification) key
2870  * flags is a combination of RXH_ bits
2871  */
2872 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2873 				  enum dpaa2_eth_rx_dist type, u64 flags)
2874 {
2875 	struct device *dev = net_dev->dev.parent;
2876 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2877 	struct dpkg_profile_cfg cls_cfg;
2878 	u32 rx_hash_fields = 0;
2879 	dma_addr_t key_iova;
2880 	u8 *dma_mem;
2881 	int i;
2882 	int err = 0;
2883 
2884 	memset(&cls_cfg, 0, sizeof(cls_cfg));
2885 
2886 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2887 		struct dpkg_extract *key =
2888 			&cls_cfg.extracts[cls_cfg.num_extracts];
2889 
2890 		/* For both Rx hashing and classification keys
2891 		 * we set only the selected fields.
2892 		 */
2893 		if (!(flags & dist_fields[i].id))
2894 			continue;
2895 		if (type == DPAA2_ETH_RX_DIST_HASH)
2896 			rx_hash_fields |= dist_fields[i].rxnfc_field;
2897 
2898 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2899 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
2900 			return -E2BIG;
2901 		}
2902 
2903 		key->type = DPKG_EXTRACT_FROM_HDR;
2904 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
2905 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
2906 		key->extract.from_hdr.field = dist_fields[i].cls_field;
2907 		cls_cfg.num_extracts++;
2908 	}
2909 
2910 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2911 	if (!dma_mem)
2912 		return -ENOMEM;
2913 
2914 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2915 	if (err) {
2916 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2917 		goto free_key;
2918 	}
2919 
2920 	/* Prepare for setting the rx dist */
2921 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
2922 				  DMA_TO_DEVICE);
2923 	if (dma_mapping_error(dev, key_iova)) {
2924 		dev_err(dev, "DMA mapping failed\n");
2925 		err = -ENOMEM;
2926 		goto free_key;
2927 	}
2928 
2929 	if (type == DPAA2_ETH_RX_DIST_HASH) {
2930 		if (dpaa2_eth_has_legacy_dist(priv))
2931 			err = config_legacy_hash_key(priv, key_iova);
2932 		else
2933 			err = config_hash_key(priv, key_iova);
2934 	} else {
2935 		err = config_cls_key(priv, key_iova);
2936 	}
2937 
2938 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
2939 			 DMA_TO_DEVICE);
2940 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
2941 		priv->rx_hash_fields = rx_hash_fields;
2942 
2943 free_key:
2944 	kfree(dma_mem);
2945 	return err;
2946 }
2947 
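/* Set the Rx hash key based on a combination of RXH_ flags, first translating
 * them into the internal header field ids
 */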
2948 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2949 {
2950 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2951 	u64 key = 0;
2952 	int i;
2953 
2954 	if (!dpaa2_eth_hash_enabled(priv))
2955 		return -EOPNOTSUPP;
2956 
2957 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
2958 		if (dist_fields[i].rxnfc_field & flags)
2959 			key |= dist_fields[i].id;
2960 
2961 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
2962 }
2963 
2964 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
2965 {
2966 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
2967 }
2968 
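/* Check whether Rx flow classification is supported and, if masking is
 * available, configure a default classification key covering all supported
 * header fields
 */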
2969 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
2970 {
2971 	struct device *dev = priv->net_dev->dev.parent;
2972 	int err;
2973 
2974 	/* Check if we actually support Rx flow classification */
2975 	if (dpaa2_eth_has_legacy_dist(priv)) {
2976 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
2977 		return -EOPNOTSUPP;
2978 	}
2979 
2980 	if (!dpaa2_eth_fs_enabled(priv)) {
2981 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
2982 		return -EOPNOTSUPP;
2983 	}
2984 
2985 	if (!dpaa2_eth_hash_enabled(priv)) {
2986 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
2987 		return -EOPNOTSUPP;
2988 	}
2989 
2990 	/* If there is no support for masking in the classification table,
2991 	 * we don't set a default key, as it will depend on the rules
2992 	 * added by the user at runtime.
2993 	 */
2994 	if (!dpaa2_eth_fs_mask_enabled(priv))
2995 		goto out;
2996 
2997 	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
2998 	if (err)
2999 		return err;
3000 
3001 out:
3002 	priv->rx_cls_enabled = 1;
3003 
3004 	return 0;
3005 }
3006 
3007 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
3008  * frame queues and channels
3009  */
3010 static int bind_dpni(struct dpaa2_eth_priv *priv)
3011 {
3012 	struct net_device *net_dev = priv->net_dev;
3013 	struct device *dev = net_dev->dev.parent;
3014 	struct dpni_pools_cfg pools_params;
3015 	struct dpni_error_cfg err_cfg;
3016 	int err = 0;
3017 	int i;
3018 
3019 	pools_params.num_dpbp = 1;
3020 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
3021 	pools_params.pools[0].backup_pool = 0;
3022 	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
3023 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
3024 	if (err) {
3025 		dev_err(dev, "dpni_set_pools() failed\n");
3026 		return err;
3027 	}
3028 
3029 	/* have the interface implicitly distribute traffic based on
3030 	 * the default hash key
3031 	 */
3032 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
3033 	if (err && err != -EOPNOTSUPP)
3034 		dev_err(dev, "Failed to configure hashing\n");
3035 
3036 	/* Configure the flow classification key; it includes all
3037 	 * supported header fields and cannot be modified at runtime
3038 	 */
3039 	err = dpaa2_eth_set_default_cls(priv);
3040 	if (err && err != -EOPNOTSUPP)
3041 		dev_err(dev, "Failed to configure Rx classification key\n");
3042 
3043 	/* Configure handling of error frames */
3044 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
3045 	err_cfg.set_frame_annotation = 1;
3046 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
3047 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
3048 				       &err_cfg);
3049 	if (err) {
3050 		dev_err(dev, "dpni_set_errors_behavior failed\n");
3051 		return err;
3052 	}
3053 
3054 	/* Configure Rx and Tx conf queues to generate CDANs */
3055 	for (i = 0; i < priv->num_fqs; i++) {
3056 		switch (priv->fq[i].type) {
3057 		case DPAA2_RX_FQ:
3058 			err = setup_rx_flow(priv, &priv->fq[i]);
3059 			break;
3060 		case DPAA2_TX_CONF_FQ:
3061 			err = setup_tx_flow(priv, &priv->fq[i]);
3062 			break;
3063 		default:
3064 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
3065 			return -EINVAL;
3066 		}
3067 		if (err)
3068 			return err;
3069 	}
3070 
3071 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
3072 			    DPNI_QUEUE_TX, &priv->tx_qdid);
3073 	if (err) {
3074 		dev_err(dev, "dpni_get_qdid() failed\n");
3075 		return err;
3076 	}
3077 
3078 	return 0;
3079 }
3080 
3081 /* Allocate rings for storing incoming frame descriptors */
3082 static int alloc_rings(struct dpaa2_eth_priv *priv)
3083 {
3084 	struct net_device *net_dev = priv->net_dev;
3085 	struct device *dev = net_dev->dev.parent;
3086 	int i;
3087 
3088 	for (i = 0; i < priv->num_channels; i++) {
3089 		priv->channel[i]->store =
3090 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
3091 		if (!priv->channel[i]->store) {
3092 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
3093 			goto err_ring;
3094 		}
3095 	}
3096 
3097 	return 0;
3098 
3099 err_ring:
3100 	for (i = 0; i < priv->num_channels; i++) {
3101 		if (!priv->channel[i]->store)
3102 			break;
3103 		dpaa2_io_store_destroy(priv->channel[i]->store);
3104 	}
3105 
3106 	return -ENOMEM;
3107 }
3108 
3109 static void free_rings(struct dpaa2_eth_priv *priv)
3110 {
3111 	int i;
3112 
3113 	for (i = 0; i < priv->num_channels; i++)
3114 		dpaa2_io_store_destroy(priv->channel[i]->store);
3115 }
3116 
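/* Choose the interface MAC address: prefer the one set by the bootloader in
 * firmware, then the one already configured on the DPNI, and fall back to a
 * random address otherwise
 */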
3117 static int set_mac_addr(struct dpaa2_eth_priv *priv)
3118 {
3119 	struct net_device *net_dev = priv->net_dev;
3120 	struct device *dev = net_dev->dev.parent;
3121 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
3122 	int err;
3123 
3124 	/* Get firmware address, if any */
3125 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
3126 	if (err) {
3127 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
3128 		return err;
3129 	}
3130 
	/* Get the MAC address currently configured on the DPNI, if any */
3132 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3133 					dpni_mac_addr);
3134 	if (err) {
3135 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
3136 		return err;
3137 	}
3138 
3139 	/* First check if firmware has any address configured by bootloader */
3140 	if (!is_zero_ether_addr(mac_addr)) {
3141 		/* If the DPMAC addr != DPNI addr, update it */
3142 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
3143 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
3144 							priv->mc_token,
3145 							mac_addr);
3146 			if (err) {
3147 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3148 				return err;
3149 			}
3150 		}
3151 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
3152 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
3153 		/* No MAC address configured, fill in net_dev->dev_addr
3154 		 * with a random one
3155 		 */
3156 		eth_hw_addr_random(net_dev);
3157 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
3158 
3159 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
3160 						net_dev->dev_addr);
3161 		if (err) {
3162 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
3163 			return err;
3164 		}
3165 
3166 		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
3167 		 * practical purposes, this will be our "permanent" mac address,
3168 		 * at least until the next reboot. This move will also permit
3169 		 * register_netdevice() to properly fill up net_dev->perm_addr.
3170 		 */
3171 		net_dev->addr_assign_type = NET_ADDR_PERM;
3172 	} else {
3173 		/* NET_ADDR_PERM is default, all we have to do is
3174 		 * fill in the device addr.
3175 		 */
3176 		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
3177 	}
3178 
3179 	return 0;
3180 }
3181 
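/* net_device initialization: MAC addresses, MTU limits, queue counts and
 * feature flags
 */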
3182 static int netdev_init(struct net_device *net_dev)
3183 {
3184 	struct device *dev = net_dev->dev.parent;
3185 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
3186 	u32 options = priv->dpni_attrs.options;
3187 	u64 supported = 0, not_supported = 0;
3188 	u8 bcast_addr[ETH_ALEN];
3189 	u8 num_queues;
3190 	int err;
3191 
3192 	net_dev->netdev_ops = &dpaa2_eth_ops;
3193 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
3194 
3195 	err = set_mac_addr(priv);
3196 	if (err)
3197 		return err;
3198 
3199 	/* Explicitly add the broadcast address to the MAC filtering table */
3200 	eth_broadcast_addr(bcast_addr);
3201 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
3202 	if (err) {
3203 		dev_err(dev, "dpni_add_mac_addr() failed\n");
3204 		return err;
3205 	}
3206 
3207 	/* Set MTU upper limit; lower limit is 68B (default value) */
3208 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
3209 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
3210 					DPAA2_ETH_MFL);
3211 	if (err) {
3212 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
3213 		return err;
3214 	}
3215 
3216 	/* Set actual number of queues in the net device */
3217 	num_queues = dpaa2_eth_queue_count(priv);
3218 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
3219 	if (err) {
3220 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
3221 		return err;
3222 	}
3223 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
3224 	if (err) {
3225 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
3226 		return err;
3227 	}
3228 
3229 	/* Capabilities listing */
3230 	supported |= IFF_LIVE_ADDR_CHANGE;
3231 
3232 	if (options & DPNI_OPT_NO_MAC_FILTER)
3233 		not_supported |= IFF_UNICAST_FLT;
3234 	else
3235 		supported |= IFF_UNICAST_FLT;
3236 
3237 	net_dev->priv_flags |= supported;
3238 	net_dev->priv_flags &= ~not_supported;
3239 
3240 	/* Features */
3241 	net_dev->features = NETIF_F_RXCSUM |
3242 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3243 			    NETIF_F_SG | NETIF_F_HIGHDMA |
3244 			    NETIF_F_LLTX;
3245 	net_dev->hw_features = net_dev->features;
3246 
3247 	return 0;
3248 }
3249 
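/* Polling thread used to monitor the link state when DPNI interrupts are not
 * available
 */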
3250 static int poll_link_state(void *arg)
3251 {
3252 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
3253 	int err;
3254 
3255 	while (!kthread_should_stop()) {
3256 		err = link_state_update(priv);
3257 		if (unlikely(err))
3258 			return err;
3259 
3260 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
3261 	}
3262 
3263 	return 0;
3264 }
3265 
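/* Threaded interrupt handler for DPNI events; currently we only act on link
 * state changes
 */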
3266 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
3267 {
3268 	u32 status = ~0;
3269 	struct device *dev = (struct device *)arg;
3270 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
3271 	struct net_device *net_dev = dev_get_drvdata(dev);
3272 	int err;
3273 
3274 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
3275 				  DPNI_IRQ_INDEX, &status);
3276 	if (unlikely(err)) {
3277 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
3278 		return IRQ_HANDLED;
3279 	}
3280 
3281 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
3282 		link_state_update(netdev_priv(net_dev));
3283 
3284 	return IRQ_HANDLED;
3285 }
3286 
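/* Allocate the MC interrupt, hook up the threaded handler and enable link
 * state change notifications
 */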
3287 static int setup_irqs(struct fsl_mc_device *ls_dev)
3288 {
3289 	int err = 0;
3290 	struct fsl_mc_device_irq *irq;
3291 
3292 	err = fsl_mc_allocate_irqs(ls_dev);
3293 	if (err) {
3294 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
3295 		return err;
3296 	}
3297 
3298 	irq = ls_dev->irqs[0];
3299 	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
3300 					NULL, dpni_irq0_handler_thread,
3301 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
3302 					dev_name(&ls_dev->dev), &ls_dev->dev);
3303 	if (err < 0) {
3304 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
3305 		goto free_mc_irq;
3306 	}
3307 
3308 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
3309 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
3310 	if (err < 0) {
3311 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
3312 		goto free_irq;
3313 	}
3314 
3315 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
3316 				  DPNI_IRQ_INDEX, 1);
3317 	if (err < 0) {
3318 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
3319 		goto free_irq;
3320 	}
3321 
3322 	return 0;
3323 
3324 free_irq:
3325 	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
3326 free_mc_irq:
3327 	fsl_mc_free_irqs(ls_dev);
3328 
3329 	return err;
3330 }
3331 
3332 static void add_ch_napi(struct dpaa2_eth_priv *priv)
3333 {
3334 	int i;
3335 	struct dpaa2_eth_channel *ch;
3336 
3337 	for (i = 0; i < priv->num_channels; i++) {
3338 		ch = priv->channel[i];
3339 		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
3340 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
3341 			       NAPI_POLL_WEIGHT);
3342 	}
3343 }
3344 
3345 static void del_ch_napi(struct dpaa2_eth_priv *priv)
3346 {
3347 	int i;
3348 	struct dpaa2_eth_channel *ch;
3349 
3350 	for (i = 0; i < priv->num_channels; i++) {
3351 		ch = priv->channel[i];
3352 		netif_napi_del(&ch->napi);
3353 	}
3354 }
3355 
3356 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
3357 {
3358 	struct device *dev;
3359 	struct net_device *net_dev = NULL;
3360 	struct dpaa2_eth_priv *priv = NULL;
3361 	int err = 0;
3362 
3363 	dev = &dpni_dev->dev;
3364 
3365 	/* Net device */
3366 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
3367 	if (!net_dev) {
3368 		dev_err(dev, "alloc_etherdev_mq() failed\n");
3369 		return -ENOMEM;
3370 	}
3371 
3372 	SET_NETDEV_DEV(net_dev, dev);
3373 	dev_set_drvdata(dev, net_dev);
3374 
3375 	priv = netdev_priv(net_dev);
3376 	priv->net_dev = net_dev;
3377 
3378 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
3379 
3380 	/* Obtain a MC portal */
3381 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
3382 				     &priv->mc_io);
3383 	if (err) {
3384 		if (err == -ENXIO)
3385 			err = -EPROBE_DEFER;
3386 		else
3387 			dev_err(dev, "MC portal allocation failed\n");
3388 		goto err_portal_alloc;
3389 	}
3390 
3391 	/* MC objects initialization and configuration */
3392 	err = setup_dpni(dpni_dev);
3393 	if (err)
3394 		goto err_dpni_setup;
3395 
3396 	err = setup_dpio(priv);
3397 	if (err)
3398 		goto err_dpio_setup;
3399 
3400 	setup_fqs(priv);
3401 
3402 	err = setup_dpbp(priv);
3403 	if (err)
3404 		goto err_dpbp_setup;
3405 
3406 	err = bind_dpni(priv);
3407 	if (err)
3408 		goto err_bind;
3409 
3410 	/* Add a NAPI context for each channel */
3411 	add_ch_napi(priv);
3412 
3413 	/* Percpu statistics */
3414 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
3415 	if (!priv->percpu_stats) {
3416 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
3417 		err = -ENOMEM;
3418 		goto err_alloc_percpu_stats;
3419 	}
3420 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
3421 	if (!priv->percpu_extras) {
3422 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
3423 		err = -ENOMEM;
3424 		goto err_alloc_percpu_extras;
3425 	}
3426 
3427 	err = netdev_init(net_dev);
3428 	if (err)
3429 		goto err_netdev_init;
3430 
3431 	/* Configure checksum offload based on current interface flags */
3432 	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
3433 	if (err)
3434 		goto err_csum;
3435 
3436 	err = set_tx_csum(priv, !!(net_dev->features &
3437 				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
3438 	if (err)
3439 		goto err_csum;
3440 
3441 	err = alloc_rings(priv);
3442 	if (err)
3443 		goto err_alloc_rings;
3444 
3445 	err = setup_irqs(dpni_dev);
3446 	if (err) {
3447 		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
3448 		priv->poll_thread = kthread_run(poll_link_state, priv,
3449 						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			err = PTR_ERR(priv->poll_thread);
			goto err_poll_thread;
		}
3454 		priv->do_link_poll = true;
3455 	}
3456 
3457 	err = register_netdev(net_dev);
3458 	if (err < 0) {
3459 		dev_err(dev, "register_netdev() failed\n");
3460 		goto err_netdev_reg;
3461 	}
3462 
3463 #ifdef CONFIG_DEBUG_FS
3464 	dpaa2_dbg_add(priv);
3465 #endif
3466 
3467 	dev_info(dev, "Probed interface %s\n", net_dev->name);
3468 	return 0;
3469 
3470 err_netdev_reg:
3471 	if (priv->do_link_poll)
3472 		kthread_stop(priv->poll_thread);
3473 	else
3474 		fsl_mc_free_irqs(dpni_dev);
3475 err_poll_thread:
3476 	free_rings(priv);
3477 err_alloc_rings:
3478 err_csum:
3479 err_netdev_init:
3480 	free_percpu(priv->percpu_extras);
3481 err_alloc_percpu_extras:
3482 	free_percpu(priv->percpu_stats);
3483 err_alloc_percpu_stats:
3484 	del_ch_napi(priv);
3485 err_bind:
3486 	free_dpbp(priv);
3487 err_dpbp_setup:
3488 	free_dpio(priv);
3489 err_dpio_setup:
3490 	free_dpni(priv);
3491 err_dpni_setup:
3492 	fsl_mc_portal_free(priv->mc_io);
3493 err_portal_alloc:
3494 	dev_set_drvdata(dev, NULL);
3495 	free_netdev(net_dev);
3496 
3497 	return err;
3498 }
3499 
3500 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
3501 {
3502 	struct device *dev;
3503 	struct net_device *net_dev;
3504 	struct dpaa2_eth_priv *priv;
3505 
3506 	dev = &ls_dev->dev;
3507 	net_dev = dev_get_drvdata(dev);
3508 	priv = netdev_priv(net_dev);
3509 
3510 #ifdef CONFIG_DEBUG_FS
3511 	dpaa2_dbg_remove(priv);
3512 #endif
3513 	unregister_netdev(net_dev);
3514 
3515 	if (priv->do_link_poll)
3516 		kthread_stop(priv->poll_thread);
3517 	else
3518 		fsl_mc_free_irqs(ls_dev);
3519 
3520 	free_rings(priv);
3521 	free_percpu(priv->percpu_stats);
3522 	free_percpu(priv->percpu_extras);
3523 
3524 	del_ch_napi(priv);
3525 	free_dpbp(priv);
3526 	free_dpio(priv);
3527 	free_dpni(priv);
3528 
	fsl_mc_portal_free(priv->mc_io);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);
3534 
3535 	return 0;
3536 }
3537 
3538 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
3539 	{
3540 		.vendor = FSL_MC_VENDOR_FREESCALE,
3541 		.obj_type = "dpni",
3542 	},
3543 	{ .vendor = 0x0 }
3544 };
3545 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
3546 
3547 static struct fsl_mc_driver dpaa2_eth_driver = {
3548 	.driver = {
3549 		.name = KBUILD_MODNAME,
3550 		.owner = THIS_MODULE,
3551 	},
3552 	.probe = dpaa2_eth_probe,
3553 	.remove = dpaa2_eth_remove,
3554 	.match_id_table = dpaa2_eth_match_id_table
3555 };
3556 
3557 static int __init dpaa2_eth_driver_init(void)
3558 {
3559 	int err;
3560 
3561 	dpaa2_eth_dbg_init();
3562 	err = fsl_mc_driver_register(&dpaa2_eth_driver);
3563 	if (err) {
3564 		dpaa2_eth_dbg_exit();
3565 		return err;
3566 	}
3567 
3568 	return 0;
3569 }
3570 
3571 static void __exit dpaa2_eth_driver_exit(void)
3572 {
3573 	dpaa2_eth_dbg_exit();
3574 	fsl_mc_driver_unregister(&dpaa2_eth_driver);
3575 }
3576 
3577 module_init(dpaa2_eth_driver_init);
3578 module_exit(dpaa2_eth_driver_exit);
3579