1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3  * Copyright 2016-2017 NXP
4  */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/msi.h>
12 #include <linux/kthread.h>
13 #include <linux/iommu.h>
14 #include <linux/net_tstamp.h>
15 #include <linux/fsl/mc.h>
16 
17 #include <net/sock.h>
18 
19 #include "dpaa2-eth.h"
20 
/* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
 * using trace events only need to #include the trace header file,
 * without defining CREATE_TRACE_POINTS again.
 */
24 #define CREATE_TRACE_POINTS
25 #include "dpaa2-eth-trace.h"
26 
27 MODULE_LICENSE("Dual BSD/GPL");
28 MODULE_AUTHOR("Freescale Semiconductor, Inc");
29 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
30 
31 static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
32 				dma_addr_t iova_addr)
33 {
34 	phys_addr_t phys_addr;
35 
36 	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
37 
38 	return phys_to_virt(phys_addr);
39 }
40 
41 static void validate_rx_csum(struct dpaa2_eth_priv *priv,
42 			     u32 fd_status,
43 			     struct sk_buff *skb)
44 {
45 	skb_checksum_none_assert(skb);
46 
47 	/* HW checksum validation is disabled, nothing to do here */
48 	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
49 		return;
50 
51 	/* Read checksum validation bits */
52 	if (!((fd_status & DPAA2_FAS_L3CV) &&
53 	      (fd_status & DPAA2_FAS_L4CV)))
54 		return;
55 
56 	/* Inform the stack there's no need to compute L3/L4 csum anymore */
57 	skb->ip_summed = CHECKSUM_UNNECESSARY;
58 }
59 
60 /* Free a received FD.
61  * Not to be used for Tx conf FDs or on any other paths.
62  */
63 static void free_rx_fd(struct dpaa2_eth_priv *priv,
64 		       const struct dpaa2_fd *fd,
65 		       void *vaddr)
66 {
67 	struct device *dev = priv->net_dev->dev.parent;
68 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
69 	u8 fd_format = dpaa2_fd_get_format(fd);
70 	struct dpaa2_sg_entry *sgt;
71 	void *sg_vaddr;
72 	int i;
73 
74 	/* If single buffer frame, just free the data buffer */
75 	if (fd_format == dpaa2_fd_single)
76 		goto free_buf;
77 	else if (fd_format != dpaa2_fd_sg)
78 		/* We don't support any other format */
79 		return;
80 
81 	/* For S/G frames, we first need to free all SG entries
82 	 * except the first one, which was taken care of already
83 	 */
84 	sgt = vaddr + dpaa2_fd_get_offset(fd);
85 	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
86 		addr = dpaa2_sg_get_addr(&sgt[i]);
87 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
88 		dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE,
89 				 DMA_FROM_DEVICE);
90 
91 		skb_free_frag(sg_vaddr);
92 		if (dpaa2_sg_is_final(&sgt[i]))
93 			break;
94 	}
95 
96 free_buf:
97 	skb_free_frag(vaddr);
98 }
99 
100 /* Build a linear skb based on a single-buffer frame descriptor */
101 static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv,
102 					struct dpaa2_eth_channel *ch,
103 					const struct dpaa2_fd *fd,
104 					void *fd_vaddr)
105 {
106 	struct sk_buff *skb = NULL;
107 	u16 fd_offset = dpaa2_fd_get_offset(fd);
108 	u32 fd_length = dpaa2_fd_get_len(fd);
109 
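	/* The data buffer is handed over to the stack as part of the skb,
	 * so update the channel's software buffer counter
	 */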
110 	ch->buf_count--;
111 
112 	skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE);
113 	if (unlikely(!skb))
114 		return NULL;
115 
116 	skb_reserve(skb, fd_offset);
117 	skb_put(skb, fd_length);
118 
119 	return skb;
120 }
121 
/* Build a non-linear (fragmented) skb based on an S/G table */
123 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
124 				      struct dpaa2_eth_channel *ch,
125 				      struct dpaa2_sg_entry *sgt)
126 {
127 	struct sk_buff *skb = NULL;
128 	struct device *dev = priv->net_dev->dev.parent;
129 	void *sg_vaddr;
130 	dma_addr_t sg_addr;
131 	u16 sg_offset;
132 	u32 sg_length;
133 	struct page *page, *head_page;
134 	int page_offset;
135 	int i;
136 
137 	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
138 		struct dpaa2_sg_entry *sge = &sgt[i];
139 
140 		/* NOTE: We only support SG entries in dpaa2_sg_single format,
141 		 * but this is the only format we may receive from HW anyway
142 		 */
143 
144 		/* Get the address and length from the S/G entry */
145 		sg_addr = dpaa2_sg_get_addr(sge);
146 		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
147 		dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE,
148 				 DMA_FROM_DEVICE);
149 
150 		sg_length = dpaa2_sg_get_len(sge);
151 
152 		if (i == 0) {
153 			/* We build the skb around the first data buffer */
154 			skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE);
155 			if (unlikely(!skb)) {
156 				/* Free the first SG entry now, since we already
157 				 * unmapped it and obtained the virtual address
158 				 */
159 				skb_free_frag(sg_vaddr);
160 
161 				/* We still need to subtract the buffers used
162 				 * by this FD from our software counter
163 				 */
				while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
				       !dpaa2_sg_is_final(&sgt[i]))
166 					i++;
167 				break;
168 			}
169 
170 			sg_offset = dpaa2_sg_get_offset(sge);
171 			skb_reserve(skb, sg_offset);
172 			skb_put(skb, sg_length);
173 		} else {
174 			/* Rest of the data buffers are stored as skb frags */
175 			page = virt_to_page(sg_vaddr);
176 			head_page = virt_to_head_page(sg_vaddr);
177 
178 			/* Offset in page (which may be compound).
179 			 * Data in subsequent SG entries is stored from the
180 			 * beginning of the buffer, so we don't need to add the
181 			 * sg_offset.
182 			 */
183 			page_offset = ((unsigned long)sg_vaddr &
184 				(PAGE_SIZE - 1)) +
185 				(page_address(page) - page_address(head_page));
186 
187 			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
188 					sg_length, DPAA2_ETH_RX_BUF_SIZE);
189 		}
190 
191 		if (dpaa2_sg_is_final(sge))
192 			break;
193 	}
194 
195 	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
196 
197 	/* Count all data buffers + SG table buffer */
198 	ch->buf_count -= i + 2;
199 
200 	return skb;
201 }
202 
203 /* Main Rx frame processing routine */
204 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
205 			 struct dpaa2_eth_channel *ch,
206 			 const struct dpaa2_fd *fd,
207 			 struct napi_struct *napi,
208 			 u16 queue_id)
209 {
210 	dma_addr_t addr = dpaa2_fd_get_addr(fd);
211 	u8 fd_format = dpaa2_fd_get_format(fd);
212 	void *vaddr;
213 	struct sk_buff *skb;
214 	struct rtnl_link_stats64 *percpu_stats;
215 	struct dpaa2_eth_drv_stats *percpu_extras;
216 	struct device *dev = priv->net_dev->dev.parent;
217 	struct dpaa2_fas *fas;
218 	void *buf_data;
219 	u32 status = 0;
220 
221 	/* Tracing point */
222 	trace_dpaa2_rx_fd(priv->net_dev, fd);
223 
224 	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
225 	dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE);
226 
227 	fas = dpaa2_get_fas(vaddr, false);
228 	prefetch(fas);
229 	buf_data = vaddr + dpaa2_fd_get_offset(fd);
230 	prefetch(buf_data);
231 
232 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
233 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
234 
235 	if (fd_format == dpaa2_fd_single) {
236 		skb = build_linear_skb(priv, ch, fd, vaddr);
237 	} else if (fd_format == dpaa2_fd_sg) {
238 		skb = build_frag_skb(priv, ch, buf_data);
239 		skb_free_frag(vaddr);
240 		percpu_extras->rx_sg_frames++;
241 		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
242 	} else {
243 		/* We don't support any other format */
244 		goto err_frame_format;
245 	}
246 
247 	if (unlikely(!skb))
248 		goto err_build_skb;
249 
250 	prefetch(skb->data);
251 
252 	/* Get the timestamp value */
253 	if (priv->rx_tstamp) {
254 		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
255 		__le64 *ts = dpaa2_get_ts(vaddr, false);
256 		u64 ns;
257 
258 		memset(shhwtstamps, 0, sizeof(*shhwtstamps));
259 
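		/* The hardware timestamp is expressed in clock periods;
		 * convert it to nanoseconds
		 */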
260 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
261 		shhwtstamps->hwtstamp = ns_to_ktime(ns);
262 	}
263 
264 	/* Check if we need to validate the L4 csum */
265 	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
266 		status = le32_to_cpu(fas->status);
267 		validate_rx_csum(priv, status, skb);
268 	}
269 
270 	skb->protocol = eth_type_trans(skb, priv->net_dev);
271 	skb_record_rx_queue(skb, queue_id);
272 
273 	percpu_stats->rx_packets++;
274 	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
275 
276 	napi_gro_receive(napi, skb);
277 
278 	return;
279 
280 err_build_skb:
281 	free_rx_fd(priv, fd, vaddr);
282 err_frame_format:
283 	percpu_stats->rx_dropped++;
284 }
285 
286 /* Consume all frames pull-dequeued into the store. This is the simplest way to
287  * make sure we don't accidentally issue another volatile dequeue which would
288  * overwrite (leak) frames already in the store.
289  *
 * Observing the NAPI budget is not our concern; that is left to the caller.
291  */
292 static int consume_frames(struct dpaa2_eth_channel *ch)
293 {
294 	struct dpaa2_eth_priv *priv = ch->priv;
295 	struct dpaa2_eth_fq *fq;
296 	struct dpaa2_dq *dq;
297 	const struct dpaa2_fd *fd;
298 	int cleaned = 0;
299 	int is_last;
300 
301 	do {
302 		dq = dpaa2_io_store_next(ch->store, &is_last);
303 		if (unlikely(!dq)) {
304 			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
306 			 * the store until we get some sort of valid response
307 			 * token (either a valid frame or an "empty dequeue")
308 			 */
309 			continue;
310 		}
311 
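		/* Obtain the FD and the FQ back-pointer we stashed in the
		 * FQD context when configuring the queue
		 */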
312 		fd = dpaa2_dq_fd(dq);
313 		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
314 		fq->stats.frames++;
315 
316 		fq->consume(priv, ch, fd, &ch->napi, fq->flowid);
317 		cleaned++;
318 	} while (!is_last);
319 
320 	return cleaned;
321 }
322 
323 /* Configure the egress frame annotation for timestamp update */
324 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
325 {
326 	struct dpaa2_faead *faead;
327 	u32 ctrl, frc;
328 
329 	/* Mark the egress frame annotation area as valid */
330 	frc = dpaa2_fd_get_frc(fd);
331 	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
332 
333 	/* Set hardware annotation size */
334 	ctrl = dpaa2_fd_get_ctrl(fd);
335 	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
336 
	/* Enable the UPD (update prepended data) bit in the FAEAD field of
	 * the hardware frame annotation area
	 */
340 	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
341 	faead = dpaa2_get_faead(buf_start, true);
342 	faead->ctrl = cpu_to_le32(ctrl);
343 }
344 
345 /* Create a frame descriptor based on a fragmented skb */
346 static int build_sg_fd(struct dpaa2_eth_priv *priv,
347 		       struct sk_buff *skb,
348 		       struct dpaa2_fd *fd)
349 {
350 	struct device *dev = priv->net_dev->dev.parent;
351 	void *sgt_buf = NULL;
352 	dma_addr_t addr;
353 	int nr_frags = skb_shinfo(skb)->nr_frags;
354 	struct dpaa2_sg_entry *sgt;
355 	int i, err;
356 	int sgt_buf_size;
357 	struct scatterlist *scl, *crt_scl;
358 	int num_sg;
359 	int num_dma_bufs;
360 	struct dpaa2_eth_swa *swa;
361 
362 	/* Create and map scatterlist.
363 	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
364 	 * to go beyond nr_frags+1.
365 	 * Note: We don't support chained scatterlists
366 	 */
367 	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
368 		return -EINVAL;
369 
370 	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
371 	if (unlikely(!scl))
372 		return -ENOMEM;
373 
374 	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -EINVAL;
		goto dma_map_sg_failed;
	}
376 	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
377 	if (unlikely(!num_dma_bufs)) {
378 		err = -ENOMEM;
379 		goto dma_map_sg_failed;
380 	}
381 
382 	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
385 	sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
386 	if (unlikely(!sgt_buf)) {
387 		err = -ENOMEM;
388 		goto sgt_buf_alloc_failed;
389 	}
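	/* The allocation above includes extra room so we can align the
	 * start of the SGT buffer without overrunning it
	 */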
390 	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
391 	memset(sgt_buf, 0, sgt_buf_size);
392 
393 	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
394 
395 	/* Fill in the HW SGT structure.
396 	 *
397 	 * sgt_buf is zeroed out, so the following fields are implicit
398 	 * in all sgt entries:
399 	 *   - offset is 0
400 	 *   - format is 'dpaa2_sg_single'
401 	 */
402 	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
403 		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
404 		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
405 	}
406 	dpaa2_sg_set_final(&sgt[i - 1], true);
407 
408 	/* Store the skb backpointer in the SGT buffer.
409 	 * Fit the scatterlist and the number of buffers alongside the
410 	 * skb backpointer in the software annotation area. We'll need
411 	 * all of them on Tx Conf.
412 	 */
413 	swa = (struct dpaa2_eth_swa *)sgt_buf;
414 	swa->skb = skb;
415 	swa->scl = scl;
416 	swa->num_sg = num_sg;
417 	swa->sgt_size = sgt_buf_size;
418 
419 	/* Separately map the SGT buffer */
420 	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
421 	if (unlikely(dma_mapping_error(dev, addr))) {
422 		err = -ENOMEM;
423 		goto dma_map_single_failed;
424 	}
425 	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
426 	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
427 	dpaa2_fd_set_addr(fd, addr);
428 	dpaa2_fd_set_len(fd, skb->len);
429 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
430 
431 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
432 		enable_tx_tstamp(fd, sgt_buf);
433 
434 	return 0;
435 
436 dma_map_single_failed:
437 	skb_free_frag(sgt_buf);
438 sgt_buf_alloc_failed:
439 	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
440 dma_map_sg_failed:
441 	kfree(scl);
442 	return err;
443 }
444 
445 /* Create a frame descriptor based on a linear skb */
446 static int build_single_fd(struct dpaa2_eth_priv *priv,
447 			   struct sk_buff *skb,
448 			   struct dpaa2_fd *fd)
449 {
450 	struct device *dev = priv->net_dev->dev.parent;
451 	u8 *buffer_start, *aligned_start;
452 	struct sk_buff **skbh;
453 	dma_addr_t addr;
454 
455 	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);
456 
457 	/* If there's enough room to align the FD address, do it.
458 	 * It will help hardware optimize accesses.
459 	 */
460 	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
461 				  DPAA2_ETH_TX_BUF_ALIGN);
462 	if (aligned_start >= skb->head)
463 		buffer_start = aligned_start;
464 
465 	/* Store a backpointer to the skb at the beginning of the buffer
466 	 * (in the private data area) such that we can release it
467 	 * on Tx confirm
468 	 */
469 	skbh = (struct sk_buff **)buffer_start;
470 	*skbh = skb;
471 
472 	addr = dma_map_single(dev, buffer_start,
473 			      skb_tail_pointer(skb) - buffer_start,
474 			      DMA_BIDIRECTIONAL);
475 	if (unlikely(dma_mapping_error(dev, addr)))
476 		return -ENOMEM;
477 
478 	dpaa2_fd_set_addr(fd, addr);
479 	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
480 	dpaa2_fd_set_len(fd, skb->len);
481 	dpaa2_fd_set_format(fd, dpaa2_fd_single);
482 	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1);
483 
484 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
485 		enable_tx_tstamp(fd, buffer_start);
486 
487 	return 0;
488 }
489 
490 /* FD freeing routine on the Tx path
491  *
492  * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
493  * back-pointed to is also freed.
494  * This can be called either from dpaa2_eth_tx_conf() or on the error path of
495  * dpaa2_eth_tx().
496  */
497 static void free_tx_fd(const struct dpaa2_eth_priv *priv,
498 		       const struct dpaa2_fd *fd)
499 {
500 	struct device *dev = priv->net_dev->dev.parent;
501 	dma_addr_t fd_addr;
502 	struct sk_buff **skbh, *skb;
503 	unsigned char *buffer_start;
504 	struct dpaa2_eth_swa *swa;
505 	u8 fd_format = dpaa2_fd_get_format(fd);
506 
507 	fd_addr = dpaa2_fd_get_addr(fd);
508 	skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
509 
510 	if (fd_format == dpaa2_fd_single) {
511 		skb = *skbh;
512 		buffer_start = (unsigned char *)skbh;
513 		/* Accessing the skb buffer is safe before dma unmap, because
514 		 * we didn't map the actual skb shell.
515 		 */
516 		dma_unmap_single(dev, fd_addr,
517 				 skb_tail_pointer(skb) - buffer_start,
518 				 DMA_BIDIRECTIONAL);
519 	} else if (fd_format == dpaa2_fd_sg) {
520 		swa = (struct dpaa2_eth_swa *)skbh;
521 		skb = swa->skb;
522 
523 		/* Unmap the scatterlist */
524 		dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
525 		kfree(swa->scl);
526 
527 		/* Unmap the SGT buffer */
528 		dma_unmap_single(dev, fd_addr, swa->sgt_size,
529 				 DMA_BIDIRECTIONAL);
530 	} else {
531 		netdev_dbg(priv->net_dev, "Invalid FD format\n");
532 		return;
533 	}
534 
535 	/* Get the timestamp value */
536 	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
537 		struct skb_shared_hwtstamps shhwtstamps;
538 		__le64 *ts = dpaa2_get_ts(skbh, true);
539 		u64 ns;
540 
541 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
542 
543 		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
544 		shhwtstamps.hwtstamp = ns_to_ktime(ns);
545 		skb_tstamp_tx(skb, &shhwtstamps);
546 	}
547 
548 	/* Free SGT buffer allocated on tx */
549 	if (fd_format != dpaa2_fd_single)
550 		skb_free_frag(skbh);
551 
552 	/* Move on with skb release */
553 	dev_kfree_skb(skb);
554 }
555 
556 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
557 {
558 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
559 	struct dpaa2_fd fd;
560 	struct rtnl_link_stats64 *percpu_stats;
561 	struct dpaa2_eth_drv_stats *percpu_extras;
562 	struct dpaa2_eth_fq *fq;
563 	u16 queue_mapping;
564 	unsigned int needed_headroom;
565 	int err, i;
566 
567 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
568 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
569 
570 	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
571 	if (skb_headroom(skb) < needed_headroom) {
572 		struct sk_buff *ns;
573 
574 		ns = skb_realloc_headroom(skb, needed_headroom);
575 		if (unlikely(!ns)) {
576 			percpu_stats->tx_dropped++;
577 			goto err_alloc_headroom;
578 		}
579 		percpu_extras->tx_reallocs++;
580 
581 		if (skb->sk)
582 			skb_set_owner_w(ns, skb->sk);
583 
584 		dev_kfree_skb(skb);
585 		skb = ns;
586 	}
587 
588 	/* We'll be holding a back-reference to the skb until Tx Confirmation;
589 	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
590 	 */
591 	skb = skb_unshare(skb, GFP_ATOMIC);
592 	if (unlikely(!skb)) {
593 		/* skb_unshare() has already freed the skb */
594 		percpu_stats->tx_dropped++;
595 		return NETDEV_TX_OK;
596 	}
597 
598 	/* Setup the FD fields */
599 	memset(&fd, 0, sizeof(fd));
600 
601 	if (skb_is_nonlinear(skb)) {
602 		err = build_sg_fd(priv, skb, &fd);
603 		percpu_extras->tx_sg_frames++;
604 		percpu_extras->tx_sg_bytes += skb->len;
605 	} else {
606 		err = build_single_fd(priv, skb, &fd);
607 	}
608 
609 	if (unlikely(err)) {
610 		percpu_stats->tx_dropped++;
611 		goto err_build_fd;
612 	}
613 
614 	/* Tracing point */
615 	trace_dpaa2_tx_fd(net_dev, &fd);
616 
617 	/* TxConf FQ selection relies on queue id from the stack.
618 	 * In case of a forwarded frame from another DPNI interface, we choose
619 	 * a queue affined to the same core that processed the Rx frame
620 	 */
621 	queue_mapping = skb_get_queue_mapping(skb);
622 	fq = &priv->fq[queue_mapping];
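	/* If the QBMan portal is busy, retry the enqueue a limited number
	 * of times before giving up
	 */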
623 	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
624 		err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
625 						  priv->tx_qdid, 0,
626 						  fq->tx_qdbin, &fd);
627 		if (err != -EBUSY)
628 			break;
629 	}
630 	percpu_extras->tx_portal_busy += i;
631 	if (unlikely(err < 0)) {
632 		percpu_stats->tx_errors++;
633 		/* Clean up everything, including freeing the skb */
634 		free_tx_fd(priv, &fd);
635 	} else {
636 		percpu_stats->tx_packets++;
637 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd);
638 	}
639 
640 	return NETDEV_TX_OK;
641 
642 err_build_fd:
643 err_alloc_headroom:
644 	dev_kfree_skb(skb);
645 
646 	return NETDEV_TX_OK;
647 }
648 
649 /* Tx confirmation frame processing routine */
650 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
651 			      struct dpaa2_eth_channel *ch,
652 			      const struct dpaa2_fd *fd,
653 			      struct napi_struct *napi __always_unused,
654 			      u16 queue_id __always_unused)
655 {
656 	struct rtnl_link_stats64 *percpu_stats;
657 	struct dpaa2_eth_drv_stats *percpu_extras;
658 	u32 fd_errors;
659 
660 	/* Tracing point */
661 	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
662 
663 	percpu_extras = this_cpu_ptr(priv->percpu_extras);
664 	percpu_extras->tx_conf_frames++;
665 	percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd);
666 
667 	/* Check frame errors in the FD field */
668 	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
669 	free_tx_fd(priv, fd);
670 
671 	if (likely(!fd_errors))
672 		return;
673 
674 	if (net_ratelimit())
675 		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
676 			   fd_errors);
677 
678 	percpu_stats = this_cpu_ptr(priv->percpu_stats);
679 	/* Tx-conf logically pertains to the egress path. */
680 	percpu_stats->tx_errors++;
681 }
682 
683 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
684 {
685 	int err;
686 
687 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
688 			       DPNI_OFF_RX_L3_CSUM, enable);
689 	if (err) {
690 		netdev_err(priv->net_dev,
691 			   "dpni_set_offload(RX_L3_CSUM) failed\n");
692 		return err;
693 	}
694 
695 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
696 			       DPNI_OFF_RX_L4_CSUM, enable);
697 	if (err) {
698 		netdev_err(priv->net_dev,
699 			   "dpni_set_offload(RX_L4_CSUM) failed\n");
700 		return err;
701 	}
702 
703 	return 0;
704 }
705 
706 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
707 {
708 	int err;
709 
710 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
711 			       DPNI_OFF_TX_L3_CSUM, enable);
712 	if (err) {
713 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
714 		return err;
715 	}
716 
717 	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
718 			       DPNI_OFF_TX_L4_CSUM, enable);
719 	if (err) {
720 		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
721 		return err;
722 	}
723 
724 	return 0;
725 }
726 
/* Free buffers acquired from the buffer pool, or buffers meant to be
 * released back into it
 */
730 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
731 {
732 	struct device *dev = priv->net_dev->dev.parent;
733 	void *vaddr;
734 	int i;
735 
736 	for (i = 0; i < count; i++) {
737 		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
738 		dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE,
739 				 DMA_FROM_DEVICE);
740 		skb_free_frag(vaddr);
741 	}
742 }
743 
744 /* Perform a single release command to add buffers
745  * to the specified buffer pool
746  */
747 static int add_bufs(struct dpaa2_eth_priv *priv,
748 		    struct dpaa2_eth_channel *ch, u16 bpid)
749 {
750 	struct device *dev = priv->net_dev->dev.parent;
751 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
752 	void *buf;
753 	dma_addr_t addr;
754 	int i, err;
755 
756 	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
757 		/* Allocate buffer visible to WRIOP + skb shared info +
758 		 * alignment padding
759 		 */
760 		buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv));
761 		if (unlikely(!buf))
762 			goto err_alloc;
763 
764 		buf = PTR_ALIGN(buf, priv->rx_buf_align);
765 
766 		addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE,
767 				      DMA_FROM_DEVICE);
768 		if (unlikely(dma_mapping_error(dev, addr)))
769 			goto err_map;
770 
771 		buf_array[i] = addr;
772 
773 		/* tracing point */
774 		trace_dpaa2_eth_buf_seed(priv->net_dev,
775 					 buf, dpaa2_eth_buf_raw_size(priv),
776 					 addr, DPAA2_ETH_RX_BUF_SIZE,
777 					 bpid);
778 	}
779 
780 release_bufs:
781 	/* In case the portal is busy, retry until successful */
782 	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
783 					       buf_array, i)) == -EBUSY)
784 		cpu_relax();
785 
786 	/* If release command failed, clean up and bail out;
787 	 * not much else we can do about it
788 	 */
789 	if (err) {
790 		free_bufs(priv, buf_array, i);
791 		return 0;
792 	}
793 
794 	return i;
795 
796 err_map:
797 	skb_free_frag(buf);
798 err_alloc:
799 	/* If we managed to allocate at least some buffers,
800 	 * release them to hardware
801 	 */
802 	if (i)
803 		goto release_bufs;
804 
805 	return 0;
806 }
807 
808 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
809 {
810 	int i, j;
811 	int new_count;
812 
813 	/* This is the lazy seeding of Rx buffer pools.
	 * add_bufs() is also used on the Rx hotpath and calls
815 	 * napi_alloc_frag(). The trouble with that is that it in turn ends up
816 	 * calling this_cpu_ptr(), which mandates execution in atomic context.
817 	 * Rather than splitting up the code, do a one-off preempt disable.
818 	 */
819 	preempt_disable();
820 	for (j = 0; j < priv->num_channels; j++) {
821 		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
822 		     i += DPAA2_ETH_BUFS_PER_CMD) {
823 			new_count = add_bufs(priv, priv->channel[j], bpid);
824 			priv->channel[j]->buf_count += new_count;
825 
826 			if (new_count < DPAA2_ETH_BUFS_PER_CMD) {
827 				preempt_enable();
828 				return -ENOMEM;
829 			}
830 		}
831 	}
832 	preempt_enable();
833 
834 	return 0;
835 }
836 
/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
841 static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
842 {
843 	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
844 	int ret;
845 
846 	do {
847 		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
848 					       buf_array, count);
849 		if (ret < 0) {
850 			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
851 			return;
852 		}
853 		free_bufs(priv, buf_array, ret);
854 	} while (ret);
855 }
856 
857 static void drain_pool(struct dpaa2_eth_priv *priv)
858 {
859 	int i;
860 
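	/* Acquire buffers from the pool in full-sized chunks first, then
	 * one at a time, so no stray buffers are left behind
	 */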
861 	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
862 	drain_bufs(priv, 1);
863 
864 	for (i = 0; i < priv->num_channels; i++)
865 		priv->channel[i]->buf_count = 0;
866 }
867 
868 /* Function is called from softirq context only, so we don't need to guard
 * the access to the per-channel buffer count
870  */
871 static int refill_pool(struct dpaa2_eth_priv *priv,
872 		       struct dpaa2_eth_channel *ch,
873 		       u16 bpid)
874 {
875 	int new_count;
876 
877 	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
878 		return 0;
879 
880 	do {
881 		new_count = add_bufs(priv, ch, bpid);
882 		if (unlikely(!new_count)) {
883 			/* Out of memory; abort for now, we'll try later on */
884 			break;
885 		}
886 		ch->buf_count += new_count;
887 	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
888 
889 	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
890 		return -ENOMEM;
891 
892 	return 0;
893 }
894 
895 static int pull_channel(struct dpaa2_eth_channel *ch)
896 {
897 	int err;
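	/* Start at -1 so a successful first attempt does not count
	 * towards the portal busy statistic
	 */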
898 	int dequeues = -1;
899 
900 	/* Retry while portal is busy */
901 	do {
902 		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
903 						    ch->store);
904 		dequeues++;
905 		cpu_relax();
906 	} while (err == -EBUSY);
907 
908 	ch->stats.dequeue_portal_busy += dequeues;
909 	if (unlikely(err))
910 		ch->stats.pull_err++;
911 
912 	return err;
913 }
914 
915 /* NAPI poll routine
916  *
917  * Frames are dequeued from the QMan channel associated with this NAPI context.
918  * Rx, Tx confirmation and (if configured) Rx error frames all count
919  * towards the NAPI budget.
920  */
921 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
922 {
923 	struct dpaa2_eth_channel *ch;
924 	int cleaned = 0, store_cleaned;
925 	struct dpaa2_eth_priv *priv;
926 	int err;
927 
928 	ch = container_of(napi, struct dpaa2_eth_channel, napi);
929 	priv = ch->priv;
930 
931 	while (cleaned < budget) {
932 		err = pull_channel(ch);
933 		if (unlikely(err))
934 			break;
935 
936 		/* Refill pool if appropriate */
937 		refill_pool(priv, ch, priv->bpid);
938 
939 		store_cleaned = consume_frames(ch);
940 		cleaned += store_cleaned;
941 
942 		/* If we have enough budget left for a full store,
943 		 * try a new pull dequeue, otherwise we're done here
944 		 */
945 		if (store_cleaned == 0 ||
946 		    cleaned > budget - DPAA2_ETH_STORE_SIZE)
947 			break;
948 	}
949 
950 	if (cleaned < budget && napi_complete_done(napi, cleaned)) {
951 		/* Re-enable data available notifications */
952 		do {
953 			err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
954 			cpu_relax();
955 		} while (err == -EBUSY);
956 		WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
957 			  ch->nctx.desired_cpu);
958 	}
959 
960 	ch->stats.frames += cleaned;
961 
962 	return cleaned;
963 }
964 
965 static void enable_ch_napi(struct dpaa2_eth_priv *priv)
966 {
967 	struct dpaa2_eth_channel *ch;
968 	int i;
969 
970 	for (i = 0; i < priv->num_channels; i++) {
971 		ch = priv->channel[i];
972 		napi_enable(&ch->napi);
973 	}
974 }
975 
976 static void disable_ch_napi(struct dpaa2_eth_priv *priv)
977 {
978 	struct dpaa2_eth_channel *ch;
979 	int i;
980 
981 	for (i = 0; i < priv->num_channels; i++) {
982 		ch = priv->channel[i];
983 		napi_disable(&ch->napi);
984 	}
985 }
986 
987 static int link_state_update(struct dpaa2_eth_priv *priv)
988 {
989 	struct dpni_link_state state;
990 	int err;
991 
992 	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
993 	if (unlikely(err)) {
994 		netdev_err(priv->net_dev,
995 			   "dpni_get_link_state() failed\n");
996 		return err;
997 	}
998 
	/* Check link state; speed / duplex changes are not treated yet */
1000 	if (priv->link_state.up == state.up)
1001 		return 0;
1002 
1003 	priv->link_state = state;
1004 	if (state.up) {
1005 		netif_carrier_on(priv->net_dev);
1006 		netif_tx_start_all_queues(priv->net_dev);
1007 	} else {
1008 		netif_tx_stop_all_queues(priv->net_dev);
1009 		netif_carrier_off(priv->net_dev);
1010 	}
1011 
1012 	netdev_info(priv->net_dev, "Link Event: state %s\n",
1013 		    state.up ? "up" : "down");
1014 
1015 	return 0;
1016 }
1017 
1018 static int dpaa2_eth_open(struct net_device *net_dev)
1019 {
1020 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1021 	int err;
1022 
1023 	err = seed_pool(priv, priv->bpid);
1024 	if (err) {
1025 		/* Not much to do; the buffer pool, though not filled up,
1026 		 * may still contain some buffers which would enable us
1027 		 * to limp on.
1028 		 */
1029 		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1030 			   priv->dpbp_dev->obj_desc.id, priv->bpid);
1031 	}
1032 
1033 	/* We'll only start the txqs when the link is actually ready; make sure
1034 	 * we don't race against the link up notification, which may come
1035 	 * immediately after dpni_enable();
1036 	 */
1037 	netif_tx_stop_all_queues(net_dev);
1038 	enable_ch_napi(priv);
1039 	/* Also, explicitly set carrier off, otherwise netif_carrier_ok() will
1040 	 * return true and cause 'ip link show' to report the LOWER_UP flag,
1041 	 * even though the link notification wasn't even received.
1042 	 */
1043 	netif_carrier_off(net_dev);
1044 
1045 	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
1046 	if (err < 0) {
1047 		netdev_err(net_dev, "dpni_enable() failed\n");
1048 		goto enable_err;
1049 	}
1050 
1051 	/* If the DPMAC object has already processed the link up interrupt,
1052 	 * we have to learn the link state ourselves.
1053 	 */
1054 	err = link_state_update(priv);
1055 	if (err < 0) {
1056 		netdev_err(net_dev, "Can't update link state\n");
1057 		goto link_state_err;
1058 	}
1059 
1060 	return 0;
1061 
1062 link_state_err:
1063 enable_err:
1064 	disable_ch_napi(priv);
1065 	drain_pool(priv);
1066 	return err;
1067 }
1068 
1069 /* The DPIO store must be empty when we call this,
1070  * at the end of every NAPI cycle.
1071  */
1072 static u32 drain_channel(struct dpaa2_eth_priv *priv,
1073 			 struct dpaa2_eth_channel *ch)
1074 {
1075 	u32 drained = 0, total = 0;
1076 
1077 	do {
1078 		pull_channel(ch);
1079 		drained = consume_frames(ch);
1080 		total += drained;
1081 	} while (drained);
1082 
1083 	return total;
1084 }
1085 
1086 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv)
1087 {
1088 	struct dpaa2_eth_channel *ch;
1089 	int i;
1090 	u32 drained = 0;
1091 
1092 	for (i = 0; i < priv->num_channels; i++) {
1093 		ch = priv->channel[i];
1094 		drained += drain_channel(priv, ch);
1095 	}
1096 
1097 	return drained;
1098 }
1099 
1100 static int dpaa2_eth_stop(struct net_device *net_dev)
1101 {
1102 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1103 	int dpni_enabled;
1104 	int retries = 10;
1105 	u32 drained;
1106 
1107 	netif_tx_stop_all_queues(net_dev);
1108 	netif_carrier_off(net_dev);
1109 
1110 	/* Loop while dpni_disable() attempts to drain the egress FQs
1111 	 * and confirm them back to us.
1112 	 */
1113 	do {
1114 		dpni_disable(priv->mc_io, 0, priv->mc_token);
1115 		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
1116 		if (dpni_enabled)
1117 			/* Allow the hardware some slack */
1118 			msleep(100);
1119 	} while (dpni_enabled && --retries);
1120 	if (!retries) {
1121 		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
1122 		/* Must go on and disable NAPI nonetheless, so we don't crash at
1123 		 * the next "ifconfig up"
1124 		 */
1125 	}
1126 
1127 	/* Wait for NAPI to complete on every core and disable it.
1128 	 * In particular, this will also prevent NAPI from being rescheduled if
1129 	 * a new CDAN is serviced, effectively discarding the CDAN. We therefore
1130 	 * don't even need to disarm the channels, except perhaps for the case
1131 	 * of a huge coalescing value.
1132 	 */
1133 	disable_ch_napi(priv);
1134 
	/* Manually drain the Rx and TxConf queues */
1136 	drained = drain_ingress_frames(priv);
1137 	if (drained)
1138 		netdev_dbg(net_dev, "Drained %d frames.\n", drained);
1139 
1140 	/* Empty the buffer pool */
1141 	drain_pool(priv);
1142 
1143 	return 0;
1144 }
1145 
1146 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
1147 {
1148 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1149 	struct device *dev = net_dev->dev.parent;
1150 	int err;
1151 
1152 	err = eth_mac_addr(net_dev, addr);
1153 	if (err < 0) {
1154 		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
1155 		return err;
1156 	}
1157 
1158 	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
1159 					net_dev->dev_addr);
1160 	if (err) {
1161 		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
1162 		return err;
1163 	}
1164 
1165 	return 0;
1166 }
1167 
/* Fill in counters maintained by the GPP driver. These may be different from
1169  * the hardware counters obtained by ethtool.
1170  */
1171 static void dpaa2_eth_get_stats(struct net_device *net_dev,
1172 				struct rtnl_link_stats64 *stats)
1173 {
1174 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1175 	struct rtnl_link_stats64 *percpu_stats;
1176 	u64 *cpustats;
1177 	u64 *netstats = (u64 *)stats;
1178 	int i, j;
1179 	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
1180 
1181 	for_each_possible_cpu(i) {
1182 		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
1183 		cpustats = (u64 *)percpu_stats;
1184 		for (j = 0; j < num; j++)
1185 			netstats[j] += cpustats[j];
1186 	}
1187 }
1188 
1189 /* Copy mac unicast addresses from @net_dev to @priv.
1190  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1191  */
1192 static void add_uc_hw_addr(const struct net_device *net_dev,
1193 			   struct dpaa2_eth_priv *priv)
1194 {
1195 	struct netdev_hw_addr *ha;
1196 	int err;
1197 
1198 	netdev_for_each_uc_addr(ha, net_dev) {
1199 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1200 					ha->addr);
1201 		if (err)
1202 			netdev_warn(priv->net_dev,
1203 				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
1204 				    ha->addr, err);
1205 	}
1206 }
1207 
1208 /* Copy mac multicast addresses from @net_dev to @priv
1209  * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1210  */
1211 static void add_mc_hw_addr(const struct net_device *net_dev,
1212 			   struct dpaa2_eth_priv *priv)
1213 {
1214 	struct netdev_hw_addr *ha;
1215 	int err;
1216 
1217 	netdev_for_each_mc_addr(ha, net_dev) {
1218 		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
1219 					ha->addr);
1220 		if (err)
1221 			netdev_warn(priv->net_dev,
1222 				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
1223 				    ha->addr, err);
1224 	}
1225 }
1226 
1227 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
1228 {
1229 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1230 	int uc_count = netdev_uc_count(net_dev);
1231 	int mc_count = netdev_mc_count(net_dev);
1232 	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
1233 	u32 options = priv->dpni_attrs.options;
1234 	u16 mc_token = priv->mc_token;
1235 	struct fsl_mc_io *mc_io = priv->mc_io;
1236 	int err;
1237 
1238 	/* Basic sanity checks; these probably indicate a misconfiguration */
1239 	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
1240 		netdev_info(net_dev,
1241 			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
1242 			    max_mac);
1243 
1244 	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
1245 	if (uc_count > max_mac) {
1246 		netdev_info(net_dev,
1247 			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
1248 			    uc_count, max_mac);
1249 		goto force_promisc;
1250 	}
1251 	if (mc_count + uc_count > max_mac) {
1252 		netdev_info(net_dev,
1253 			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
1254 			    uc_count + mc_count, max_mac);
1255 		goto force_mc_promisc;
1256 	}
1257 
1258 	/* Adjust promisc settings due to flag combinations */
1259 	if (net_dev->flags & IFF_PROMISC)
1260 		goto force_promisc;
1261 	if (net_dev->flags & IFF_ALLMULTI) {
1262 		/* First, rebuild unicast filtering table. This should be done
1263 		 * in promisc mode, in order to avoid frame loss while we
1264 		 * progressively add entries to the table.
1265 		 * We don't know whether we had been in promisc already, and
1266 		 * making an MC call to find out is expensive; so set uc promisc
1267 		 * nonetheless.
1268 		 */
1269 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1270 		if (err)
1271 			netdev_warn(net_dev, "Can't set uc promisc\n");
1272 
1273 		/* Actual uc table reconstruction. */
1274 		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
1275 		if (err)
1276 			netdev_warn(net_dev, "Can't clear uc filters\n");
1277 		add_uc_hw_addr(net_dev, priv);
1278 
1279 		/* Finally, clear uc promisc and set mc promisc as requested. */
1280 		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1281 		if (err)
1282 			netdev_warn(net_dev, "Can't clear uc promisc\n");
1283 		goto force_mc_promisc;
1284 	}
1285 
1286 	/* Neither unicast, nor multicast promisc will be on... eventually.
1287 	 * For now, rebuild mac filtering tables while forcing both of them on.
1288 	 */
1289 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1290 	if (err)
1291 		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
1292 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1293 	if (err)
1294 		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
1295 
1296 	/* Actual mac filtering tables reconstruction */
1297 	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
1298 	if (err)
1299 		netdev_warn(net_dev, "Can't clear mac filters\n");
1300 	add_mc_hw_addr(net_dev, priv);
1301 	add_uc_hw_addr(net_dev, priv);
1302 
	/* Now we can clear both ucast and mcast promisc, without the risk
	 * of dropping legitimate frames anymore.
1305 	 */
1306 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
1307 	if (err)
1308 		netdev_warn(net_dev, "Can't clear ucast promisc\n");
1309 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
1310 	if (err)
1311 		netdev_warn(net_dev, "Can't clear mcast promisc\n");
1312 
1313 	return;
1314 
1315 force_promisc:
1316 	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
1317 	if (err)
1318 		netdev_warn(net_dev, "Can't set ucast promisc\n");
1319 force_mc_promisc:
1320 	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
1321 	if (err)
1322 		netdev_warn(net_dev, "Can't set mcast promisc\n");
1323 }
1324 
1325 static int dpaa2_eth_set_features(struct net_device *net_dev,
1326 				  netdev_features_t features)
1327 {
1328 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1329 	netdev_features_t changed = features ^ net_dev->features;
1330 	bool enable;
1331 	int err;
1332 
1333 	if (changed & NETIF_F_RXCSUM) {
1334 		enable = !!(features & NETIF_F_RXCSUM);
1335 		err = set_rx_csum(priv, enable);
1336 		if (err)
1337 			return err;
1338 	}
1339 
1340 	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
1341 		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
1342 		err = set_tx_csum(priv, enable);
1343 		if (err)
1344 			return err;
1345 	}
1346 
1347 	return 0;
1348 }
1349 
1350 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1351 {
1352 	struct dpaa2_eth_priv *priv = netdev_priv(dev);
1353 	struct hwtstamp_config config;
1354 
1355 	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
1356 		return -EFAULT;
1357 
1358 	switch (config.tx_type) {
1359 	case HWTSTAMP_TX_OFF:
1360 		priv->tx_tstamp = false;
1361 		break;
1362 	case HWTSTAMP_TX_ON:
1363 		priv->tx_tstamp = true;
1364 		break;
1365 	default:
1366 		return -ERANGE;
1367 	}
1368 
1369 	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
1370 		priv->rx_tstamp = false;
1371 	} else {
1372 		priv->rx_tstamp = true;
1373 		/* TS is set for all frame types, not only those requested */
1374 		config.rx_filter = HWTSTAMP_FILTER_ALL;
1375 	}
1376 
1377 	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
1378 			-EFAULT : 0;
1379 }
1380 
1381 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1382 {
1383 	if (cmd == SIOCSHWTSTAMP)
1384 		return dpaa2_eth_ts_ioctl(dev, rq, cmd);
1385 
1386 	return -EINVAL;
1387 }
1388 
1389 static const struct net_device_ops dpaa2_eth_ops = {
1390 	.ndo_open = dpaa2_eth_open,
1391 	.ndo_start_xmit = dpaa2_eth_tx,
1392 	.ndo_stop = dpaa2_eth_stop,
1393 	.ndo_set_mac_address = dpaa2_eth_set_addr,
1394 	.ndo_get_stats64 = dpaa2_eth_get_stats,
1395 	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
1396 	.ndo_set_features = dpaa2_eth_set_features,
1397 	.ndo_do_ioctl = dpaa2_eth_ioctl,
1398 };
1399 
1400 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
1401 {
1402 	struct dpaa2_eth_channel *ch;
1403 
1404 	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
1405 
1406 	/* Update NAPI statistics */
1407 	ch->stats.cdan++;
1408 
1409 	napi_schedule_irqoff(&ch->napi);
1410 }
1411 
1412 /* Allocate and configure a DPCON object */
1413 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
1414 {
1415 	struct fsl_mc_device *dpcon;
1416 	struct device *dev = priv->net_dev->dev.parent;
1417 	struct dpcon_attr attrs;
1418 	int err;
1419 
1420 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
1421 				     FSL_MC_POOL_DPCON, &dpcon);
1422 	if (err) {
1423 		dev_info(dev, "Not enough DPCONs, will go on as-is\n");
1424 		return NULL;
1425 	}
1426 
1427 	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
1428 	if (err) {
1429 		dev_err(dev, "dpcon_open() failed\n");
1430 		goto free;
1431 	}
1432 
1433 	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
1434 	if (err) {
1435 		dev_err(dev, "dpcon_reset() failed\n");
1436 		goto close;
1437 	}
1438 
1439 	err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs);
1440 	if (err) {
1441 		dev_err(dev, "dpcon_get_attributes() failed\n");
1442 		goto close;
1443 	}
1444 
1445 	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
1446 	if (err) {
1447 		dev_err(dev, "dpcon_enable() failed\n");
1448 		goto close;
1449 	}
1450 
1451 	return dpcon;
1452 
1453 close:
1454 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1455 free:
1456 	fsl_mc_object_free(dpcon);
1457 
1458 	return NULL;
1459 }
1460 
1461 static void free_dpcon(struct dpaa2_eth_priv *priv,
1462 		       struct fsl_mc_device *dpcon)
1463 {
1464 	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
1465 	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
1466 	fsl_mc_object_free(dpcon);
1467 }
1468 
1469 static struct dpaa2_eth_channel *
1470 alloc_channel(struct dpaa2_eth_priv *priv)
1471 {
1472 	struct dpaa2_eth_channel *channel;
1473 	struct dpcon_attr attr;
1474 	struct device *dev = priv->net_dev->dev.parent;
1475 	int err;
1476 
1477 	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
1478 	if (!channel)
1479 		return NULL;
1480 
1481 	channel->dpcon = setup_dpcon(priv);
1482 	if (!channel->dpcon)
1483 		goto err_setup;
1484 
1485 	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
1486 				   &attr);
1487 	if (err) {
1488 		dev_err(dev, "dpcon_get_attributes() failed\n");
1489 		goto err_get_attr;
1490 	}
1491 
1492 	channel->dpcon_id = attr.id;
1493 	channel->ch_id = attr.qbman_ch_id;
1494 	channel->priv = priv;
1495 
1496 	return channel;
1497 
1498 err_get_attr:
1499 	free_dpcon(priv, channel->dpcon);
1500 err_setup:
1501 	kfree(channel);
1502 	return NULL;
1503 }
1504 
1505 static void free_channel(struct dpaa2_eth_priv *priv,
1506 			 struct dpaa2_eth_channel *channel)
1507 {
1508 	free_dpcon(priv, channel->dpcon);
1509 	kfree(channel);
1510 }
1511 
1512 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
1513  * and register data availability notifications
1514  */
1515 static int setup_dpio(struct dpaa2_eth_priv *priv)
1516 {
1517 	struct dpaa2_io_notification_ctx *nctx;
1518 	struct dpaa2_eth_channel *channel;
1519 	struct dpcon_notification_cfg dpcon_notif_cfg;
1520 	struct device *dev = priv->net_dev->dev.parent;
1521 	int i, err;
1522 
1523 	/* We want the ability to spread ingress traffic (RX, TX conf) to as
1524 	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
1526 	 * channels would be wasted).
1527 	 * Allocate one channel per core and register it to the core's
1528 	 * affine DPIO. If not enough channels are available for all cores
1529 	 * or if some cores don't have an affine DPIO, there will be no
1530 	 * ingress frame processing on those cores.
1531 	 */
1532 	cpumask_clear(&priv->dpio_cpumask);
1533 	for_each_online_cpu(i) {
1534 		/* Try to allocate a channel */
1535 		channel = alloc_channel(priv);
1536 		if (!channel) {
1537 			dev_info(dev,
1538 				 "No affine channel for cpu %d and above\n", i);
1539 			err = -ENODEV;
1540 			goto err_alloc_ch;
1541 		}
1542 
1543 		priv->channel[priv->num_channels] = channel;
1544 
1545 		nctx = &channel->nctx;
1546 		nctx->is_cdan = 1;
1547 		nctx->cb = cdan_cb;
1548 		nctx->id = channel->ch_id;
1549 		nctx->desired_cpu = i;
1550 
1551 		/* Register the new context */
1552 		channel->dpio = dpaa2_io_service_select(i);
1553 		err = dpaa2_io_service_register(channel->dpio, nctx);
1554 		if (err) {
1555 			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
1556 			/* If no affine DPIO for this core, there's probably
1557 			 * none available for next cores either. Signal we want
1558 			 * to retry later, in case the DPIO devices weren't
1559 			 * probed yet.
1560 			 */
1561 			err = -EPROBE_DEFER;
1562 			goto err_service_reg;
1563 		}
1564 
1565 		/* Register DPCON notification with MC */
1566 		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
1567 		dpcon_notif_cfg.priority = 0;
1568 		dpcon_notif_cfg.user_ctx = nctx->qman64;
1569 		err = dpcon_set_notification(priv->mc_io, 0,
1570 					     channel->dpcon->mc_handle,
1571 					     &dpcon_notif_cfg);
1572 		if (err) {
			dev_err(dev, "dpcon_set_notification() failed\n");
1574 			goto err_set_cdan;
1575 		}
1576 
1577 		/* If we managed to allocate a channel and also found an affine
1578 		 * DPIO for this core, add it to the final mask
1579 		 */
1580 		cpumask_set_cpu(i, &priv->dpio_cpumask);
1581 		priv->num_channels++;
1582 
1583 		/* Stop if we already have enough channels to accommodate all
1584 		 * RX and TX conf queues
1585 		 */
1586 		if (priv->num_channels == dpaa2_eth_queue_count(priv))
1587 			break;
1588 	}
1589 
1590 	return 0;
1591 
1592 err_set_cdan:
1593 	dpaa2_io_service_deregister(channel->dpio, nctx);
1594 err_service_reg:
1595 	free_channel(priv, channel);
1596 err_alloc_ch:
1597 	if (cpumask_empty(&priv->dpio_cpumask)) {
1598 		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
1599 		return err;
1600 	}
1601 
1602 	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
1603 		 cpumask_pr_args(&priv->dpio_cpumask));
1604 
1605 	return 0;
1606 }
1607 
1608 static void free_dpio(struct dpaa2_eth_priv *priv)
1609 {
1610 	int i;
1611 	struct dpaa2_eth_channel *ch;
1612 
1613 	/* deregister CDAN notifications and free channels */
1614 	for (i = 0; i < priv->num_channels; i++) {
1615 		ch = priv->channel[i];
1616 		dpaa2_io_service_deregister(ch->dpio, &ch->nctx);
1617 		free_channel(priv, ch);
1618 	}
1619 }
1620 
1621 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
1622 						    int cpu)
1623 {
1624 	struct device *dev = priv->net_dev->dev.parent;
1625 	int i;
1626 
1627 	for (i = 0; i < priv->num_channels; i++)
1628 		if (priv->channel[i]->nctx.desired_cpu == cpu)
1629 			return priv->channel[i];
1630 
1631 	/* We should never get here. Issue a warning and return
1632 	 * the first channel, because it's still better than nothing
1633 	 */
1634 	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
1635 
1636 	return priv->channel[0];
1637 }
1638 
1639 static void set_fq_affinity(struct dpaa2_eth_priv *priv)
1640 {
1641 	struct device *dev = priv->net_dev->dev.parent;
1642 	struct cpumask xps_mask;
1643 	struct dpaa2_eth_fq *fq;
1644 	int rx_cpu, txc_cpu;
1645 	int i, err;
1646 
1647 	/* For each FQ, pick one channel/CPU to deliver frames to.
1648 	 * This may well change at runtime, either through irqbalance or
1649 	 * through direct user intervention.
1650 	 */
1651 	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
1652 
1653 	for (i = 0; i < priv->num_fqs; i++) {
1654 		fq = &priv->fq[i];
1655 		switch (fq->type) {
1656 		case DPAA2_RX_FQ:
1657 			fq->target_cpu = rx_cpu;
1658 			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
1659 			if (rx_cpu >= nr_cpu_ids)
1660 				rx_cpu = cpumask_first(&priv->dpio_cpumask);
1661 			break;
1662 		case DPAA2_TX_CONF_FQ:
1663 			fq->target_cpu = txc_cpu;
1664 
			/* Tell the stack to affine the Tx queue associated
			 * with this confirmation queue to txc_cpu
			 */
1668 			cpumask_clear(&xps_mask);
1669 			cpumask_set_cpu(txc_cpu, &xps_mask);
1670 			err = netif_set_xps_queue(priv->net_dev, &xps_mask,
1671 						  fq->flowid);
1672 			if (err)
1673 				dev_err(dev, "Error setting XPS queue\n");
1674 
1675 			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
1676 			if (txc_cpu >= nr_cpu_ids)
1677 				txc_cpu = cpumask_first(&priv->dpio_cpumask);
1678 			break;
1679 		default:
1680 			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
1681 		}
1682 		fq->channel = get_affine_channel(priv, fq->target_cpu);
1683 	}
1684 }
1685 
1686 static void setup_fqs(struct dpaa2_eth_priv *priv)
1687 {
1688 	int i;
1689 
1690 	/* We have one TxConf FQ per Tx flow.
1691 	 * The number of Tx and Rx queues is the same.
1692 	 * Tx queues come first in the fq array.
1693 	 */
1694 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1695 		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
1696 		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
1697 		priv->fq[priv->num_fqs++].flowid = (u16)i;
1698 	}
1699 
1700 	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
1701 		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
1702 		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
1703 		priv->fq[priv->num_fqs++].flowid = (u16)i;
1704 	}
1705 
1706 	/* For each FQ, decide on which core to process incoming frames */
1707 	set_fq_affinity(priv);
1708 }
1709 
1710 /* Allocate and configure one buffer pool for each interface */
1711 static int setup_dpbp(struct dpaa2_eth_priv *priv)
1712 {
1713 	int err;
1714 	struct fsl_mc_device *dpbp_dev;
1715 	struct device *dev = priv->net_dev->dev.parent;
1716 	struct dpbp_attr dpbp_attrs;
1717 
1718 	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
1719 				     &dpbp_dev);
1720 	if (err) {
1721 		dev_err(dev, "DPBP device allocation failed\n");
1722 		return err;
1723 	}
1724 
1725 	priv->dpbp_dev = dpbp_dev;
1726 
1727 	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
1728 			&dpbp_dev->mc_handle);
1729 	if (err) {
1730 		dev_err(dev, "dpbp_open() failed\n");
1731 		goto err_open;
1732 	}
1733 
1734 	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
1735 	if (err) {
1736 		dev_err(dev, "dpbp_reset() failed\n");
1737 		goto err_reset;
1738 	}
1739 
1740 	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
1741 	if (err) {
1742 		dev_err(dev, "dpbp_enable() failed\n");
1743 		goto err_enable;
1744 	}
1745 
1746 	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
1747 				  &dpbp_attrs);
1748 	if (err) {
1749 		dev_err(dev, "dpbp_get_attributes() failed\n");
1750 		goto err_get_attr;
1751 	}
1752 	priv->bpid = dpbp_attrs.bpid;
1753 
1754 	return 0;
1755 
1756 err_get_attr:
1757 	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
1758 err_enable:
1759 err_reset:
1760 	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
1761 err_open:
1762 	fsl_mc_object_free(dpbp_dev);
1763 
1764 	return err;
1765 }
1766 
1767 static void free_dpbp(struct dpaa2_eth_priv *priv)
1768 {
1769 	drain_pool(priv);
1770 	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1771 	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
1772 	fsl_mc_object_free(priv->dpbp_dev);
1773 }
1774 
1775 static int set_buffer_layout(struct dpaa2_eth_priv *priv)
1776 {
1777 	struct device *dev = priv->net_dev->dev.parent;
1778 	struct dpni_buffer_layout buf_layout = {0};
1779 	int err;
1780 
1781 	/* We need to check for WRIOP version 1.0.0, but depending on the MC
1782 	 * version, this number is not always provided correctly on rev1.
1783 	 * We need to check for both alternatives in this situation.
1784 	 */
1785 	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
1786 	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
1787 		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
1788 	else
1789 		priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
1790 
1791 	/* tx buffer */
1792 	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
1793 	buf_layout.pass_timestamp = true;
1794 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
1795 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1796 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1797 				     DPNI_QUEUE_TX, &buf_layout);
1798 	if (err) {
1799 		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
1800 		return err;
1801 	}
1802 
1803 	/* tx-confirm buffer */
1804 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1805 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1806 				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
1807 	if (err) {
1808 		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
1809 		return err;
1810 	}
1811 
1812 	/* Now that we've set our tx buffer layout, retrieve the minimum
1813 	 * required tx data offset.
1814 	 */
1815 	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
1816 				      &priv->tx_data_offset);
1817 	if (err) {
1818 		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
1819 		return err;
1820 	}
1821 
1822 	if ((priv->tx_data_offset % 64) != 0)
1823 		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
1824 			 priv->tx_data_offset);
1825 
1826 	/* rx buffer */
1827 	buf_layout.pass_frame_status = true;
1828 	buf_layout.pass_parser_result = true;
1829 	buf_layout.data_align = priv->rx_buf_align;
1830 	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
1831 	buf_layout.private_data_size = 0;
1832 	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
1833 			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
1834 			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
1835 			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
1836 			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
1837 	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
1838 				     DPNI_QUEUE_RX, &buf_layout);
1839 	if (err) {
1840 		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
1841 		return err;
1842 	}
1843 
1844 	return 0;
1845 }
1846 
1847 /* Configure the DPNI object this interface is associated with */
1848 static int setup_dpni(struct fsl_mc_device *ls_dev)
1849 {
1850 	struct device *dev = &ls_dev->dev;
1851 	struct dpaa2_eth_priv *priv;
1852 	struct net_device *net_dev;
1853 	int err;
1854 
1855 	net_dev = dev_get_drvdata(dev);
1856 	priv = netdev_priv(net_dev);
1857 
1858 	/* get a handle for the DPNI object */
1859 	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
1860 	if (err) {
1861 		dev_err(dev, "dpni_open() failed\n");
1862 		return err;
1863 	}
1864 
1865 	/* Check if we can work with this DPNI object */
1866 	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
1867 				   &priv->dpni_ver_minor);
1868 	if (err) {
1869 		dev_err(dev, "dpni_get_api_version() failed\n");
1870 		goto close;
1871 	}
1872 	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
1873 		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
1874 			priv->dpni_ver_major, priv->dpni_ver_minor,
1875 			DPNI_VER_MAJOR, DPNI_VER_MINOR);
1876 		err = -ENOTSUPP;
1877 		goto close;
1878 	}
1879 
1880 	ls_dev->mc_io = priv->mc_io;
1881 	ls_dev->mc_handle = priv->mc_token;
1882 
1883 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1884 	if (err) {
1885 		dev_err(dev, "dpni_reset() failed\n");
1886 		goto close;
1887 	}
1888 
1889 	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
1890 				  &priv->dpni_attrs);
1891 	if (err) {
1892 		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
1893 		goto close;
1894 	}
1895 
1896 	err = set_buffer_layout(priv);
1897 	if (err)
1898 		goto close;
1899 
	priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
				       sizeof(struct dpaa2_eth_cls_rule),
				       GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}
1904 
1905 	return 0;
1906 
1907 close:
1908 	dpni_close(priv->mc_io, 0, priv->mc_token);
1909 
1910 	return err;
1911 }
1912 
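/* Return the DPNI object to its initial state and release our handle to it */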
1913 static void free_dpni(struct dpaa2_eth_priv *priv)
1914 {
1915 	int err;
1916 
1917 	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
1918 	if (err)
1919 		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
1920 			    err);
1921 
1922 	dpni_close(priv->mc_io, 0, priv->mc_token);
1923 }
1924 
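/* Configure a Rx frame queue: look up its FQID, have it notify the
 * associated channel's DPCON when frames arrive and set a taildrop threshold
 */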
1925 static int setup_rx_flow(struct dpaa2_eth_priv *priv,
1926 			 struct dpaa2_eth_fq *fq)
1927 {
1928 	struct device *dev = priv->net_dev->dev.parent;
1929 	struct dpni_queue queue;
1930 	struct dpni_queue_id qid;
1931 	struct dpni_taildrop td;
1932 	int err;
1933 
1934 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1935 			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
1936 	if (err) {
1937 		dev_err(dev, "dpni_get_queue(RX) failed\n");
1938 		return err;
1939 	}
1940 
1941 	fq->fqid = qid.fqid;
1942 
1943 	queue.destination.id = fq->channel->dpcon_id;
1944 	queue.destination.type = DPNI_DEST_DPCON;
1945 	queue.destination.priority = 1;
1946 	queue.user_context = (u64)(uintptr_t)fq;
1947 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
1948 			     DPNI_QUEUE_RX, 0, fq->flowid,
1949 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
1950 			     &queue);
1951 	if (err) {
1952 		dev_err(dev, "dpni_set_queue(RX) failed\n");
1953 		return err;
1954 	}
1955 
	memset(&td, 0, sizeof(td));
	td.enable = 1;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;
1958 	err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE,
1959 				DPNI_QUEUE_RX, 0, fq->flowid, &td);
1960 	if (err) {
		dev_err(dev, "dpni_set_taildrop() failed\n");
1962 		return err;
1963 	}
1964 
1965 	return 0;
1966 }
1967 
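/* Configure a Tx flow: record the queuing destination bin used when
 * enqueueing Tx frames and route the corresponding Tx confirmation queue
 * to its channel's DPCON
 */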
1968 static int setup_tx_flow(struct dpaa2_eth_priv *priv,
1969 			 struct dpaa2_eth_fq *fq)
1970 {
1971 	struct device *dev = priv->net_dev->dev.parent;
1972 	struct dpni_queue queue;
1973 	struct dpni_queue_id qid;
1974 	int err;
1975 
1976 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1977 			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
1978 	if (err) {
1979 		dev_err(dev, "dpni_get_queue(TX) failed\n");
1980 		return err;
1981 	}
1982 
1983 	fq->tx_qdbin = qid.qdbin;
1984 
1985 	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
1986 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
1987 			     &queue, &qid);
1988 	if (err) {
1989 		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
1990 		return err;
1991 	}
1992 
1993 	fq->fqid = qid.fqid;
1994 
1995 	queue.destination.id = fq->channel->dpcon_id;
1996 	queue.destination.type = DPNI_DEST_DPCON;
1997 	queue.destination.priority = 0;
1998 	queue.user_context = (u64)(uintptr_t)fq;
1999 	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
2000 			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
2001 			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
2002 			     &queue);
2003 	if (err) {
2004 		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
2005 		return err;
2006 	}
2007 
2008 	return 0;
2009 }
2010 
2011 /* Supported header fields for Rx hash distribution key */
2012 static const struct dpaa2_eth_dist_fields dist_fields[] = {
2013 	{
2014 		/* L2 header */
2015 		.rxnfc_field = RXH_L2DA,
2016 		.cls_prot = NET_PROT_ETH,
2017 		.cls_field = NH_FLD_ETH_DA,
2018 		.size = 6,
2019 	}, {
2020 		.cls_prot = NET_PROT_ETH,
2021 		.cls_field = NH_FLD_ETH_SA,
2022 		.size = 6,
2023 	}, {
2024 		/* This is the last ethertype field parsed:
2025 		 * depending on frame format, it can be the MAC ethertype
2026 		 * or the VLAN etype.
2027 		 */
2028 		.cls_prot = NET_PROT_ETH,
2029 		.cls_field = NH_FLD_ETH_TYPE,
2030 		.size = 2,
2031 	}, {
2032 		/* VLAN header */
2033 		.rxnfc_field = RXH_VLAN,
2034 		.cls_prot = NET_PROT_VLAN,
2035 		.cls_field = NH_FLD_VLAN_TCI,
2036 		.size = 2,
2037 	}, {
2038 		/* IP header */
2039 		.rxnfc_field = RXH_IP_SRC,
2040 		.cls_prot = NET_PROT_IP,
2041 		.cls_field = NH_FLD_IP_SRC,
2042 		.size = 4,
2043 	}, {
2044 		.rxnfc_field = RXH_IP_DST,
2045 		.cls_prot = NET_PROT_IP,
2046 		.cls_field = NH_FLD_IP_DST,
2047 		.size = 4,
2048 	}, {
2049 		.rxnfc_field = RXH_L3_PROTO,
2050 		.cls_prot = NET_PROT_IP,
2051 		.cls_field = NH_FLD_IP_PROTO,
2052 		.size = 1,
2053 	}, {
2054 		/* Using UDP ports, this is functionally equivalent to raw
2055 		 * byte pairs from L4 header.
2056 		 */
2057 		.rxnfc_field = RXH_L4_B_0_1,
2058 		.cls_prot = NET_PROT_UDP,
2059 		.cls_field = NH_FLD_UDP_PORT_SRC,
2060 		.size = 2,
2061 	}, {
2062 		.rxnfc_field = RXH_L4_B_2_3,
2063 		.cls_prot = NET_PROT_UDP,
2064 		.cls_field = NH_FLD_UDP_PORT_DST,
2065 		.size = 2,
2066 	},
2067 };
2068 
2069 /* Configure the Rx hash key using the legacy API */
2070 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2071 {
2072 	struct device *dev = priv->net_dev->dev.parent;
2073 	struct dpni_rx_tc_dist_cfg dist_cfg;
2074 	int err;
2075 
2076 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2077 
2078 	dist_cfg.key_cfg_iova = key;
2079 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2080 	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
2081 
2082 	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
2083 	if (err)
2084 		dev_err(dev, "dpni_set_rx_tc_dist failed\n");
2085 
2086 	return err;
2087 }
2088 
2089 /* Configure the Rx hash key using the new API */
2090 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2091 {
2092 	struct device *dev = priv->net_dev->dev.parent;
2093 	struct dpni_rx_dist_cfg dist_cfg;
2094 	int err;
2095 
2096 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2097 
2098 	dist_cfg.key_cfg_iova = key;
2099 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2100 	dist_cfg.enable = 1;
2101 
2102 	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2103 	if (err)
2104 		dev_err(dev, "dpni_set_rx_hash_dist failed\n");
2105 
2106 	return err;
2107 }
2108 
2109 /* Configure the Rx flow classification key */
2110 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
2111 {
2112 	struct device *dev = priv->net_dev->dev.parent;
2113 	struct dpni_rx_dist_cfg dist_cfg;
2114 	int err;
2115 
2116 	memset(&dist_cfg, 0, sizeof(dist_cfg));
2117 
2118 	dist_cfg.key_cfg_iova = key;
2119 	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
2120 	dist_cfg.enable = 1;
2121 
2122 	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
2123 	if (err)
2124 		dev_err(dev, "dpni_set_rx_fs_dist failed\n");
2125 
2126 	return err;
2127 }
2128 
2129 /* Size of the Rx flow classification key */
2130 int dpaa2_eth_cls_key_size(void)
2131 {
2132 	int i, size = 0;
2133 
2134 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
2135 		size += dist_fields[i].size;
2136 
2137 	return size;
2138 }
2139 
2140 /* Offset of header field in Rx classification key */
2141 int dpaa2_eth_cls_fld_off(int prot, int field)
2142 {
2143 	int i, off = 0;
2144 
2145 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2146 		if (dist_fields[i].cls_prot == prot &&
2147 		    dist_fields[i].cls_field == field)
2148 			return off;
2149 		off += dist_fields[i].size;
2150 	}
2151 
2152 	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
2153 	return 0;
2154 }
2155 
2156 /* Set Rx distribution (hash or flow classification) key
2157  * flags is a combination of RXH_ bits
2158  */
2159 int dpaa2_eth_set_dist_key(struct net_device *net_dev,
2160 			   enum dpaa2_eth_rx_dist type, u64 flags)
2161 {
2162 	struct device *dev = net_dev->dev.parent;
2163 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2164 	struct dpkg_profile_cfg cls_cfg;
2165 	u32 rx_hash_fields = 0;
2166 	dma_addr_t key_iova;
2167 	u8 *dma_mem;
2168 	int i;
2169 	int err = 0;
2170 
2171 	memset(&cls_cfg, 0, sizeof(cls_cfg));
2172 
2173 	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
2174 		struct dpkg_extract *key =
2175 			&cls_cfg.extracts[cls_cfg.num_extracts];
2176 
		/* For the Rx hashing key we set only the selected fields.
		 * For the Rx flow classification key we set all supported
		 * fields.
		 */
2180 		if (type == DPAA2_ETH_RX_DIST_HASH) {
2181 			if (!(flags & dist_fields[i].rxnfc_field))
2182 				continue;
2183 			rx_hash_fields |= dist_fields[i].rxnfc_field;
2184 		}
2185 
2186 		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
2187 			dev_err(dev, "error adding key extraction rule, too many rules?\n");
2188 			return -E2BIG;
2189 		}
2190 
2191 		key->type = DPKG_EXTRACT_FROM_HDR;
2192 		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
2193 		key->extract.from_hdr.type = DPKG_FULL_FIELD;
2194 		key->extract.from_hdr.field = dist_fields[i].cls_field;
2195 		cls_cfg.num_extracts++;
2196 	}
2197 
2198 	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
2199 	if (!dma_mem)
2200 		return -ENOMEM;
2201 
2202 	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
2203 	if (err) {
2204 		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
2205 		goto free_key;
2206 	}
2207 
	/* DMA map the key configuration area so the MC firmware can read it */
2209 	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
2210 				  DMA_TO_DEVICE);
2211 	if (dma_mapping_error(dev, key_iova)) {
2212 		dev_err(dev, "DMA mapping failed\n");
2213 		err = -ENOMEM;
2214 		goto free_key;
2215 	}
2216 
2217 	if (type == DPAA2_ETH_RX_DIST_HASH) {
2218 		if (dpaa2_eth_has_legacy_dist(priv))
2219 			err = config_legacy_hash_key(priv, key_iova);
2220 		else
2221 			err = config_hash_key(priv, key_iova);
2222 	} else {
2223 		err = config_cls_key(priv, key_iova);
2224 	}
2225 
2226 	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
2227 			 DMA_TO_DEVICE);
2228 	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
2229 		priv->rx_hash_fields = rx_hash_fields;
2230 
2231 free_key:
2232 	kfree(dma_mem);
2233 	return err;
2234 }
2235 
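/* Set the Rx hash key based on the RXH_ flags selected by the caller.
 * This is typically reached through the ethtool rxnfc interface; purely as an
 * illustration, a command along the lines of
 * "ethtool -N <iface> rx-flow-hash udp4 sdfn" would select the IP source,
 * IP destination and UDP port fields for hashing.
 */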
2236 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
2237 {
2238 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2239 
2240 	if (!dpaa2_eth_hash_enabled(priv))
2241 		return -EOPNOTSUPP;
2242 
2243 	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
2244 }
2245 
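/* Enable Rx flow classification, provided the MC version and the DPNI
 * options allow it. The classification key covers all supported header
 * fields and cannot be changed afterwards.
 */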
2246 static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
2247 {
2248 	struct device *dev = priv->net_dev->dev.parent;
2249 
2250 	/* Check if we actually support Rx flow classification */
2251 	if (dpaa2_eth_has_legacy_dist(priv)) {
2252 		dev_dbg(dev, "Rx cls not supported by current MC version\n");
2253 		return -EOPNOTSUPP;
2254 	}
2255 
2256 	if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
2257 	    !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
2258 		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
2259 		return -EOPNOTSUPP;
2260 	}
2261 
2262 	if (!dpaa2_eth_hash_enabled(priv)) {
2263 		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
2264 		return -EOPNOTSUPP;
2265 	}
2266 
2267 	priv->rx_cls_enabled = 1;
2268 
2269 	return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
2270 }
2271 
2272 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
2273  * frame queues and channels
2274  */
2275 static int bind_dpni(struct dpaa2_eth_priv *priv)
2276 {
2277 	struct net_device *net_dev = priv->net_dev;
2278 	struct device *dev = net_dev->dev.parent;
2279 	struct dpni_pools_cfg pools_params;
2280 	struct dpni_error_cfg err_cfg;
2281 	int err = 0;
2282 	int i;
2283 
	memset(&pools_params, 0, sizeof(pools_params));
	pools_params.num_dpbp = 1;
2285 	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
2286 	pools_params.pools[0].backup_pool = 0;
2287 	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
2288 	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
2289 	if (err) {
2290 		dev_err(dev, "dpni_set_pools() failed\n");
2291 		return err;
2292 	}
2293 
2294 	/* have the interface implicitly distribute traffic based on
2295 	 * the default hash key
2296 	 */
2297 	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
2298 	if (err && err != -EOPNOTSUPP)
2299 		dev_err(dev, "Failed to configure hashing\n");
2300 
2301 	/* Configure the flow classification key; it includes all
2302 	 * supported header fields and cannot be modified at runtime
2303 	 */
2304 	err = dpaa2_eth_set_cls(priv);
2305 	if (err && err != -EOPNOTSUPP)
2306 		dev_err(dev, "Failed to configure Rx classification key\n");
2307 
2308 	/* Configure handling of error frames */
2309 	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
2310 	err_cfg.set_frame_annotation = 1;
2311 	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
2312 	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
2313 				       &err_cfg);
2314 	if (err) {
2315 		dev_err(dev, "dpni_set_errors_behavior failed\n");
2316 		return err;
2317 	}
2318 
2319 	/* Configure Rx and Tx conf queues to generate CDANs */
2320 	for (i = 0; i < priv->num_fqs; i++) {
2321 		switch (priv->fq[i].type) {
2322 		case DPAA2_RX_FQ:
2323 			err = setup_rx_flow(priv, &priv->fq[i]);
2324 			break;
2325 		case DPAA2_TX_CONF_FQ:
2326 			err = setup_tx_flow(priv, &priv->fq[i]);
2327 			break;
2328 		default:
2329 			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
2330 			return -EINVAL;
2331 		}
2332 		if (err)
2333 			return err;
2334 	}
2335 
2336 	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
2337 			    DPNI_QUEUE_TX, &priv->tx_qdid);
2338 	if (err) {
2339 		dev_err(dev, "dpni_get_qdid() failed\n");
2340 		return err;
2341 	}
2342 
2343 	return 0;
2344 }
2345 
2346 /* Allocate rings for storing incoming frame descriptors */
2347 static int alloc_rings(struct dpaa2_eth_priv *priv)
2348 {
2349 	struct net_device *net_dev = priv->net_dev;
2350 	struct device *dev = net_dev->dev.parent;
2351 	int i;
2352 
2353 	for (i = 0; i < priv->num_channels; i++) {
2354 		priv->channel[i]->store =
2355 			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
2356 		if (!priv->channel[i]->store) {
2357 			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
2358 			goto err_ring;
2359 		}
2360 	}
2361 
2362 	return 0;
2363 
2364 err_ring:
2365 	for (i = 0; i < priv->num_channels; i++) {
2366 		if (!priv->channel[i]->store)
2367 			break;
2368 		dpaa2_io_store_destroy(priv->channel[i]->store);
2369 	}
2370 
2371 	return -ENOMEM;
2372 }
2373 
2374 static void free_rings(struct dpaa2_eth_priv *priv)
2375 {
2376 	int i;
2377 
2378 	for (i = 0; i < priv->num_channels; i++)
2379 		dpaa2_io_store_destroy(priv->channel[i]->store);
2380 }
2381 
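/* Choose the MAC address for the net device: prefer the address provisioned
 * by firmware for the physical port, fall back to the one already set on the
 * DPNI, and generate a random address if both are all-zero
 */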
2382 static int set_mac_addr(struct dpaa2_eth_priv *priv)
2383 {
2384 	struct net_device *net_dev = priv->net_dev;
2385 	struct device *dev = net_dev->dev.parent;
2386 	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
2387 	int err;
2388 
2389 	/* Get firmware address, if any */
2390 	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
2391 	if (err) {
2392 		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
2393 		return err;
2394 	}
2395 
2396 	/* Get DPNI attributes address, if any */
2397 	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2398 					dpni_mac_addr);
2399 	if (err) {
2400 		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
2401 		return err;
2402 	}
2403 
2404 	/* First check if firmware has any address configured by bootloader */
2405 	if (!is_zero_ether_addr(mac_addr)) {
2406 		/* If the DPMAC addr != DPNI addr, update it */
2407 		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
2408 			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
2409 							priv->mc_token,
2410 							mac_addr);
2411 			if (err) {
2412 				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2413 				return err;
2414 			}
2415 		}
2416 		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
2417 	} else if (is_zero_ether_addr(dpni_mac_addr)) {
2418 		/* No MAC address configured, fill in net_dev->dev_addr
2419 		 * with a random one
2420 		 */
2421 		eth_hw_addr_random(net_dev);
2422 		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
2423 
2424 		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2425 						net_dev->dev_addr);
2426 		if (err) {
2427 			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
2428 			return err;
2429 		}
2430 
		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" MAC address,
		 * at least until the next reboot. This also allows
		 * register_netdevice() to properly fill in net_dev->perm_addr.
		 */
2436 		net_dev->addr_assign_type = NET_ADDR_PERM;
2437 	} else {
2438 		/* NET_ADDR_PERM is default, all we have to do is
2439 		 * fill in the device addr.
2440 		 */
2441 		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
2442 	}
2443 
2444 	return 0;
2445 }
2446 
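/* Basic net device setup: MAC and broadcast addresses, MTU limits, the
 * number of Rx/Tx queues exposed to the stack and the supported features
 */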
2447 static int netdev_init(struct net_device *net_dev)
2448 {
2449 	struct device *dev = net_dev->dev.parent;
2450 	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2451 	u32 options = priv->dpni_attrs.options;
2452 	u64 supported = 0, not_supported = 0;
2453 	u8 bcast_addr[ETH_ALEN];
2454 	u8 num_queues;
2455 	int err;
2456 
2457 	net_dev->netdev_ops = &dpaa2_eth_ops;
2458 	net_dev->ethtool_ops = &dpaa2_ethtool_ops;
2459 
2460 	err = set_mac_addr(priv);
2461 	if (err)
2462 		return err;
2463 
2464 	/* Explicitly add the broadcast address to the MAC filtering table */
2465 	eth_broadcast_addr(bcast_addr);
2466 	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
2467 	if (err) {
2468 		dev_err(dev, "dpni_add_mac_addr() failed\n");
2469 		return err;
2470 	}
2471 
2472 	/* Set MTU upper limit; lower limit is 68B (default value) */
2473 	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
2474 	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
2475 					DPAA2_ETH_MFL);
2476 	if (err) {
2477 		dev_err(dev, "dpni_set_max_frame_length() failed\n");
2478 		return err;
2479 	}
2480 
2481 	/* Set actual number of queues in the net device */
2482 	num_queues = dpaa2_eth_queue_count(priv);
2483 	err = netif_set_real_num_tx_queues(net_dev, num_queues);
2484 	if (err) {
2485 		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
2486 		return err;
2487 	}
2488 	err = netif_set_real_num_rx_queues(net_dev, num_queues);
2489 	if (err) {
2490 		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
2491 		return err;
2492 	}
2493 
2494 	/* Capabilities listing */
2495 	supported |= IFF_LIVE_ADDR_CHANGE;
2496 
2497 	if (options & DPNI_OPT_NO_MAC_FILTER)
2498 		not_supported |= IFF_UNICAST_FLT;
2499 	else
2500 		supported |= IFF_UNICAST_FLT;
2501 
2502 	net_dev->priv_flags |= supported;
2503 	net_dev->priv_flags &= ~not_supported;
2504 
2505 	/* Features */
2506 	net_dev->features = NETIF_F_RXCSUM |
2507 			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2508 			    NETIF_F_SG | NETIF_F_HIGHDMA |
2509 			    NETIF_F_LLTX;
2510 	net_dev->hw_features = net_dev->features;
2511 
2512 	return 0;
2513 }
2514 
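/* Fallback used when no link state interrupt is available: periodically poll
 * the MC firmware for link state changes
 */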
2515 static int poll_link_state(void *arg)
2516 {
2517 	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
2518 	int err;
2519 
2520 	while (!kthread_should_stop()) {
2521 		err = link_state_update(priv);
2522 		if (unlikely(err))
2523 			return err;
2524 
2525 		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
2526 	}
2527 
2528 	return 0;
2529 }
2530 
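/* Threaded handler for the DPNI interrupt: check the event status and
 * refresh the link state if it changed
 */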
2531 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
2532 {
2533 	u32 status = ~0;
2534 	struct device *dev = (struct device *)arg;
2535 	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
2536 	struct net_device *net_dev = dev_get_drvdata(dev);
2537 	int err;
2538 
2539 	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
2540 				  DPNI_IRQ_INDEX, &status);
2541 	if (unlikely(err)) {
2542 		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
2543 		return IRQ_HANDLED;
2544 	}
2545 
2546 	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
2547 		link_state_update(netdev_priv(net_dev));
2548 
2549 	return IRQ_HANDLED;
2550 }
2551 
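/* Allocate MC interrupts for the DPNI and register a threaded handler for
 * link state change events
 */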
2552 static int setup_irqs(struct fsl_mc_device *ls_dev)
2553 {
2554 	int err = 0;
2555 	struct fsl_mc_device_irq *irq;
2556 
2557 	err = fsl_mc_allocate_irqs(ls_dev);
2558 	if (err) {
2559 		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
2560 		return err;
2561 	}
2562 
2563 	irq = ls_dev->irqs[0];
2564 	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
2565 					NULL, dpni_irq0_handler_thread,
2566 					IRQF_NO_SUSPEND | IRQF_ONESHOT,
2567 					dev_name(&ls_dev->dev), &ls_dev->dev);
2568 	if (err < 0) {
2569 		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
2570 		goto free_mc_irq;
2571 	}
2572 
2573 	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
2574 				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
2575 	if (err < 0) {
2576 		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
2577 		goto free_irq;
2578 	}
2579 
2580 	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
2581 				  DPNI_IRQ_INDEX, 1);
2582 	if (err < 0) {
2583 		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
2584 		goto free_irq;
2585 	}
2586 
2587 	return 0;
2588 
2589 free_irq:
2590 	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
2591 free_mc_irq:
2592 	fsl_mc_free_irqs(ls_dev);
2593 
2594 	return err;
2595 }
2596 
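/* Register a NAPI instance for each channel */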
2597 static void add_ch_napi(struct dpaa2_eth_priv *priv)
2598 {
2599 	int i;
2600 	struct dpaa2_eth_channel *ch;
2601 
2602 	for (i = 0; i < priv->num_channels; i++) {
2603 		ch = priv->channel[i];
2604 		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
2605 		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
2606 			       NAPI_POLL_WEIGHT);
2607 	}
2608 }
2609 
2610 static void del_ch_napi(struct dpaa2_eth_priv *priv)
2611 {
2612 	int i;
2613 	struct dpaa2_eth_channel *ch;
2614 
2615 	for (i = 0; i < priv->num_channels; i++) {
2616 		ch = priv->channel[i];
2617 		netif_napi_del(&ch->napi);
2618 	}
2619 }
2620 
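/* Probe a DPNI object: allocate the net device and an MC portal, configure
 * the DPNI and the DPIO and DPBP objects it depends on, then set up NAPI,
 * per-cpu statistics and interrupts (or link polling as a fallback) before
 * registering the net device
 */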
2621 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
2622 {
2623 	struct device *dev;
2624 	struct net_device *net_dev = NULL;
2625 	struct dpaa2_eth_priv *priv = NULL;
2626 	int err = 0;
2627 
2628 	dev = &dpni_dev->dev;
2629 
2630 	/* Net device */
2631 	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
2632 	if (!net_dev) {
2633 		dev_err(dev, "alloc_etherdev_mq() failed\n");
2634 		return -ENOMEM;
2635 	}
2636 
2637 	SET_NETDEV_DEV(net_dev, dev);
2638 	dev_set_drvdata(dev, net_dev);
2639 
2640 	priv = netdev_priv(net_dev);
2641 	priv->net_dev = net_dev;
2642 
2643 	priv->iommu_domain = iommu_get_domain_for_dev(dev);
2644 
2645 	/* Obtain a MC portal */
2646 	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
2647 				     &priv->mc_io);
2648 	if (err) {
2649 		if (err == -ENXIO)
2650 			err = -EPROBE_DEFER;
2651 		else
2652 			dev_err(dev, "MC portal allocation failed\n");
2653 		goto err_portal_alloc;
2654 	}
2655 
2656 	/* MC objects initialization and configuration */
2657 	err = setup_dpni(dpni_dev);
2658 	if (err)
2659 		goto err_dpni_setup;
2660 
2661 	err = setup_dpio(priv);
2662 	if (err)
2663 		goto err_dpio_setup;
2664 
2665 	setup_fqs(priv);
2666 
2667 	err = setup_dpbp(priv);
2668 	if (err)
2669 		goto err_dpbp_setup;
2670 
2671 	err = bind_dpni(priv);
2672 	if (err)
2673 		goto err_bind;
2674 
2675 	/* Add a NAPI context for each channel */
2676 	add_ch_napi(priv);
2677 
2678 	/* Percpu statistics */
2679 	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
2680 	if (!priv->percpu_stats) {
2681 		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
2682 		err = -ENOMEM;
2683 		goto err_alloc_percpu_stats;
2684 	}
2685 	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
2686 	if (!priv->percpu_extras) {
2687 		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
2688 		err = -ENOMEM;
2689 		goto err_alloc_percpu_extras;
2690 	}
2691 
2692 	err = netdev_init(net_dev);
2693 	if (err)
2694 		goto err_netdev_init;
2695 
2696 	/* Configure checksum offload based on current interface flags */
2697 	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
2698 	if (err)
2699 		goto err_csum;
2700 
2701 	err = set_tx_csum(priv, !!(net_dev->features &
2702 				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2703 	if (err)
2704 		goto err_csum;
2705 
2706 	err = alloc_rings(priv);
2707 	if (err)
2708 		goto err_alloc_rings;
2709 
2710 	err = setup_irqs(dpni_dev);
2711 	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
2713 		priv->poll_thread = kthread_run(poll_link_state, priv,
2714 						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			err = PTR_ERR(priv->poll_thread);
			goto err_poll_thread;
		}
2719 		priv->do_link_poll = true;
2720 	}
2721 
2722 	err = register_netdev(net_dev);
2723 	if (err < 0) {
2724 		dev_err(dev, "register_netdev() failed\n");
2725 		goto err_netdev_reg;
2726 	}
2727 
2728 	dev_info(dev, "Probed interface %s\n", net_dev->name);
2729 	return 0;
2730 
2731 err_netdev_reg:
2732 	if (priv->do_link_poll)
2733 		kthread_stop(priv->poll_thread);
2734 	else
2735 		fsl_mc_free_irqs(dpni_dev);
2736 err_poll_thread:
2737 	free_rings(priv);
2738 err_alloc_rings:
2739 err_csum:
2740 err_netdev_init:
2741 	free_percpu(priv->percpu_extras);
2742 err_alloc_percpu_extras:
2743 	free_percpu(priv->percpu_stats);
2744 err_alloc_percpu_stats:
2745 	del_ch_napi(priv);
2746 err_bind:
2747 	free_dpbp(priv);
2748 err_dpbp_setup:
2749 	free_dpio(priv);
2750 err_dpio_setup:
2751 	free_dpni(priv);
2752 err_dpni_setup:
2753 	fsl_mc_portal_free(priv->mc_io);
2754 err_portal_alloc:
2755 	dev_set_drvdata(dev, NULL);
2756 	free_netdev(net_dev);
2757 
2758 	return err;
2759 }
2760 
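/* Tear down everything set up at probe time, in reverse order */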
2761 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2762 {
2763 	struct device *dev;
2764 	struct net_device *net_dev;
2765 	struct dpaa2_eth_priv *priv;
2766 
2767 	dev = &ls_dev->dev;
2768 	net_dev = dev_get_drvdata(dev);
2769 	priv = netdev_priv(net_dev);
2770 
2771 	unregister_netdev(net_dev);
2772 
2773 	if (priv->do_link_poll)
2774 		kthread_stop(priv->poll_thread);
2775 	else
2776 		fsl_mc_free_irqs(ls_dev);
2777 
2778 	free_rings(priv);
2779 	free_percpu(priv->percpu_stats);
2780 	free_percpu(priv->percpu_extras);
2781 
2782 	del_ch_napi(priv);
2783 	free_dpbp(priv);
2784 	free_dpio(priv);
2785 	free_dpni(priv);
2786 
2787 	fsl_mc_portal_free(priv->mc_io);
2788 
	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);
2792 
2793 	return 0;
2794 }
2795 
2796 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2797 	{
2798 		.vendor = FSL_MC_VENDOR_FREESCALE,
2799 		.obj_type = "dpni",
2800 	},
2801 	{ .vendor = 0x0 }
2802 };
2803 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2804 
2805 static struct fsl_mc_driver dpaa2_eth_driver = {
2806 	.driver = {
2807 		.name = KBUILD_MODNAME,
2808 		.owner = THIS_MODULE,
2809 	},
2810 	.probe = dpaa2_eth_probe,
2811 	.remove = dpaa2_eth_remove,
2812 	.match_id_table = dpaa2_eth_match_id_table
2813 };
2814 
2815 module_fsl_mc_driver(dpaa2_eth_driver);
2816