1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2 /* Copyright 2014-2016 Freescale Semiconductor Inc.
3 * Copyright 2016-2022 NXP
4 */
5 #include <linux/init.h>
6 #include <linux/module.h>
7 #include <linux/platform_device.h>
8 #include <linux/etherdevice.h>
9 #include <linux/of_net.h>
10 #include <linux/interrupt.h>
11 #include <linux/kthread.h>
12 #include <linux/iommu.h>
13 #include <linux/fsl/mc.h>
14 #include <linux/bpf.h>
15 #include <linux/bpf_trace.h>
16 #include <linux/fsl/ptp_qoriq.h>
17 #include <linux/ptp_classify.h>
18 #include <net/pkt_cls.h>
19 #include <net/sock.h>
20 #include <net/tso.h>
21 #include <net/xdp_sock_drv.h>
22
23 #include "dpaa2-eth.h"
24
25 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpaa2-eth files
26 * using these trace events only need to #include "dpaa2-eth-trace.h"
27 */
28 #define CREATE_TRACE_POINTS
29 #include "dpaa2-eth-trace.h"
30
31 MODULE_LICENSE("Dual BSD/GPL");
32 MODULE_AUTHOR("Freescale Semiconductor, Inc");
33 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");
34
35 struct ptp_qoriq *dpaa2_ptp;
36 EXPORT_SYMBOL(dpaa2_ptp);
37
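/* Detect which optional features the DPNI firmware supports. For now this
 * only checks whether the one-step PTP registers can be updated directly.
 */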
38 static void dpaa2_eth_detect_features(struct dpaa2_eth_priv *priv)
39 {
40 priv->features = 0;
41
42 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_PTP_ONESTEP_VER_MAJOR,
43 DPNI_PTP_ONESTEP_VER_MINOR) >= 0)
44 priv->features |= DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT;
45 }
46
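/* Update the one-step PTP correction offset indirectly, through a firmware
 * (MC) command. Used when the single-step register is not directly mappable.
 */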
47 static void dpaa2_update_ptp_onestep_indirect(struct dpaa2_eth_priv *priv,
48 u32 offset, u8 udp)
49 {
50 struct dpni_single_step_cfg cfg;
51
52 cfg.en = 1;
53 cfg.ch_update = udp;
54 cfg.offset = offset;
55 cfg.peer_delay = 0;
56
57 if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token, &cfg))
58 WARN_ONCE(1, "Failed to set single step register");
59 }
60
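/* Update the one-step PTP correction offset by writing the memory-mapped
 * single-step register directly, avoiding a firmware round trip.
 */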
61 static void dpaa2_update_ptp_onestep_direct(struct dpaa2_eth_priv *priv,
62 u32 offset, u8 udp)
63 {
64 u32 val = 0;
65
66 val = DPAA2_PTP_SINGLE_STEP_ENABLE |
67 DPAA2_PTP_SINGLE_CORRECTION_OFF(offset);
68
69 if (udp)
70 val |= DPAA2_PTP_SINGLE_STEP_CH;
71
72 if (priv->onestep_reg_base)
73 writel(val, priv->onestep_reg_base);
74 }
75
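/* Select the one-step PTP update method: default to the indirect (firmware
 * command) path and switch to direct register writes only if the firmware
 * exposes a register base address that we can ioremap.
 */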
76 static void dpaa2_ptp_onestep_reg_update_method(struct dpaa2_eth_priv *priv)
77 {
78 struct device *dev = priv->net_dev->dev.parent;
79 struct dpni_single_step_cfg ptp_cfg;
80
81 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_indirect;
82
83 if (!(priv->features & DPAA2_ETH_FEATURE_ONESTEP_CFG_DIRECT))
84 return;
85
86 if (dpni_get_single_step_cfg(priv->mc_io, 0,
87 priv->mc_token, &ptp_cfg)) {
88 dev_err(dev, "dpni_get_single_step_cfg cannot retrieve onestep reg, falling back to indirect update\n");
89 return;
90 }
91
92 if (!ptp_cfg.ptp_onestep_reg_base) {
93 dev_err(dev, "1588 onestep reg not available, falling back to indirect update\n");
94 return;
95 }
96
97 priv->onestep_reg_base = ioremap(ptp_cfg.ptp_onestep_reg_base,
98 sizeof(u32));
99 if (!priv->onestep_reg_base) {
100 dev_err(dev, "1588 onestep reg cannot be mapped, falling back to indirect update\n");
101 return;
102 }
103
104 priv->dpaa2_set_onestep_params_cb = dpaa2_update_ptp_onestep_direct;
105 }
106
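/* Translate a frame's IOVA into a kernel virtual address, going through the
 * IOMMU domain when one is attached to the device.
 */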
107 void *dpaa2_iova_to_virt(struct iommu_domain *domain,
108 dma_addr_t iova_addr)
109 {
110 phys_addr_t phys_addr;
111
112 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;
113
114 return phys_to_virt(phys_addr);
115 }
116
117 static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
118 u32 fd_status,
119 struct sk_buff *skb)
120 {
121 skb_checksum_none_assert(skb);
122
123 /* HW checksum validation is disabled, nothing to do here */
124 if (!(priv->net_dev->features & NETIF_F_RXCSUM))
125 return;
126
127 /* Read checksum validation bits */
128 if (!((fd_status & DPAA2_FAS_L3CV) &&
129 (fd_status & DPAA2_FAS_L4CV)))
130 return;
131
132 /* Inform the stack there's no need to compute L3/L4 csum anymore */
133 skb->ip_summed = CHECKSUM_UNNECESSARY;
134 }
135
136 /* Free a received FD.
137 * Not to be used for Tx conf FDs or on any other paths.
138 */
139 static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
140 const struct dpaa2_fd *fd,
141 void *vaddr)
142 {
143 struct device *dev = priv->net_dev->dev.parent;
144 dma_addr_t addr = dpaa2_fd_get_addr(fd);
145 u8 fd_format = dpaa2_fd_get_format(fd);
146 struct dpaa2_sg_entry *sgt;
147 void *sg_vaddr;
148 int i;
149
150 /* If single buffer frame, just free the data buffer */
151 if (fd_format == dpaa2_fd_single)
152 goto free_buf;
153 else if (fd_format != dpaa2_fd_sg)
154 /* We don't support any other format */
155 return;
156
157 /* For S/G frames, we first need to free all SG entries
158 * except the first one, which was taken care of already
159 */
160 sgt = vaddr + dpaa2_fd_get_offset(fd);
161 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
162 addr = dpaa2_sg_get_addr(&sgt[i]);
163 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
164 dma_unmap_page(dev, addr, priv->rx_buf_size,
165 DMA_BIDIRECTIONAL);
166
167 free_pages((unsigned long)sg_vaddr, 0);
168 if (dpaa2_sg_is_final(&sgt[i]))
169 break;
170 }
171
172 free_buf:
173 free_pages((unsigned long)vaddr, 0);
174 }
175
176 /* Build a linear skb based on a single-buffer frame descriptor */
177 static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
178 const struct dpaa2_fd *fd,
179 void *fd_vaddr)
180 {
181 struct sk_buff *skb = NULL;
182 u16 fd_offset = dpaa2_fd_get_offset(fd);
183 u32 fd_length = dpaa2_fd_get_len(fd);
184
185 ch->buf_count--;
186
187 skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
188 if (unlikely(!skb))
189 return NULL;
190
191 skb_reserve(skb, fd_offset);
192 skb_put(skb, fd_length);
193
194 return skb;
195 }
196
197 /* Build a non linear (fragmented) skb based on a S/G table */
198 static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
199 struct dpaa2_eth_channel *ch,
200 struct dpaa2_sg_entry *sgt)
201 {
202 struct sk_buff *skb = NULL;
203 struct device *dev = priv->net_dev->dev.parent;
204 void *sg_vaddr;
205 dma_addr_t sg_addr;
206 u16 sg_offset;
207 u32 sg_length;
208 struct page *page, *head_page;
209 int page_offset;
210 int i;
211
212 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
213 struct dpaa2_sg_entry *sge = &sgt[i];
214
215 /* NOTE: We only support SG entries in dpaa2_sg_single format,
216 * but this is the only format we may receive from HW anyway
217 */
218
219 /* Get the address and length from the S/G entry */
220 sg_addr = dpaa2_sg_get_addr(sge);
221 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
222 dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
223 DMA_BIDIRECTIONAL);
224
225 sg_length = dpaa2_sg_get_len(sge);
226
227 if (i == 0) {
228 /* We build the skb around the first data buffer */
229 skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
230 if (unlikely(!skb)) {
231 /* Free the first SG entry now, since we already
232 * unmapped it and obtained the virtual address
233 */
234 free_pages((unsigned long)sg_vaddr, 0);
235
236 /* We still need to subtract the buffers used
237 * by this FD from our software counter
238 */
239 while (!dpaa2_sg_is_final(&sgt[i]) &&
240 i < DPAA2_ETH_MAX_SG_ENTRIES)
241 i++;
242 break;
243 }
244
245 sg_offset = dpaa2_sg_get_offset(sge);
246 skb_reserve(skb, sg_offset);
247 skb_put(skb, sg_length);
248 } else {
249 /* Rest of the data buffers are stored as skb frags */
250 page = virt_to_page(sg_vaddr);
251 head_page = virt_to_head_page(sg_vaddr);
252
253 /* Offset in page (which may be compound).
254 * Data in subsequent SG entries is stored from the
255 * beginning of the buffer, so we don't need to add the
256 * sg_offset.
257 */
258 page_offset = ((unsigned long)sg_vaddr &
259 (PAGE_SIZE - 1)) +
260 (page_address(page) - page_address(head_page));
261
262 skb_add_rx_frag(skb, i - 1, head_page, page_offset,
263 sg_length, priv->rx_buf_size);
264 }
265
266 if (dpaa2_sg_is_final(sge))
267 break;
268 }
269
270 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");
271
272 /* Count all data buffers + SG table buffer */
273 ch->buf_count -= i + 2;
274
275 return skb;
276 }
277
278 /* Free buffers acquired from the buffer pool or which were meant to
279 * be released back into the pool
280 */
281 static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
282 int count, bool xsk_zc)
283 {
284 struct device *dev = priv->net_dev->dev.parent;
285 struct dpaa2_eth_swa *swa;
286 struct xdp_buff *xdp_buff;
287 void *vaddr;
288 int i;
289
290 for (i = 0; i < count; i++) {
291 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
292
293 if (!xsk_zc) {
294 dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
295 DMA_BIDIRECTIONAL);
296 free_pages((unsigned long)vaddr, 0);
297 } else {
298 swa = (struct dpaa2_eth_swa *)
299 (vaddr + DPAA2_ETH_RX_HWA_SIZE);
300 xdp_buff = swa->xsk.xdp_buff;
301 xsk_buff_free(xdp_buff);
302 }
303 }
304 }
305
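/* Queue a buffer for release back into the hardware buffer pool. Buffers are
 * batched and released DPAA2_ETH_BUFS_PER_CMD at a time; if the release
 * command fails, the batch is freed back to the kernel instead.
 */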
306 void dpaa2_eth_recycle_buf(struct dpaa2_eth_priv *priv,
307 struct dpaa2_eth_channel *ch,
308 dma_addr_t addr)
309 {
310 int retries = 0;
311 int err;
312
313 ch->recycled_bufs[ch->recycled_bufs_cnt++] = addr;
314 if (ch->recycled_bufs_cnt < DPAA2_ETH_BUFS_PER_CMD)
315 return;
316
317 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
318 ch->recycled_bufs,
319 ch->recycled_bufs_cnt)) == -EBUSY) {
320 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
321 break;
322 cpu_relax();
323 }
324
325 if (err) {
326 dpaa2_eth_free_bufs(priv, ch->recycled_bufs,
327 ch->recycled_bufs_cnt, ch->xsk_zc);
328 ch->buf_count -= ch->recycled_bufs_cnt;
329 }
330
331 ch->recycled_bufs_cnt = 0;
332 }
333
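/* Enqueue a batch of XDP frame descriptors on a Tx queue, retrying while the
 * portal is busy. Returns the number of FDs actually enqueued.
 */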
334 static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
335 struct dpaa2_eth_fq *fq,
336 struct dpaa2_eth_xdp_fds *xdp_fds)
337 {
338 int total_enqueued = 0, retries = 0, enqueued;
339 struct dpaa2_eth_drv_stats *percpu_extras;
340 int num_fds, err, max_retries;
341 struct dpaa2_fd *fds;
342
343 percpu_extras = this_cpu_ptr(priv->percpu_extras);
344
345 /* try to enqueue all the FDs until the max number of retries is hit */
346 fds = xdp_fds->fds;
347 num_fds = xdp_fds->num;
348 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
349 while (total_enqueued < num_fds && retries < max_retries) {
350 err = priv->enqueue(priv, fq, &fds[total_enqueued],
351 0, num_fds - total_enqueued, &enqueued);
352 if (err == -EBUSY) {
353 percpu_extras->tx_portal_busy += ++retries;
354 continue;
355 }
356 total_enqueued += enqueued;
357 }
358 xdp_fds->num = 0;
359
360 return total_enqueued;
361 }
362
363 static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
364 struct dpaa2_eth_channel *ch,
365 struct dpaa2_eth_fq *fq)
366 {
367 struct rtnl_link_stats64 *percpu_stats;
368 struct dpaa2_fd *fds;
369 int enqueued, i;
370
371 percpu_stats = this_cpu_ptr(priv->percpu_stats);
372
373 /* Enqueue the array of XDP_TX frames */
374 enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);
375
376 /* update statistics */
377 percpu_stats->tx_packets += enqueued;
378 fds = fq->xdp_tx_fds.fds;
379 for (i = 0; i < enqueued; i++) {
380 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
381 ch->stats.xdp_tx++;
382 }
383 for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
384 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
385 percpu_stats->tx_errors++;
386 ch->stats.xdp_tx_err++;
387 }
388 fq->xdp_tx_fds.num = 0;
389 }
390
391 void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
392 struct dpaa2_eth_channel *ch,
393 struct dpaa2_fd *fd,
394 void *buf_start, u16 queue_id)
395 {
396 struct dpaa2_faead *faead;
397 struct dpaa2_fd *dest_fd;
398 struct dpaa2_eth_fq *fq;
399 u32 ctrl, frc;
400
401 /* Mark the egress frame hardware annotation area as valid */
402 frc = dpaa2_fd_get_frc(fd);
403 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
404 dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);
405
406 /* Instruct hardware to release the FD buffer directly into
407 * the buffer pool once transmission is completed, instead of
408 * sending a Tx confirmation frame to us
409 */
410 ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
411 faead = dpaa2_get_faead(buf_start, false);
412 faead->ctrl = cpu_to_le32(ctrl);
413 faead->conf_fqid = 0;
414
415 fq = &priv->fq[queue_id];
416 dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
417 memcpy(dest_fd, fd, sizeof(*dest_fd));
418
419 if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
420 return;
421
422 dpaa2_eth_xdp_tx_flush(priv, ch, fq);
423 }
424
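/* Run the attached XDP program on a single-buffer Rx frame and act on its
 * verdict (PASS, TX, REDIRECT, ABORTED or DROP).
 */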
425 static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
426 struct dpaa2_eth_channel *ch,
427 struct dpaa2_eth_fq *rx_fq,
428 struct dpaa2_fd *fd, void *vaddr)
429 {
430 dma_addr_t addr = dpaa2_fd_get_addr(fd);
431 struct bpf_prog *xdp_prog;
432 struct xdp_buff xdp;
433 u32 xdp_act = XDP_PASS;
434 int err, offset;
435
436 xdp_prog = READ_ONCE(ch->xdp.prog);
437 if (!xdp_prog)
438 goto out;
439
440 offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
441 xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
442 xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
443 dpaa2_fd_get_len(fd), false);
444
445 xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
446
447 /* xdp.data pointer may have changed */
448 dpaa2_fd_set_offset(fd, xdp.data - vaddr);
449 dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);
450
451 switch (xdp_act) {
452 case XDP_PASS:
453 break;
454 case XDP_TX:
455 dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
456 break;
457 default:
458 bpf_warn_invalid_xdp_action(priv->net_dev, xdp_prog, xdp_act);
459 fallthrough;
460 case XDP_ABORTED:
461 trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
462 fallthrough;
463 case XDP_DROP:
464 dpaa2_eth_recycle_buf(priv, ch, addr);
465 ch->stats.xdp_drop++;
466 break;
467 case XDP_REDIRECT:
468 dma_unmap_page(priv->net_dev->dev.parent, addr,
469 priv->rx_buf_size, DMA_BIDIRECTIONAL);
470 ch->buf_count--;
471
472 /* Allow redirect use of full headroom */
473 xdp.data_hard_start = vaddr;
474 xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;
475
476 err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
477 if (unlikely(err)) {
478 addr = dma_map_page(priv->net_dev->dev.parent,
479 virt_to_page(vaddr), 0,
480 priv->rx_buf_size, DMA_BIDIRECTIONAL);
481 if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
482 free_pages((unsigned long)vaddr, 0);
483 } else {
484 ch->buf_count++;
485 dpaa2_eth_recycle_buf(priv, ch, addr);
486 }
487 ch->stats.xdp_drop++;
488 } else {
489 ch->stats.xdp_redirect++;
490 }
491 break;
492 }
493
494 ch->xdp.res |= xdp_act;
495 out:
496 return xdp_act;
497 }
498
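/* Allocate a new skb from the NAPI cache and copy the frame contents into it;
 * used on the Rx copybreak path for small frames.
 */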
499 struct sk_buff *dpaa2_eth_alloc_skb(struct dpaa2_eth_priv *priv,
500 struct dpaa2_eth_channel *ch,
501 const struct dpaa2_fd *fd, u32 fd_length,
502 void *fd_vaddr)
503 {
504 u16 fd_offset = dpaa2_fd_get_offset(fd);
505 struct sk_buff *skb = NULL;
506 unsigned int skb_len;
507
508 skb_len = fd_length + dpaa2_eth_needed_headroom(NULL);
509
510 skb = napi_alloc_skb(&ch->napi, skb_len);
511 if (!skb)
512 return NULL;
513
514 skb_reserve(skb, dpaa2_eth_needed_headroom(NULL));
515 skb_put(skb, fd_length);
516
517 memcpy(skb->data, fd_vaddr + fd_offset, fd_length);
518
519 return skb;
520 }
521
522 static struct sk_buff *dpaa2_eth_copybreak(struct dpaa2_eth_channel *ch,
523 const struct dpaa2_fd *fd,
524 void *fd_vaddr)
525 {
526 struct dpaa2_eth_priv *priv = ch->priv;
527 u32 fd_length = dpaa2_fd_get_len(fd);
528
529 if (fd_length > priv->rx_copybreak)
530 return NULL;
531
532 return dpaa2_eth_alloc_skb(priv, ch, fd, fd_length, fd_vaddr);
533 }
534
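/* Rx processing common to all paths: fill in the hardware timestamp and
 * checksum status, set the protocol and Rx queue, update statistics and add
 * the skb to the channel's Rx list.
 */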
535 void dpaa2_eth_receive_skb(struct dpaa2_eth_priv *priv,
536 struct dpaa2_eth_channel *ch,
537 const struct dpaa2_fd *fd, void *vaddr,
538 struct dpaa2_eth_fq *fq,
539 struct rtnl_link_stats64 *percpu_stats,
540 struct sk_buff *skb)
541 {
542 struct dpaa2_fas *fas;
543 u32 status = 0;
544
545 fas = dpaa2_get_fas(vaddr, false);
546 prefetch(fas);
547 prefetch(skb->data);
548
549 /* Get the timestamp value */
550 if (priv->rx_tstamp) {
551 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
552 __le64 *ts = dpaa2_get_ts(vaddr, false);
553 u64 ns;
554
555 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
556
557 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
558 shhwtstamps->hwtstamp = ns_to_ktime(ns);
559 }
560
561 /* Check if we need to validate the L4 csum */
562 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
563 status = le32_to_cpu(fas->status);
564 dpaa2_eth_validate_rx_csum(priv, status, skb);
565 }
566
567 skb->protocol = eth_type_trans(skb, priv->net_dev);
568 skb_record_rx_queue(skb, fq->flowid);
569
570 percpu_stats->rx_packets++;
571 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
572 ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
573
574 list_add_tail(&skb->list, ch->rx_list);
575 }
576
577 /* Main Rx frame processing routine */
578 void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
579 struct dpaa2_eth_channel *ch,
580 const struct dpaa2_fd *fd,
581 struct dpaa2_eth_fq *fq)
582 {
583 dma_addr_t addr = dpaa2_fd_get_addr(fd);
584 u8 fd_format = dpaa2_fd_get_format(fd);
585 void *vaddr;
586 struct sk_buff *skb;
587 struct rtnl_link_stats64 *percpu_stats;
588 struct dpaa2_eth_drv_stats *percpu_extras;
589 struct device *dev = priv->net_dev->dev.parent;
590 bool recycle_rx_buf = false;
591 void *buf_data;
592 u32 xdp_act;
593
594 /* Tracing point */
595 trace_dpaa2_rx_fd(priv->net_dev, fd);
596
597 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
598 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
599 DMA_BIDIRECTIONAL);
600
601 buf_data = vaddr + dpaa2_fd_get_offset(fd);
602 prefetch(buf_data);
603
604 percpu_stats = this_cpu_ptr(priv->percpu_stats);
605 percpu_extras = this_cpu_ptr(priv->percpu_extras);
606
607 if (fd_format == dpaa2_fd_single) {
608 xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
609 if (xdp_act != XDP_PASS) {
610 percpu_stats->rx_packets++;
611 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
612 return;
613 }
614
615 skb = dpaa2_eth_copybreak(ch, fd, vaddr);
616 if (!skb) {
617 dma_unmap_page(dev, addr, priv->rx_buf_size,
618 DMA_BIDIRECTIONAL);
619 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
620 } else {
621 recycle_rx_buf = true;
622 }
623 } else if (fd_format == dpaa2_fd_sg) {
624 WARN_ON(priv->xdp_prog);
625
626 dma_unmap_page(dev, addr, priv->rx_buf_size,
627 DMA_BIDIRECTIONAL);
628 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
629 free_pages((unsigned long)vaddr, 0);
630 percpu_extras->rx_sg_frames++;
631 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
632 } else {
633 /* We don't support any other format */
634 goto err_frame_format;
635 }
636
637 if (unlikely(!skb))
638 goto err_build_skb;
639
640 dpaa2_eth_receive_skb(priv, ch, fd, vaddr, fq, percpu_stats, skb);
641
642 if (recycle_rx_buf)
643 dpaa2_eth_recycle_buf(priv, ch, dpaa2_fd_get_addr(fd));
644 return;
645
646 err_build_skb:
647 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
648 err_frame_format:
649 percpu_stats->rx_dropped++;
650 }
651
652 /* Processing of Rx frames received on the error FQ
653 * We check and print the error bits and then free the frame
654 */
655 static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
656 struct dpaa2_eth_channel *ch,
657 const struct dpaa2_fd *fd,
658 struct dpaa2_eth_fq *fq __always_unused)
659 {
660 struct device *dev = priv->net_dev->dev.parent;
661 dma_addr_t addr = dpaa2_fd_get_addr(fd);
662 u8 fd_format = dpaa2_fd_get_format(fd);
663 struct rtnl_link_stats64 *percpu_stats;
664 struct dpaa2_eth_trap_item *trap_item;
665 struct dpaa2_fapr *fapr;
666 struct sk_buff *skb;
667 void *buf_data;
668 void *vaddr;
669
670 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
671 dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
672 DMA_BIDIRECTIONAL);
673
674 buf_data = vaddr + dpaa2_fd_get_offset(fd);
675
676 if (fd_format == dpaa2_fd_single) {
677 dma_unmap_page(dev, addr, priv->rx_buf_size,
678 DMA_BIDIRECTIONAL);
679 skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
680 } else if (fd_format == dpaa2_fd_sg) {
681 dma_unmap_page(dev, addr, priv->rx_buf_size,
682 DMA_BIDIRECTIONAL);
683 skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
684 free_pages((unsigned long)vaddr, 0);
685 } else {
686 /* We don't support any other format */
687 dpaa2_eth_free_rx_fd(priv, fd, vaddr);
688 goto err_frame_format;
689 }
690
691 fapr = dpaa2_get_fapr(vaddr, false);
692 trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
693 if (trap_item)
694 devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
695 &priv->devlink_port, NULL);
696 consume_skb(skb);
697
698 err_frame_format:
699 percpu_stats = this_cpu_ptr(priv->percpu_stats);
700 percpu_stats->rx_errors++;
701 ch->buf_count--;
702 }
703
704 /* Consume all frames pull-dequeued into the store. This is the simplest way to
705 * make sure we don't accidentally issue another volatile dequeue which would
706 * overwrite (leak) frames already in the store.
707 *
708 * Observance of NAPI budget is not our concern, leaving that to the caller.
709 */
710 static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
711 struct dpaa2_eth_fq **src)
712 {
713 struct dpaa2_eth_priv *priv = ch->priv;
714 struct dpaa2_eth_fq *fq = NULL;
715 struct dpaa2_dq *dq;
716 const struct dpaa2_fd *fd;
717 int cleaned = 0, retries = 0;
718 int is_last;
719
720 do {
721 dq = dpaa2_io_store_next(ch->store, &is_last);
722 if (unlikely(!dq)) {
723 /* If we're here, we *must* have placed a
724 * volatile dequeue command, so keep reading through
725 * the store until we get some sort of valid response
726 * token (either a valid frame or an "empty dequeue")
727 */
728 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
729 netdev_err_once(priv->net_dev,
730 "Unable to read a valid dequeue response\n");
731 return -ETIMEDOUT;
732 }
733 continue;
734 }
735
736 fd = dpaa2_dq_fd(dq);
737 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);
738
739 fq->consume(priv, ch, fd, fq);
740 cleaned++;
741 retries = 0;
742 } while (!is_last);
743
744 if (!cleaned)
745 return 0;
746
747 fq->stats.frames += cleaned;
748 ch->stats.frames += cleaned;
749 ch->stats.frames_per_cdan += cleaned;
750
751 /* A dequeue operation only pulls frames from a single queue
752 * into the store. Return the frame queue as an out param.
753 */
754 if (src)
755 *src = fq;
756
757 return cleaned;
758 }
759
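/* Parse a PTP packet: return its message type, two-step flag, whether it is
 * UDP encapsulated, and the offsets of the correctionField and
 * originTimestamp fields relative to the MAC header.
 */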
760 static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
761 u8 *msgtype, u8 *twostep, u8 *udp,
762 u16 *correction_offset,
763 u16 *origintimestamp_offset)
764 {
765 unsigned int ptp_class;
766 struct ptp_header *hdr;
767 unsigned int type;
768 u8 *base;
769
770 ptp_class = ptp_classify_raw(skb);
771 if (ptp_class == PTP_CLASS_NONE)
772 return -EINVAL;
773
774 hdr = ptp_parse_header(skb, ptp_class);
775 if (!hdr)
776 return -EINVAL;
777
778 *msgtype = ptp_get_msgtype(hdr, ptp_class);
779 *twostep = hdr->flag_field[0] & 0x2;
780
781 type = ptp_class & PTP_CLASS_PMASK;
782 if (type == PTP_CLASS_IPV4 ||
783 type == PTP_CLASS_IPV6)
784 *udp = 1;
785 else
786 *udp = 0;
787
788 base = skb_mac_header(skb);
789 *correction_offset = (u8 *)&hdr->correction - base;
790 *origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;
791
792 return 0;
793 }
794
795 /* Configure the egress frame annotation for timestamp update */
796 static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
797 struct dpaa2_fd *fd,
798 void *buf_start,
799 struct sk_buff *skb)
800 {
801 struct ptp_tstamp origin_timestamp;
802 u8 msgtype, twostep, udp;
803 struct dpaa2_faead *faead;
804 struct dpaa2_fas *fas;
805 struct timespec64 ts;
806 u16 offset1, offset2;
807 u32 ctrl, frc;
808 __le64 *ns;
809 u8 *data;
810
811 /* Mark the egress frame annotation area as valid */
812 frc = dpaa2_fd_get_frc(fd);
813 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
814
815 /* Set hardware annotation size */
816 ctrl = dpaa2_fd_get_ctrl(fd);
817 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);
818
819 /* enable UPD (update prepended data) bit in FAEAD field of
820 * hardware frame annotation area
821 */
822 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
823 faead = dpaa2_get_faead(buf_start, true);
824 faead->ctrl = cpu_to_le32(ctrl);
825
826 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
827 if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
828 &offset1, &offset2) ||
829 msgtype != PTP_MSGTYPE_SYNC || twostep) {
830 WARN_ONCE(1, "Bad packet for one-step timestamping\n");
831 return;
832 }
833
834 /* Mark the frame annotation status as valid */
835 frc = dpaa2_fd_get_frc(fd);
836 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);
837
838 /* Mark the PTP flag for one step timestamping */
839 fas = dpaa2_get_fas(buf_start, true);
840 fas->status = cpu_to_le32(DPAA2_FAS_PTP);
841
842 dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
843 ns = dpaa2_get_ts(buf_start, true);
844 *ns = cpu_to_le64(timespec64_to_ns(&ts) /
845 DPAA2_PTP_CLK_PERIOD_NS);
846
847 /* Update current time to PTP message originTimestamp field */
848 ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
849 data = skb_mac_header(skb);
850 *(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
851 *(__be32 *)(data + offset2 + 2) =
852 htonl(origin_timestamp.sec_lsb);
853 *(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);
854
855 if (priv->ptp_correction_off == offset1)
856 return;
857
858 priv->dpaa2_set_onestep_params_cb(priv, offset1, udp);
859 priv->ptp_correction_off = offset1;
860
861 }
862 }
863
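/* Get a scatter-gather table buffer, preferably from the per-CPU cache,
 * falling back to a fresh allocation when the cache is empty.
 */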
864 void *dpaa2_eth_sgt_get(struct dpaa2_eth_priv *priv)
865 {
866 struct dpaa2_eth_sgt_cache *sgt_cache;
867 void *sgt_buf = NULL;
868 int sgt_buf_size;
869
870 sgt_cache = this_cpu_ptr(priv->sgt_cache);
871 sgt_buf_size = priv->tx_data_offset +
872 DPAA2_ETH_SG_ENTRIES_MAX * sizeof(struct dpaa2_sg_entry);
873
874 if (sgt_cache->count == 0)
875 sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
876 else
877 sgt_buf = sgt_cache->buf[--sgt_cache->count];
878 if (!sgt_buf)
879 return NULL;
880
881 memset(sgt_buf, 0, sgt_buf_size);
882
883 return sgt_buf;
884 }
885
886 void dpaa2_eth_sgt_recycle(struct dpaa2_eth_priv *priv, void *sgt_buf)
887 {
888 struct dpaa2_eth_sgt_cache *sgt_cache;
889
890 sgt_cache = this_cpu_ptr(priv->sgt_cache);
891 if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
892 skb_free_frag(sgt_buf);
893 else
894 sgt_cache->buf[sgt_cache->count++] = sgt_buf;
895 }
896
897 /* Create a frame descriptor based on a fragmented skb */
898 static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
899 struct sk_buff *skb,
900 struct dpaa2_fd *fd,
901 void **swa_addr)
902 {
903 struct device *dev = priv->net_dev->dev.parent;
904 void *sgt_buf = NULL;
905 dma_addr_t addr;
906 int nr_frags = skb_shinfo(skb)->nr_frags;
907 struct dpaa2_sg_entry *sgt;
908 int i, err;
909 int sgt_buf_size;
910 struct scatterlist *scl, *crt_scl;
911 int num_sg;
912 int num_dma_bufs;
913 struct dpaa2_eth_swa *swa;
914
915 /* Create and map scatterlist.
916 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
917 * to go beyond nr_frags+1.
918 * Note: We don't support chained scatterlists
919 */
920 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
921 return -EINVAL;
922
923 scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
924 if (unlikely(!scl))
925 return -ENOMEM;
926
927 sg_init_table(scl, nr_frags + 1);
928 num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
929 if (unlikely(num_sg < 0)) {
930 err = -ENOMEM;
931 goto dma_map_sg_failed;
932 }
933 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
934 if (unlikely(!num_dma_bufs)) {
935 err = -ENOMEM;
936 goto dma_map_sg_failed;
937 }
938
939 /* Prepare the HW SGT structure */
940 sgt_buf_size = priv->tx_data_offset +
941 sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
942 sgt_buf = dpaa2_eth_sgt_get(priv);
943 if (unlikely(!sgt_buf)) {
944 err = -ENOMEM;
945 goto sgt_buf_alloc_failed;
946 }
947
948 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
949
950 /* Fill in the HW SGT structure.
951 *
952 * sgt_buf is zeroed out, so the following fields are implicit
953 * in all sgt entries:
954 * - offset is 0
955 * - format is 'dpaa2_sg_single'
956 */
957 for_each_sg(scl, crt_scl, num_dma_bufs, i) {
958 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
959 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
960 }
961 dpaa2_sg_set_final(&sgt[i - 1], true);
962
963 /* Store the skb backpointer in the SGT buffer.
964 * Fit the scatterlist and the number of buffers alongside the
965 * skb backpointer in the software annotation area. We'll need
966 * all of them on Tx Conf.
967 */
968 *swa_addr = (void *)sgt_buf;
969 swa = (struct dpaa2_eth_swa *)sgt_buf;
970 swa->type = DPAA2_ETH_SWA_SG;
971 swa->sg.skb = skb;
972 swa->sg.scl = scl;
973 swa->sg.num_sg = num_sg;
974 swa->sg.sgt_size = sgt_buf_size;
975
976 /* Separately map the SGT buffer */
977 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
978 if (unlikely(dma_mapping_error(dev, addr))) {
979 err = -ENOMEM;
980 goto dma_map_single_failed;
981 }
982 memset(fd, 0, sizeof(struct dpaa2_fd));
983 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
984 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
985 dpaa2_fd_set_addr(fd, addr);
986 dpaa2_fd_set_len(fd, skb->len);
987 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
988
989 return 0;
990
991 dma_map_single_failed:
992 dpaa2_eth_sgt_recycle(priv, sgt_buf);
993 sgt_buf_alloc_failed:
994 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
995 dma_map_sg_failed:
996 kfree(scl);
997 return err;
998 }
999
1000 /* Create a SG frame descriptor based on a linear skb.
1001 *
1002 * This function is used on the Tx path when the skb headroom is not large
1003 * enough for the HW requirements, thus instead of realloc-ing the skb we
1004 * create a SG frame descriptor with only one entry.
1005 */
1006 static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
1007 struct sk_buff *skb,
1008 struct dpaa2_fd *fd,
1009 void **swa_addr)
1010 {
1011 struct device *dev = priv->net_dev->dev.parent;
1012 struct dpaa2_sg_entry *sgt;
1013 struct dpaa2_eth_swa *swa;
1014 dma_addr_t addr, sgt_addr;
1015 void *sgt_buf = NULL;
1016 int sgt_buf_size;
1017 int err;
1018
1019 /* Prepare the HW SGT structure */
1020 sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);
1021 sgt_buf = dpaa2_eth_sgt_get(priv);
1022 if (unlikely(!sgt_buf))
1023 return -ENOMEM;
1024 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1025
1026 addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
1027 if (unlikely(dma_mapping_error(dev, addr))) {
1028 err = -ENOMEM;
1029 goto data_map_failed;
1030 }
1031
1032 /* Fill in the HW SGT structure */
1033 dpaa2_sg_set_addr(sgt, addr);
1034 dpaa2_sg_set_len(sgt, skb->len);
1035 dpaa2_sg_set_final(sgt, true);
1036
1037 /* Store the skb backpointer in the SGT buffer */
1038 *swa_addr = (void *)sgt_buf;
1039 swa = (struct dpaa2_eth_swa *)sgt_buf;
1040 swa->type = DPAA2_ETH_SWA_SINGLE;
1041 swa->single.skb = skb;
1042 swa->single.sgt_size = sgt_buf_size;
1043
1044 /* Separately map the SGT buffer */
1045 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1046 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1047 err = -ENOMEM;
1048 goto sgt_map_failed;
1049 }
1050
1051 memset(fd, 0, sizeof(struct dpaa2_fd));
1052 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1053 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1054 dpaa2_fd_set_addr(fd, sgt_addr);
1055 dpaa2_fd_set_len(fd, skb->len);
1056 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1057
1058 return 0;
1059
1060 sgt_map_failed:
1061 dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
1062 data_map_failed:
1063 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1064
1065 return err;
1066 }
1067
1068 /* Create a frame descriptor based on a linear skb */
1069 static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
1070 struct sk_buff *skb,
1071 struct dpaa2_fd *fd,
1072 void **swa_addr)
1073 {
1074 struct device *dev = priv->net_dev->dev.parent;
1075 u8 *buffer_start, *aligned_start;
1076 struct dpaa2_eth_swa *swa;
1077 dma_addr_t addr;
1078
1079 buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);
1080 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
1081 DPAA2_ETH_TX_BUF_ALIGN);
1082 if (aligned_start >= skb->head)
1083 buffer_start = aligned_start;
1084 else
1085 return -ENOMEM;
1086
1087 /* Store a backpointer to the skb at the beginning of the buffer
1088 * (in the private data area) such that we can release it
1089 * on Tx confirm
1090 */
1091 *swa_addr = (void *)buffer_start;
1092 swa = (struct dpaa2_eth_swa *)buffer_start;
1093 swa->type = DPAA2_ETH_SWA_SINGLE;
1094 swa->single.skb = skb;
1095
1096 addr = dma_map_single(dev, buffer_start,
1097 skb_tail_pointer(skb) - buffer_start,
1098 DMA_BIDIRECTIONAL);
1099 if (unlikely(dma_mapping_error(dev, addr)))
1100 return -ENOMEM;
1101
1102 memset(fd, 0, sizeof(struct dpaa2_fd));
1103 dpaa2_fd_set_addr(fd, addr);
1104 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
1105 dpaa2_fd_set_len(fd, skb->len);
1106 dpaa2_fd_set_format(fd, dpaa2_fd_single);
1107 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1108
1109 return 0;
1110 }
1111
1112 /* FD freeing routine on the Tx path
1113 *
1114 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
1115 * back-pointed to is also freed.
1116 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
1117 * dpaa2_eth_tx().
1118 */
1119 void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
1120 struct dpaa2_eth_channel *ch,
1121 struct dpaa2_eth_fq *fq,
1122 const struct dpaa2_fd *fd, bool in_napi)
1123 {
1124 struct device *dev = priv->net_dev->dev.parent;
1125 dma_addr_t fd_addr, sg_addr;
1126 struct sk_buff *skb = NULL;
1127 unsigned char *buffer_start;
1128 struct dpaa2_eth_swa *swa;
1129 u8 fd_format = dpaa2_fd_get_format(fd);
1130 u32 fd_len = dpaa2_fd_get_len(fd);
1131 struct dpaa2_sg_entry *sgt;
1132 int should_free_skb = 1;
1133 void *tso_hdr;
1134 int i;
1135
1136 fd_addr = dpaa2_fd_get_addr(fd);
1137 buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
1138 swa = (struct dpaa2_eth_swa *)buffer_start;
1139
1140 if (fd_format == dpaa2_fd_single) {
1141 if (swa->type == DPAA2_ETH_SWA_SINGLE) {
1142 skb = swa->single.skb;
1143 /* Accessing the skb buffer is safe before dma unmap,
1144 * because we didn't map the actual skb shell.
1145 */
1146 dma_unmap_single(dev, fd_addr,
1147 skb_tail_pointer(skb) - buffer_start,
1148 DMA_BIDIRECTIONAL);
1149 } else {
1150 WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
1151 dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
1152 DMA_BIDIRECTIONAL);
1153 }
1154 } else if (fd_format == dpaa2_fd_sg) {
1155 if (swa->type == DPAA2_ETH_SWA_SG) {
1156 skb = swa->sg.skb;
1157
1158 /* Unmap the scatterlist */
1159 dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
1160 DMA_BIDIRECTIONAL);
1161 kfree(swa->sg.scl);
1162
1163 /* Unmap the SGT buffer */
1164 dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
1165 DMA_BIDIRECTIONAL);
1166 } else if (swa->type == DPAA2_ETH_SWA_SW_TSO) {
1167 skb = swa->tso.skb;
1168
1169 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1170 priv->tx_data_offset);
1171
1172 /* Unmap the SGT buffer */
1173 dma_unmap_single(dev, fd_addr, swa->tso.sgt_size,
1174 DMA_BIDIRECTIONAL);
1175
1176 /* Unmap and free the header */
1177 tso_hdr = dpaa2_iova_to_virt(priv->iommu_domain, dpaa2_sg_get_addr(sgt));
1178 dma_unmap_single(dev, dpaa2_sg_get_addr(sgt), TSO_HEADER_SIZE,
1179 DMA_TO_DEVICE);
1180 kfree(tso_hdr);
1181
1182 /* Unmap the other SG entries for the data */
1183 for (i = 1; i < swa->tso.num_sg; i++)
1184 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1185 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1186
1187 if (!swa->tso.is_last_fd)
1188 should_free_skb = 0;
1189 } else if (swa->type == DPAA2_ETH_SWA_XSK) {
1190 /* Unmap the SGT Buffer */
1191 dma_unmap_single(dev, fd_addr, swa->xsk.sgt_size,
1192 DMA_BIDIRECTIONAL);
1193 } else {
1194 skb = swa->single.skb;
1195
1196 /* Unmap the SGT Buffer */
1197 dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
1198 DMA_BIDIRECTIONAL);
1199
1200 sgt = (struct dpaa2_sg_entry *)(buffer_start +
1201 priv->tx_data_offset);
1202 sg_addr = dpaa2_sg_get_addr(sgt);
1203 dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
1204 }
1205 } else {
1206 netdev_dbg(priv->net_dev, "Invalid FD format\n");
1207 return;
1208 }
1209
1210 if (swa->type == DPAA2_ETH_SWA_XSK) {
1211 ch->xsk_tx_pkts_sent++;
1212 dpaa2_eth_sgt_recycle(priv, buffer_start);
1213 return;
1214 }
1215
1216 if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
1217 fq->dq_frames++;
1218 fq->dq_bytes += fd_len;
1219 }
1220
1221 if (swa->type == DPAA2_ETH_SWA_XDP) {
1222 xdp_return_frame(swa->xdp.xdpf);
1223 return;
1224 }
1225
1226 /* Get the timestamp value */
1227 if (swa->type != DPAA2_ETH_SWA_SW_TSO) {
1228 if (skb->cb[0] == TX_TSTAMP) {
1229 struct skb_shared_hwtstamps shhwtstamps;
1230 __le64 *ts = dpaa2_get_ts(buffer_start, true);
1231 u64 ns;
1232
1233 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
1234
1235 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
1236 shhwtstamps.hwtstamp = ns_to_ktime(ns);
1237 skb_tstamp_tx(skb, &shhwtstamps);
1238 } else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1239 mutex_unlock(&priv->onestep_tstamp_lock);
1240 }
1241 }
1242
1243 /* Free SGT buffer allocated on tx */
1244 if (fd_format != dpaa2_fd_single)
1245 dpaa2_eth_sgt_recycle(priv, buffer_start);
1246
1247 /* Move on with skb release. If we are just confirming multiple FDs
1248 * from the same TSO skb then only the last one will need to free the
1249 * skb.
1250 */
1251 if (should_free_skb)
1252 napi_consume_skb(skb, in_napi);
1253 }
1254
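/* Software TSO: split a GSO skb into multiple frame descriptors, each backed
 * by its own SGT holding a rebuilt MAC/IP/TCP header plus up to gso_size
 * bytes of payload data.
 */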
1255 static int dpaa2_eth_build_gso_fd(struct dpaa2_eth_priv *priv,
1256 struct sk_buff *skb, struct dpaa2_fd *fd,
1257 int *num_fds, u32 *total_fds_len)
1258 {
1259 struct device *dev = priv->net_dev->dev.parent;
1260 int hdr_len, total_len, data_left, fd_len;
1261 int num_sge, err, i, sgt_buf_size;
1262 struct dpaa2_fd *fd_start = fd;
1263 struct dpaa2_sg_entry *sgt;
1264 struct dpaa2_eth_swa *swa;
1265 dma_addr_t sgt_addr, addr;
1266 dma_addr_t tso_hdr_dma;
1267 unsigned int index = 0;
1268 struct tso_t tso;
1269 char *tso_hdr;
1270 void *sgt_buf;
1271
1272 /* Initialize the TSO handler, and prepare the first payload */
1273 hdr_len = tso_start(skb, &tso);
1274 *total_fds_len = 0;
1275
1276 total_len = skb->len - hdr_len;
1277 while (total_len > 0) {
1278 /* Prepare the HW SGT structure for this frame */
1279 sgt_buf = dpaa2_eth_sgt_get(priv);
1280 if (unlikely(!sgt_buf)) {
1281 netdev_err(priv->net_dev, "dpaa2_eth_sgt_get() failed\n");
1282 err = -ENOMEM;
1283 goto err_sgt_get;
1284 }
1285 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1286
1287 /* Determine the data length of this frame */
1288 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
1289 total_len -= data_left;
1290 fd_len = data_left + hdr_len;
1291
1292 /* Prepare packet headers: MAC + IP + TCP */
1293 tso_hdr = kmalloc(TSO_HEADER_SIZE, GFP_ATOMIC);
1294 if (!tso_hdr) {
1295 err = -ENOMEM;
1296 goto err_alloc_tso_hdr;
1297 }
1298
1299 tso_build_hdr(skb, tso_hdr, &tso, data_left, total_len == 0);
1300 tso_hdr_dma = dma_map_single(dev, tso_hdr, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1301 if (dma_mapping_error(dev, tso_hdr_dma)) {
1302 netdev_err(priv->net_dev, "dma_map_single(tso_hdr) failed\n");
1303 err = -ENOMEM;
1304 goto err_map_tso_hdr;
1305 }
1306
1307 /* Setup the SG entry for the header */
1308 dpaa2_sg_set_addr(sgt, tso_hdr_dma);
1309 dpaa2_sg_set_len(sgt, hdr_len);
1310 dpaa2_sg_set_final(sgt, data_left <= 0);
1311
1312 /* Compose the SG entries for each fragment of data */
1313 num_sge = 1;
1314 while (data_left > 0) {
1315 int size;
1316
1317 /* Move to the next SG entry */
1318 sgt++;
1319 size = min_t(int, tso.size, data_left);
1320
1321 addr = dma_map_single(dev, tso.data, size, DMA_TO_DEVICE);
1322 if (dma_mapping_error(dev, addr)) {
1323 netdev_err(priv->net_dev, "dma_map_single(tso.data) failed\n");
1324 err = -ENOMEM;
1325 goto err_map_data;
1326 }
1327 dpaa2_sg_set_addr(sgt, addr);
1328 dpaa2_sg_set_len(sgt, size);
1329 dpaa2_sg_set_final(sgt, size == data_left);
1330
1331 num_sge++;
1332
1333 /* Build the data for the __next__ fragment */
1334 data_left -= size;
1335 tso_build_data(skb, &tso, size);
1336 }
1337
1338 /* Store the skb backpointer in the SGT buffer */
1339 sgt_buf_size = priv->tx_data_offset + num_sge * sizeof(struct dpaa2_sg_entry);
1340 swa = (struct dpaa2_eth_swa *)sgt_buf;
1341 swa->type = DPAA2_ETH_SWA_SW_TSO;
1342 swa->tso.skb = skb;
1343 swa->tso.num_sg = num_sge;
1344 swa->tso.sgt_size = sgt_buf_size;
1345 swa->tso.is_last_fd = total_len == 0 ? 1 : 0;
1346
1347 /* Separately map the SGT buffer */
1348 sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
1349 if (unlikely(dma_mapping_error(dev, sgt_addr))) {
1350 netdev_err(priv->net_dev, "dma_map_single(sgt_buf) failed\n");
1351 err = -ENOMEM;
1352 goto err_map_sgt;
1353 }
1354
1355 /* Setup the frame descriptor */
1356 memset(fd, 0, sizeof(struct dpaa2_fd));
1357 dpaa2_fd_set_offset(fd, priv->tx_data_offset);
1358 dpaa2_fd_set_format(fd, dpaa2_fd_sg);
1359 dpaa2_fd_set_addr(fd, sgt_addr);
1360 dpaa2_fd_set_len(fd, fd_len);
1361 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
1362
1363 *total_fds_len += fd_len;
1364 /* Advance to the next frame descriptor */
1365 fd++;
1366 index++;
1367 }
1368
1369 *num_fds = index;
1370
1371 return 0;
1372
1373 err_map_sgt:
1374 err_map_data:
1375 /* Unmap all the data S/G entries for the current FD */
1376 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);
1377 for (i = 1; i < num_sge; i++)
1378 dma_unmap_single(dev, dpaa2_sg_get_addr(&sgt[i]),
1379 dpaa2_sg_get_len(&sgt[i]), DMA_TO_DEVICE);
1380
1381 /* Unmap the header entry */
1382 dma_unmap_single(dev, tso_hdr_dma, TSO_HEADER_SIZE, DMA_TO_DEVICE);
1383 err_map_tso_hdr:
1384 kfree(tso_hdr);
1385 err_alloc_tso_hdr:
1386 dpaa2_eth_sgt_recycle(priv, sgt_buf);
1387 err_sgt_get:
1388 /* Free all the other FDs that were already fully created */
1389 for (i = 0; i < index; i++)
1390 dpaa2_eth_free_tx_fd(priv, NULL, NULL, &fd_start[i], false);
1391
1392 return err;
1393 }
1394
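/* Main Tx routine: build one or more frame descriptors for the skb and
 * enqueue them on the hardware queue selected by the stack, retrying while
 * the portal is busy.
 */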
1395 static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
1396 struct net_device *net_dev)
1397 {
1398 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1399 int total_enqueued = 0, retries = 0, enqueued;
1400 struct dpaa2_eth_drv_stats *percpu_extras;
1401 struct rtnl_link_stats64 *percpu_stats;
1402 unsigned int needed_headroom;
1403 int num_fds = 1, max_retries;
1404 struct dpaa2_eth_fq *fq;
1405 struct netdev_queue *nq;
1406 struct dpaa2_fd *fd;
1407 u16 queue_mapping;
1408 void *swa = NULL;
1409 u8 prio = 0;
1410 int err, i;
1411 u32 fd_len;
1412
1413 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1414 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1415 fd = (this_cpu_ptr(priv->fd))->array;
1416
1417 needed_headroom = dpaa2_eth_needed_headroom(skb);
1418
1419 /* We'll be holding a back-reference to the skb until Tx Confirmation;
1420 * we don't want that overwritten by a concurrent Tx with a cloned skb.
1421 */
1422 skb = skb_unshare(skb, GFP_ATOMIC);
1423 if (unlikely(!skb)) {
1424 /* skb_unshare() has already freed the skb */
1425 percpu_stats->tx_dropped++;
1426 return NETDEV_TX_OK;
1427 }
1428
1429 /* Setup the FD fields */
1430
1431 if (skb_is_gso(skb)) {
1432 err = dpaa2_eth_build_gso_fd(priv, skb, fd, &num_fds, &fd_len);
1433 percpu_extras->tx_sg_frames += num_fds;
1434 percpu_extras->tx_sg_bytes += fd_len;
1435 percpu_extras->tx_tso_frames += num_fds;
1436 percpu_extras->tx_tso_bytes += fd_len;
1437 } else if (skb_is_nonlinear(skb)) {
1438 err = dpaa2_eth_build_sg_fd(priv, skb, fd, &swa);
1439 percpu_extras->tx_sg_frames++;
1440 percpu_extras->tx_sg_bytes += skb->len;
1441 fd_len = dpaa2_fd_get_len(fd);
1442 } else if (skb_headroom(skb) < needed_headroom) {
1443 err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, fd, &swa);
1444 percpu_extras->tx_sg_frames++;
1445 percpu_extras->tx_sg_bytes += skb->len;
1446 percpu_extras->tx_converted_sg_frames++;
1447 percpu_extras->tx_converted_sg_bytes += skb->len;
1448 fd_len = dpaa2_fd_get_len(fd);
1449 } else {
1450 err = dpaa2_eth_build_single_fd(priv, skb, fd, &swa);
1451 fd_len = dpaa2_fd_get_len(fd);
1452 }
1453
1454 if (unlikely(err)) {
1455 percpu_stats->tx_dropped++;
1456 goto err_build_fd;
1457 }
1458
1459 if (swa && skb->cb[0])
1460 dpaa2_eth_enable_tx_tstamp(priv, fd, swa, skb);
1461
1462 /* Tracing point */
1463 for (i = 0; i < num_fds; i++)
1464 trace_dpaa2_tx_fd(net_dev, &fd[i]);
1465
1466 /* TxConf FQ selection relies on queue id from the stack.
1467 * In case of a forwarded frame from another DPNI interface, we choose
1468 * a queue affined to the same core that processed the Rx frame
1469 */
1470 queue_mapping = skb_get_queue_mapping(skb);
1471
1472 if (net_dev->num_tc) {
1473 prio = netdev_txq_to_tc(net_dev, queue_mapping);
1474 /* Hardware interprets priority level 0 as being the highest,
1475 * so we need to do a reverse mapping to the netdev tc index
1476 */
1477 prio = net_dev->num_tc - prio - 1;
1478 /* We have only one FQ array entry for all Tx hardware queues
1479 * with the same flow id (but different priority levels)
1480 */
1481 queue_mapping %= dpaa2_eth_queue_count(priv);
1482 }
1483 fq = &priv->fq[queue_mapping];
1484 nq = netdev_get_tx_queue(net_dev, queue_mapping);
1485 netdev_tx_sent_queue(nq, fd_len);
1486
1487 /* Everything that happens after this enqueue might race with
1488 * the Tx confirmation callback for this frame
1489 */
1490 max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
1491 while (total_enqueued < num_fds && retries < max_retries) {
1492 err = priv->enqueue(priv, fq, &fd[total_enqueued],
1493 prio, num_fds - total_enqueued, &enqueued);
1494 if (err == -EBUSY) {
1495 retries++;
1496 continue;
1497 }
1498
1499 total_enqueued += enqueued;
1500 }
1501 percpu_extras->tx_portal_busy += retries;
1502
1503 if (unlikely(err < 0)) {
1504 percpu_stats->tx_errors++;
1505 /* Clean up everything, including freeing the skb */
1506 dpaa2_eth_free_tx_fd(priv, NULL, fq, fd, false);
1507 netdev_tx_completed_queue(nq, 1, fd_len);
1508 } else {
1509 percpu_stats->tx_packets += total_enqueued;
1510 percpu_stats->tx_bytes += fd_len;
1511 }
1512
1513 return NETDEV_TX_OK;
1514
1515 err_build_fd:
1516 dev_kfree_skb(skb);
1517
1518 return NETDEV_TX_OK;
1519 }
1520
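/* Worker that serializes transmission of one-step PTP Sync packets; the
 * mutex taken here is released in dpaa2_eth_free_tx_fd once the frame is
 * confirmed or cleaned up.
 */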
1521 static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
1522 {
1523 struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
1524 tx_onestep_tstamp);
1525 struct sk_buff *skb;
1526
1527 while (true) {
1528 skb = skb_dequeue(&priv->tx_skbs);
1529 if (!skb)
1530 return;
1531
1532 /* Take the lock just before transmitting a one-step timestamping
1533 * packet, and release it in dpaa2_eth_free_tx_fd once hardware
1534 * confirms the packet has been sent, or when cleaning up after a
1535 * transmit failure.
1536 */
1537 mutex_lock(&priv->onestep_tstamp_lock);
1538 __dpaa2_eth_tx(skb, priv->net_dev);
1539 }
1540 }
1541
1542 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
1543 {
1544 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
1545 u8 msgtype, twostep, udp;
1546 u16 offset1, offset2;
1547
1548 /* Utilize skb->cb[0] for timestamping request per skb */
1549 skb->cb[0] = 0;
1550
1551 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
1552 if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
1553 skb->cb[0] = TX_TSTAMP;
1554 else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
1555 skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
1556 }
1557
1558 /* TX for one-step timestamping PTP Sync packet */
1559 if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
1560 if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
1561 &offset1, &offset2))
1562 if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
1563 skb_queue_tail(&priv->tx_skbs, skb);
1564 queue_work(priv->dpaa2_ptp_wq,
1565 &priv->tx_onestep_tstamp);
1566 return NETDEV_TX_OK;
1567 }
1568 /* Use two-step timestamping if not one-step timestamping
1569 * PTP Sync packet
1570 */
1571 skb->cb[0] = TX_TSTAMP;
1572 }
1573
1574 /* TX for other packets */
1575 return __dpaa2_eth_tx(skb, net_dev);
1576 }
1577
1578 /* Tx confirmation frame processing routine */
1579 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
1580 struct dpaa2_eth_channel *ch,
1581 const struct dpaa2_fd *fd,
1582 struct dpaa2_eth_fq *fq)
1583 {
1584 struct rtnl_link_stats64 *percpu_stats;
1585 struct dpaa2_eth_drv_stats *percpu_extras;
1586 u32 fd_len = dpaa2_fd_get_len(fd);
1587 u32 fd_errors;
1588
1589 /* Tracing point */
1590 trace_dpaa2_tx_conf_fd(priv->net_dev, fd);
1591
1592 percpu_extras = this_cpu_ptr(priv->percpu_extras);
1593 percpu_extras->tx_conf_frames++;
1594 percpu_extras->tx_conf_bytes += fd_len;
1595 ch->stats.bytes_per_cdan += fd_len;
1596
1597 /* Check frame errors in the FD field */
1598 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
1599 dpaa2_eth_free_tx_fd(priv, ch, fq, fd, true);
1600
1601 if (likely(!fd_errors))
1602 return;
1603
1604 if (net_ratelimit())
1605 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
1606 fd_errors);
1607
1608 percpu_stats = this_cpu_ptr(priv->percpu_stats);
1609 /* Tx-conf logically pertains to the egress path. */
1610 percpu_stats->tx_errors++;
1611 }
1612
1613 static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
1614 bool enable)
1615 {
1616 int err;
1617
1618 err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);
1619
1620 if (err) {
1621 netdev_err(priv->net_dev,
1622 "dpni_enable_vlan_filter failed\n");
1623 return err;
1624 }
1625
1626 return 0;
1627 }
1628
1629 static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
1630 {
1631 int err;
1632
1633 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1634 DPNI_OFF_RX_L3_CSUM, enable);
1635 if (err) {
1636 netdev_err(priv->net_dev,
1637 "dpni_set_offload(RX_L3_CSUM) failed\n");
1638 return err;
1639 }
1640
1641 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1642 DPNI_OFF_RX_L4_CSUM, enable);
1643 if (err) {
1644 netdev_err(priv->net_dev,
1645 "dpni_set_offload(RX_L4_CSUM) failed\n");
1646 return err;
1647 }
1648
1649 return 0;
1650 }
1651
1652 static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
1653 {
1654 int err;
1655
1656 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1657 DPNI_OFF_TX_L3_CSUM, enable);
1658 if (err) {
1659 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
1660 return err;
1661 }
1662
1663 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
1664 DPNI_OFF_TX_L4_CSUM, enable);
1665 if (err) {
1666 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
1667 return err;
1668 }
1669
1670 return 0;
1671 }
1672
1673 /* Perform a single release command to add buffers
1674 * to the specified buffer pool
1675 */
1676 static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
1677 struct dpaa2_eth_channel *ch)
1678 {
1679 struct xdp_buff *xdp_buffs[DPAA2_ETH_BUFS_PER_CMD];
1680 struct device *dev = priv->net_dev->dev.parent;
1681 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1682 struct dpaa2_eth_swa *swa;
1683 struct page *page;
1684 dma_addr_t addr;
1685 int retries = 0;
1686 int i = 0, err;
1687 u32 batch;
1688
1689 /* Allocate buffers visible to WRIOP */
1690 if (!ch->xsk_zc) {
1691 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
1692 /* Also allocate skb shared info and alignment padding.
1693 * There is one page for each Rx buffer. WRIOP sees
1694 * the entire page except for a tailroom reserved for
1695 * skb shared info
1696 */
1697 page = dev_alloc_pages(0);
1698 if (!page)
1699 goto err_alloc;
1700
1701 addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
1702 DMA_BIDIRECTIONAL);
1703 if (unlikely(dma_mapping_error(dev, addr)))
1704 goto err_map;
1705
1706 buf_array[i] = addr;
1707
1708 /* tracing point */
1709 trace_dpaa2_eth_buf_seed(priv->net_dev,
1710 page_address(page),
1711 DPAA2_ETH_RX_BUF_RAW_SIZE,
1712 addr, priv->rx_buf_size,
1713 ch->bp->bpid);
1714 }
1715 } else if (xsk_buff_can_alloc(ch->xsk_pool, DPAA2_ETH_BUFS_PER_CMD)) {
1716 /* Allocate XSK buffers for AF_XDP fast path in batches
1717 * of DPAA2_ETH_BUFS_PER_CMD. Bail out if the UMEM cannot
1718 * provide enough buffers at the moment
1719 */
1720 batch = xsk_buff_alloc_batch(ch->xsk_pool, xdp_buffs,
1721 DPAA2_ETH_BUFS_PER_CMD);
1722 if (!batch)
1723 goto err_alloc;
1724
1725 for (i = 0; i < batch; i++) {
1726 swa = (struct dpaa2_eth_swa *)(xdp_buffs[i]->data_hard_start +
1727 DPAA2_ETH_RX_HWA_SIZE);
1728 swa->xsk.xdp_buff = xdp_buffs[i];
1729
1730 addr = xsk_buff_xdp_get_frame_dma(xdp_buffs[i]);
1731 if (unlikely(dma_mapping_error(dev, addr)))
1732 goto err_map;
1733
1734 buf_array[i] = addr;
1735
1736 trace_dpaa2_xsk_buf_seed(priv->net_dev,
1737 xdp_buffs[i]->data_hard_start,
1738 DPAA2_ETH_RX_BUF_RAW_SIZE,
1739 addr, priv->rx_buf_size,
1740 ch->bp->bpid);
1741 }
1742 }
1743
1744 release_bufs:
1745 /* In case the portal is busy, retry until successful */
1746 while ((err = dpaa2_io_service_release(ch->dpio, ch->bp->bpid,
1747 buf_array, i)) == -EBUSY) {
1748 if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
1749 break;
1750 cpu_relax();
1751 }
1752
1753 /* If release command failed, clean up and bail out;
1754 * not much else we can do about it
1755 */
1756 if (err) {
1757 dpaa2_eth_free_bufs(priv, buf_array, i, ch->xsk_zc);
1758 return 0;
1759 }
1760
1761 return i;
1762
1763 err_map:
1764 if (!ch->xsk_zc) {
1765 __free_pages(page, 0);
1766 } else {
1767 for (; i < batch; i++)
1768 xsk_buff_free(xdp_buffs[i]);
1769 }
1770 err_alloc:
1771 /* If we managed to allocate at least some buffers,
1772 * release them to hardware
1773 */
1774 if (i)
1775 goto release_bufs;
1776
1777 return 0;
1778 }
1779
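/* Seed a channel's buffer pool with up to DPAA2_ETH_NUM_BUFS buffers,
* in batches of DPAA2_ETH_BUFS_PER_CMD. A short batch means the allocation
* (or the release command) failed, so report -ENOMEM and let the caller
* decide whether the partially filled pool is enough to limp on.
*/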
1780 static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv,
1781 struct dpaa2_eth_channel *ch)
1782 {
1783 int i;
1784 int new_count;
1785
1786 for (i = 0; i < DPAA2_ETH_NUM_BUFS; i += DPAA2_ETH_BUFS_PER_CMD) {
1787 new_count = dpaa2_eth_add_bufs(priv, ch);
1788 ch->buf_count += new_count;
1789
1790 if (new_count < DPAA2_ETH_BUFS_PER_CMD)
1791 return -ENOMEM;
1792 }
1793
1794 return 0;
1795 }
1796
1797 static void dpaa2_eth_seed_pools(struct dpaa2_eth_priv *priv)
1798 {
1799 struct net_device *net_dev = priv->net_dev;
1800 struct dpaa2_eth_channel *channel;
1801 int i, err = 0;
1802
1803 for (i = 0; i < priv->num_channels; i++) {
1804 channel = priv->channel[i];
1805
1806 err = dpaa2_eth_seed_pool(priv, channel);
1807
1808 /* Not much to do; the buffer pool, though not filled up,
1809 * may still contain some buffers which would enable us
1810 * to limp on.
1811 */
1812 if (err)
1813 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
1814 channel->bp->dev->obj_desc.id,
1815 channel->bp->bpid);
1816 }
1817 }
1818
1819 /*
1820 * Drain the specified number of buffers from one of the DPNI's private buffer
1821 * pools.
1822 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
1823 */
1824 static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int bpid,
1825 int count)
1826 {
1827 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
1828 bool xsk_zc = false;
1829 int retries = 0;
1830 int i, ret;
1831
1832 for (i = 0; i < priv->num_channels; i++)
1833 if (priv->channel[i]->bp->bpid == bpid)
1834 xsk_zc = priv->channel[i]->xsk_zc;
1835
1836 do {
1837 ret = dpaa2_io_service_acquire(NULL, bpid, buf_array, count);
1838 if (ret < 0) {
1839 if (ret == -EBUSY &&
1840 retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
1841 continue;
1842 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
1843 return;
1844 }
1845 dpaa2_eth_free_bufs(priv, buf_array, ret, xsk_zc);
1846 retries = 0;
1847 } while (ret);
1848 }
1849
1850 static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv, int bpid)
1851 {
1852 int i;
1853
1854 /* Drain the buffer pool */
1855 dpaa2_eth_drain_bufs(priv, bpid, DPAA2_ETH_BUFS_PER_CMD);
1856 dpaa2_eth_drain_bufs(priv, bpid, 1);
1857
1858 /* Reset to zero the buffer count of all channels which were
1859 * using this buffer pool.
1860 */
1861 for (i = 0; i < priv->num_channels; i++)
1862 if (priv->channel[i]->bp->bpid == bpid)
1863 priv->channel[i]->buf_count = 0;
1864 }
1865
1866 static void dpaa2_eth_drain_pools(struct dpaa2_eth_priv *priv)
1867 {
1868 int i;
1869
1870 for (i = 0; i < priv->num_bps; i++)
1871 dpaa2_eth_drain_pool(priv, priv->bp[i]->bpid);
1872 }
1873
1874 /* Function is called from softirq context only, so we don't need to guard
1875 * the access to percpu count
1876 */
1877 static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
1878 struct dpaa2_eth_channel *ch)
1879 {
1880 int new_count;
1881
1882 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
1883 return 0;
1884
1885 do {
1886 new_count = dpaa2_eth_add_bufs(priv, ch);
1887 if (unlikely(!new_count)) {
1888 /* Out of memory; abort for now, we'll try later on */
1889 break;
1890 }
1891 ch->buf_count += new_count;
1892 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS);
1893
1894 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
1895 return -ENOMEM;
1896
1897 return 0;
1898 }
1899
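/* Free any scatter-gather table buffers still held in the per-CPU cache */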
1900 static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
1901 {
1902 struct dpaa2_eth_sgt_cache *sgt_cache;
1903 u16 count;
1904 int k, i;
1905
1906 for_each_possible_cpu(k) {
1907 sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
1908 count = sgt_cache->count;
1909
1910 for (i = 0; i < count; i++)
1911 skb_free_frag(sgt_cache->buf[i]);
1912 sgt_cache->count = 0;
1913 }
1914 }
1915
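/* Issue a volatile dequeue command on the channel, placing the resulting
* frames in the channel's software store. Retries while the software portal
* is busy and accounts the extra attempts in the channel statistics.
*/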
1916 static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
1917 {
1918 int err;
1919 int dequeues = -1;
1920
1921 /* Retry while portal is busy */
1922 do {
1923 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
1924 ch->store);
1925 dequeues++;
1926 cpu_relax();
1927 } while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);
1928
1929 ch->stats.dequeue_portal_busy += dequeues;
1930 if (unlikely(err))
1931 ch->stats.pull_err++;
1932
1933 return err;
1934 }
1935
1936 /* NAPI poll routine
1937 *
1938 * Frames are dequeued from the QMan channel associated with this NAPI context.
1939 * Rx, Tx confirmation and (if configured) Rx error frames all count
1940 * towards the NAPI budget.
1941 */
1942 static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
1943 {
1944 struct dpaa2_eth_channel *ch;
1945 struct dpaa2_eth_priv *priv;
1946 int rx_cleaned = 0, txconf_cleaned = 0;
1947 struct dpaa2_eth_fq *fq, *txc_fq = NULL;
1948 struct netdev_queue *nq;
1949 int store_cleaned, work_done;
1950 bool work_done_zc = false;
1951 struct list_head rx_list;
1952 int retries = 0;
1953 u16 flowid;
1954 int err;
1955
1956 ch = container_of(napi, struct dpaa2_eth_channel, napi);
1957 ch->xdp.res = 0;
1958 priv = ch->priv;
1959
1960 INIT_LIST_HEAD(&rx_list);
1961 ch->rx_list = &rx_list;
1962
1963 if (ch->xsk_zc) {
1964 work_done_zc = dpaa2_xsk_tx(priv, ch);
1965 /* If we reached the XSK Tx per NAPI threshold, we're done */
1966 if (work_done_zc) {
1967 work_done = budget;
1968 goto out;
1969 }
1970 }
1971
1972 do {
1973 err = dpaa2_eth_pull_channel(ch);
1974 if (unlikely(err))
1975 break;
1976
1977 /* Refill pool if appropriate */
1978 dpaa2_eth_refill_pool(priv, ch);
1979
1980 store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
1981 if (store_cleaned <= 0)
1982 break;
1983 if (fq->type == DPAA2_RX_FQ) {
1984 rx_cleaned += store_cleaned;
1985 flowid = fq->flowid;
1986 } else {
1987 txconf_cleaned += store_cleaned;
1988 /* We have a single Tx conf FQ on this channel */
1989 txc_fq = fq;
1990 }
1991
1992 /* If we either consumed the whole NAPI budget with Rx frames
1993 * or we reached the Tx confirmations threshold, we're done.
1994 */
1995 if (rx_cleaned >= budget ||
1996 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
1997 work_done = budget;
1998 if (ch->xdp.res & XDP_REDIRECT)
1999 xdp_do_flush();
2000 goto out;
2001 }
2002 } while (store_cleaned);
2003
2004 if (ch->xdp.res & XDP_REDIRECT)
2005 xdp_do_flush();
2006
2007 /* Update NET DIM with the values for this CDAN */
2008 dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
2009 ch->stats.bytes_per_cdan);
2010 ch->stats.frames_per_cdan = 0;
2011 ch->stats.bytes_per_cdan = 0;
2012
2013 /* We didn't consume the entire budget, so finish napi and
2014 * re-enable data availability notifications
2015 */
2016 napi_complete_done(napi, rx_cleaned);
2017 do {
2018 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
2019 cpu_relax();
2020 } while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
2021 WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
2022 ch->nctx.desired_cpu);
2023
2024 work_done = max(rx_cleaned, 1);
2025
2026 out:
2027 netif_receive_skb_list(ch->rx_list);
2028
2029 if (ch->xsk_tx_pkts_sent) {
2030 xsk_tx_completed(ch->xsk_pool, ch->xsk_tx_pkts_sent);
2031 ch->xsk_tx_pkts_sent = 0;
2032 }
2033
2034 if (txc_fq && txc_fq->dq_frames) {
2035 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
2036 netdev_tx_completed_queue(nq, txc_fq->dq_frames,
2037 txc_fq->dq_bytes);
2038 txc_fq->dq_frames = 0;
2039 txc_fq->dq_bytes = 0;
2040 }
2041
2042 if (rx_cleaned && ch->xdp.res & XDP_TX)
2043 dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);
2044
2045 return work_done;
2046 }
2047
2048 static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
2049 {
2050 struct dpaa2_eth_channel *ch;
2051 int i;
2052
2053 for (i = 0; i < priv->num_channels; i++) {
2054 ch = priv->channel[i];
2055 napi_enable(&ch->napi);
2056 }
2057 }
2058
2059 static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
2060 {
2061 struct dpaa2_eth_channel *ch;
2062 int i;
2063
2064 for (i = 0; i < priv->num_channels; i++) {
2065 ch = priv->channel[i];
2066 napi_disable(&ch->napi);
2067 }
2068 }
2069
2070 void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
2071 bool tx_pause, bool pfc)
2072 {
2073 struct dpni_taildrop td = {0};
2074 struct dpaa2_eth_fq *fq;
2075 int i, err;
2076
2077 /* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
2078 * flow control is disabled (as it might interfere with either the
2079 * buffer pool depletion trigger for pause frames or with the group
2080 * congestion trigger for PFC frames)
2081 */
2082 td.enable = !tx_pause;
2083 if (priv->rx_fqtd_enabled == td.enable)
2084 goto set_cgtd;
2085
2086 td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
2087 td.units = DPNI_CONGESTION_UNIT_BYTES;
2088
2089 for (i = 0; i < priv->num_fqs; i++) {
2090 fq = &priv->fq[i];
2091 if (fq->type != DPAA2_RX_FQ)
2092 continue;
2093 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2094 DPNI_CP_QUEUE, DPNI_QUEUE_RX,
2095 fq->tc, fq->flowid, &td);
2096 if (err) {
2097 netdev_err(priv->net_dev,
2098 "dpni_set_taildrop(FQ) failed\n");
2099 return;
2100 }
2101 }
2102
2103 priv->rx_fqtd_enabled = td.enable;
2104
2105 set_cgtd:
2106 /* Congestion group taildrop: threshold is in frames, per group
2107 * of FQs belonging to the same traffic class
2108 * Enabled if general Tx pause disabled or if PFCs are enabled
2109 * (congestion group threshold for PFC generation is lower than the
2110 * CG taildrop threshold, so it won't interfere with it; we also
2111 * want frames in non-PFC enabled traffic classes to be kept in check)
2112 */
2113 td.enable = !tx_pause || pfc;
2114 if (priv->rx_cgtd_enabled == td.enable)
2115 return;
2116
2117 td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
2118 td.units = DPNI_CONGESTION_UNIT_FRAMES;
2119 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
2120 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
2121 DPNI_CP_GROUP, DPNI_QUEUE_RX,
2122 i, 0, &td);
2123 if (err) {
2124 netdev_err(priv->net_dev,
2125 "dpni_set_taildrop(CG) failed\n");
2126 return;
2127 }
2128 }
2129
2130 priv->rx_cgtd_enabled = td.enable;
2131 }
2132
2133 static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
2134 {
2135 struct dpni_link_state state = {0};
2136 bool tx_pause;
2137 int err;
2138
2139 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
2140 if (unlikely(err)) {
2141 netdev_err(priv->net_dev,
2142 "dpni_get_link_state() failed\n");
2143 return err;
2144 }
2145
2146 /* If Tx pause frame settings have changed, we need to update
2147 * Rx FQ taildrop configuration as well. We configure taildrop
2148 * only when pause frame generation is disabled.
2149 */
2150 tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
2151 dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
2152
2153 /* When we manage the MAC/PHY using phylink there is no need
2154 * to manually update the netif_carrier.
2155 * We can avoid locking because we are called from the "link changed"
2156 * IRQ handler, which is the same as the "endpoint changed" IRQ handler
2157 * (the writer to priv->mac), so we cannot race with it.
2158 */
2159 if (dpaa2_mac_is_type_phy(priv->mac))
2160 goto out;
2161
2162 /* Check link state; speed / duplex changes are not handled yet */
2163 if (priv->link_state.up == state.up)
2164 goto out;
2165
2166 if (state.up) {
2167 netif_carrier_on(priv->net_dev);
2168 netif_tx_start_all_queues(priv->net_dev);
2169 } else {
2170 netif_tx_stop_all_queues(priv->net_dev);
2171 netif_carrier_off(priv->net_dev);
2172 }
2173
2174 netdev_info(priv->net_dev, "Link Event: state %s\n",
2175 state.up ? "up" : "down");
2176
2177 out:
2178 priv->link_state = state;
2179
2180 return 0;
2181 }
2182
2183 static int dpaa2_eth_open(struct net_device *net_dev)
2184 {
2185 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2186 int err;
2187
2188 dpaa2_eth_seed_pools(priv);
2189
2190 mutex_lock(&priv->mac_lock);
2191
2192 if (!dpaa2_eth_is_type_phy(priv)) {
2193 /* We'll only start the txqs when the link is actually ready;
2194 * make sure we don't race against the link up notification,
2195 * which may come immediately after dpni_enable();
2196 */
2197 netif_tx_stop_all_queues(net_dev);
2198
2199 /* Also, explicitly set carrier off, otherwise
2200 * netif_carrier_ok() will return true and cause 'ip link show'
2201 * to report the LOWER_UP flag, even though the link
2202 * notification wasn't even received.
2203 */
2204 netif_carrier_off(net_dev);
2205 }
2206 dpaa2_eth_enable_ch_napi(priv);
2207
2208 err = dpni_enable(priv->mc_io, 0, priv->mc_token);
2209 if (err < 0) {
2210 mutex_unlock(&priv->mac_lock);
2211 netdev_err(net_dev, "dpni_enable() failed\n");
2212 goto enable_err;
2213 }
2214
2215 if (dpaa2_eth_is_type_phy(priv))
2216 dpaa2_mac_start(priv->mac);
2217
2218 mutex_unlock(&priv->mac_lock);
2219
2220 return 0;
2221
2222 enable_err:
2223 dpaa2_eth_disable_ch_napi(priv);
2224 dpaa2_eth_drain_pools(priv);
2225 return err;
2226 }
2227
2228 /* Total number of in-flight frames on ingress queues */
2229 static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
2230 {
2231 struct dpaa2_eth_fq *fq;
2232 u32 fcnt = 0, bcnt = 0, total = 0;
2233 int i, err;
2234
2235 for (i = 0; i < priv->num_fqs; i++) {
2236 fq = &priv->fq[i];
2237 err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
2238 if (err) {
2239 netdev_warn(priv->net_dev, "query_fq_count failed");
2240 break;
2241 }
2242 total += fcnt;
2243 }
2244
2245 return total;
2246 }
2247
2248 static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
2249 {
2250 int retries = 10;
2251 u32 pending;
2252
2253 do {
2254 pending = dpaa2_eth_ingress_fq_count(priv);
2255 if (pending)
2256 msleep(100);
2257 } while (pending && --retries);
2258 }
2259
2260 #define DPNI_TX_PENDING_VER_MAJOR 7
2261 #define DPNI_TX_PENDING_VER_MINOR 13
2262 static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
2263 {
2264 union dpni_statistics stats;
2265 int retries = 10;
2266 int err;
2267
2268 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
2269 DPNI_TX_PENDING_VER_MINOR) < 0)
2270 goto out;
2271
2272 do {
2273 err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
2274 &stats);
2275 if (err)
2276 goto out;
2277 if (stats.page_6.tx_pending_frames == 0)
2278 return;
2279 } while (--retries);
2280
2281 out:
2282 msleep(500);
2283 }
2284
2285 static int dpaa2_eth_stop(struct net_device *net_dev)
2286 {
2287 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2288 int dpni_enabled = 0;
2289 int retries = 10;
2290
2291 mutex_lock(&priv->mac_lock);
2292
2293 if (dpaa2_eth_is_type_phy(priv)) {
2294 dpaa2_mac_stop(priv->mac);
2295 } else {
2296 netif_tx_stop_all_queues(net_dev);
2297 netif_carrier_off(net_dev);
2298 }
2299
2300 mutex_unlock(&priv->mac_lock);
2301
2302 /* On dpni_disable(), the MC firmware will:
2303 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
2304 * - cut off WRIOP dequeues from egress FQs and wait until transmission
2305 * of all in flight Tx frames is finished (and corresponding Tx conf
2306 * frames are enqueued back to software)
2307 *
2308 * Before calling dpni_disable(), we wait for all Tx frames to arrive
2309 * on WRIOP. After it finishes, wait until all remaining frames on Rx
2310 * and Tx conf queues are consumed on NAPI poll.
2311 */
2312 dpaa2_eth_wait_for_egress_fq_empty(priv);
2313
2314 do {
2315 dpni_disable(priv->mc_io, 0, priv->mc_token);
2316 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
2317 if (dpni_enabled)
2318 /* Allow the hardware some slack */
2319 msleep(100);
2320 } while (dpni_enabled && --retries);
2321 if (!retries) {
2322 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
2323 /* Must go on and disable NAPI nonetheless, so we don't crash at
2324 * the next "ifconfig up"
2325 */
2326 }
2327
2328 dpaa2_eth_wait_for_ingress_fq_empty(priv);
2329 dpaa2_eth_disable_ch_napi(priv);
2330
2331 /* Empty the buffer pool */
2332 dpaa2_eth_drain_pools(priv);
2333
2334 /* Empty the Scatter-Gather Buffer cache */
2335 dpaa2_eth_sgt_cache_drain(priv);
2336
2337 return 0;
2338 }
2339
2340 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
2341 {
2342 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2343 struct device *dev = net_dev->dev.parent;
2344 int err;
2345
2346 err = eth_mac_addr(net_dev, addr);
2347 if (err < 0) {
2348 dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
2349 return err;
2350 }
2351
2352 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
2353 net_dev->dev_addr);
2354 if (err) {
2355 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
2356 return err;
2357 }
2358
2359 return 0;
2360 }
2361
2362 /** Fill in counters maintained by the GPP driver. These may be different from
2363 * the hardware counters obtained by ethtool.
2364 */
2365 static void dpaa2_eth_get_stats(struct net_device *net_dev,
2366 struct rtnl_link_stats64 *stats)
2367 {
2368 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2369 struct rtnl_link_stats64 *percpu_stats;
2370 u64 *cpustats;
2371 u64 *netstats = (u64 *)stats;
2372 int i, j;
2373 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);
2374
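/* rtnl_link_stats64 contains only u64 fields, so the per-CPU copies can be
* summed by walking both structures as flat arrays of u64.
*/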
2375 for_each_possible_cpu(i) {
2376 percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
2377 cpustats = (u64 *)percpu_stats;
2378 for (j = 0; j < num; j++)
2379 netstats[j] += cpustats[j];
2380 }
2381 }
2382
2383 /* Copy mac unicast addresses from @net_dev to @priv.
2384 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2385 */
2386 static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
2387 struct dpaa2_eth_priv *priv)
2388 {
2389 struct netdev_hw_addr *ha;
2390 int err;
2391
2392 netdev_for_each_uc_addr(ha, net_dev) {
2393 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2394 ha->addr);
2395 if (err)
2396 netdev_warn(priv->net_dev,
2397 "Could not add ucast MAC %pM to the filtering table (err %d)\n",
2398 ha->addr, err);
2399 }
2400 }
2401
2402 /* Copy mac multicast addresses from @net_dev to @priv
2403 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
2404 */
2405 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev,
2406 struct dpaa2_eth_priv *priv)
2407 {
2408 struct netdev_hw_addr *ha;
2409 int err;
2410
2411 netdev_for_each_mc_addr(ha, net_dev) {
2412 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
2413 ha->addr);
2414 if (err)
2415 netdev_warn(priv->net_dev,
2416 "Could not add mcast MAC %pM to the filtering table (err %d)\n",
2417 ha->addr, err);
2418 }
2419 }
2420
2421 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev,
2422 __be16 vlan_proto, u16 vid)
2423 {
2424 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2425 int err;
2426
2427 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token,
2428 vid, 0, 0, 0);
2429
2430 if (err) {
2431 netdev_warn(priv->net_dev,
2432 "Could not add the vlan id %u\n",
2433 vid);
2434 return err;
2435 }
2436
2437 return 0;
2438 }
2439
2440 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev,
2441 __be16 vlan_proto, u16 vid)
2442 {
2443 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2444 int err;
2445
2446 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid);
2447
2448 if (err) {
2449 netdev_warn(priv->net_dev,
2450 "Could not remove the vlan id %u\n",
2451 vid);
2452 return err;
2453 }
2454
2455 return 0;
2456 }
2457
2458 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
2459 {
2460 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2461 int uc_count = netdev_uc_count(net_dev);
2462 int mc_count = netdev_mc_count(net_dev);
2463 u8 max_mac = priv->dpni_attrs.mac_filter_entries;
2464 u32 options = priv->dpni_attrs.options;
2465 u16 mc_token = priv->mc_token;
2466 struct fsl_mc_io *mc_io = priv->mc_io;
2467 int err;
2468
2469 /* Basic sanity checks; these probably indicate a misconfiguration */
2470 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
2471 netdev_info(net_dev,
2472 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
2473 max_mac);
2474
2475 /* Force promiscuous if the uc or mc counts exceed our capabilities. */
2476 if (uc_count > max_mac) {
2477 netdev_info(net_dev,
2478 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
2479 uc_count, max_mac);
2480 goto force_promisc;
2481 }
2482 if (mc_count + uc_count > max_mac) {
2483 netdev_info(net_dev,
2484 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
2485 uc_count + mc_count, max_mac);
2486 goto force_mc_promisc;
2487 }
2488
2489 /* Adjust promisc settings due to flag combinations */
2490 if (net_dev->flags & IFF_PROMISC)
2491 goto force_promisc;
2492 if (net_dev->flags & IFF_ALLMULTI) {
2493 /* First, rebuild unicast filtering table. This should be done
2494 * in promisc mode, in order to avoid frame loss while we
2495 * progressively add entries to the table.
2496 * We don't know whether we had been in promisc already, and
2497 * making an MC call to find out is expensive; so set uc promisc
2498 * nonetheless.
2499 */
2500 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2501 if (err)
2502 netdev_warn(net_dev, "Can't set uc promisc\n");
2503
2504 /* Actual uc table reconstruction. */
2505 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
2506 if (err)
2507 netdev_warn(net_dev, "Can't clear uc filters\n");
2508 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2509
2510 /* Finally, clear uc promisc and set mc promisc as requested. */
2511 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2512 if (err)
2513 netdev_warn(net_dev, "Can't clear uc promisc\n");
2514 goto force_mc_promisc;
2515 }
2516
2517 /* Neither unicast, nor multicast promisc will be on... eventually.
2518 * For now, rebuild mac filtering tables while forcing both of them on.
2519 */
2520 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2521 if (err)
2522 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
2523 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2524 if (err)
2525 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);
2526
2527 /* Actual mac filtering tables reconstruction */
2528 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
2529 if (err)
2530 netdev_warn(net_dev, "Can't clear mac filters\n");
2531 dpaa2_eth_add_mc_hw_addr(net_dev, priv);
2532 dpaa2_eth_add_uc_hw_addr(net_dev, priv);
2533
2534 /* Now we can clear both ucast and mcast promisc, without risking
2535 * to drop legitimate frames anymore.
2536 */
2537 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
2538 if (err)
2539 netdev_warn(net_dev, "Can't clear ucast promisc\n");
2540 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
2541 if (err)
2542 netdev_warn(net_dev, "Can't clear mcast promisc\n");
2543
2544 return;
2545
2546 force_promisc:
2547 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
2548 if (err)
2549 netdev_warn(net_dev, "Can't set ucast promisc\n");
2550 force_mc_promisc:
2551 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
2552 if (err)
2553 netdev_warn(net_dev, "Can't set mcast promisc\n");
2554 }
2555
2556 static int dpaa2_eth_set_features(struct net_device *net_dev,
2557 netdev_features_t features)
2558 {
2559 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2560 netdev_features_t changed = features ^ net_dev->features;
2561 bool enable;
2562 int err;
2563
2564 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
2565 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER);
2566 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable);
2567 if (err)
2568 return err;
2569 }
2570
2571 if (changed & NETIF_F_RXCSUM) {
2572 enable = !!(features & NETIF_F_RXCSUM);
2573 err = dpaa2_eth_set_rx_csum(priv, enable);
2574 if (err)
2575 return err;
2576 }
2577
2578 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
2579 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
2580 err = dpaa2_eth_set_tx_csum(priv, enable);
2581 if (err)
2582 return err;
2583 }
2584
2585 return 0;
2586 }
2587
2588 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2589 {
2590 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2591 struct hwtstamp_config config;
2592
2593 if (!dpaa2_ptp)
2594 return -EINVAL;
2595
2596 if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
2597 return -EFAULT;
2598
2599 switch (config.tx_type) {
2600 case HWTSTAMP_TX_OFF:
2601 case HWTSTAMP_TX_ON:
2602 case HWTSTAMP_TX_ONESTEP_SYNC:
2603 priv->tx_tstamp_type = config.tx_type;
2604 break;
2605 default:
2606 return -ERANGE;
2607 }
2608
2609 if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
2610 priv->rx_tstamp = false;
2611 } else {
2612 priv->rx_tstamp = true;
2613 /* TS is set for all frame types, not only those requested */
2614 config.rx_filter = HWTSTAMP_FILTER_ALL;
2615 }
2616
2617 if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
2618 dpaa2_ptp_onestep_reg_update_method(priv);
2619
2620 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
2621 -EFAULT : 0;
2622 }
2623
2624 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2625 {
2626 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2627 int err;
2628
2629 if (cmd == SIOCSHWTSTAMP)
2630 return dpaa2_eth_ts_ioctl(dev, rq, cmd);
2631
2632 mutex_lock(&priv->mac_lock);
2633
2634 if (dpaa2_eth_is_type_phy(priv)) {
2635 err = phylink_mii_ioctl(priv->mac->phylink, rq, cmd);
2636 mutex_unlock(&priv->mac_lock);
2637 return err;
2638 }
2639
2640 mutex_unlock(&priv->mac_lock);
2641
2642 return -EOPNOTSUPP;
2643 }
2644
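/* With an XDP program attached, every frame must fit in a single Rx buffer
* (no scatter-gather), so the maximum L2 frame length derived from the MTU
* has to fit in rx_buf_size minus the hardware annotation area, the
* configured Rx headroom and the extra XDP_PACKET_HEADROOM.
*/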
2645 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
2646 {
2647 int mfl, linear_mfl;
2648
2649 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2650 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
2651 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;
2652
2653 if (mfl > linear_mfl) {
2654 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
2655 linear_mfl - VLAN_ETH_HLEN);
2656 return false;
2657 }
2658
2659 return true;
2660 }
2661
2662 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
2663 {
2664 int mfl, err;
2665
2666 /* We enforce a maximum Rx frame length based on MTU only if we have
2667 * an XDP program attached (in order to avoid Rx S/G frames).
2668 * Otherwise, we accept all incoming frames as long as they are not
2669 * larger than maximum size supported in hardware
2670 */
2671 if (has_xdp)
2672 mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
2673 else
2674 mfl = DPAA2_ETH_MFL;
2675
2676 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
2677 if (err) {
2678 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
2679 return err;
2680 }
2681
2682 return 0;
2683 }
2684
2685 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
2686 {
2687 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2688 int err;
2689
2690 if (!priv->xdp_prog)
2691 goto out;
2692
2693 if (!xdp_mtu_valid(priv, new_mtu))
2694 return -EINVAL;
2695
2696 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true);
2697 if (err)
2698 return err;
2699
2700 out:
2701 dev->mtu = new_mtu;
2702 return 0;
2703 }
2704
2705 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
2706 {
2707 struct dpni_buffer_layout buf_layout = {0};
2708 int err;
2709
2710 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
2711 DPNI_QUEUE_RX, &buf_layout);
2712 if (err) {
2713 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
2714 return err;
2715 }
2716
2717 /* Reserve extra headroom for XDP header size changes */
2718 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
2719 (has_xdp ? XDP_PACKET_HEADROOM : 0);
2720 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
2721 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
2722 DPNI_QUEUE_RX, &buf_layout);
2723 if (err) {
2724 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
2725 return err;
2726 }
2727
2728 return 0;
2729 }
2730
2731 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog)
2732 {
2733 struct dpaa2_eth_priv *priv = netdev_priv(dev);
2734 struct dpaa2_eth_channel *ch;
2735 struct bpf_prog *old;
2736 bool up, need_update;
2737 int i, err;
2738
2739 if (prog && !xdp_mtu_valid(priv, dev->mtu))
2740 return -EINVAL;
2741
2742 if (prog)
2743 bpf_prog_add(prog, priv->num_channels);
2744
2745 up = netif_running(dev);
2746 need_update = (!!priv->xdp_prog != !!prog);
2747
2748 if (up)
2749 dev_close(dev);
2750
2751 /* While in xdp mode, enforce a maximum Rx frame size based on MTU.
2752 * Also, when switching between xdp/non-xdp modes we need to reconfigure
2753 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
2754 * so we are sure no old format buffers will be used from now on.
2755 */
2756 if (need_update) {
2757 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog);
2758 if (err)
2759 goto out_err;
2760 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog);
2761 if (err)
2762 goto out_err;
2763 }
2764
2765 old = xchg(&priv->xdp_prog, prog);
2766 if (old)
2767 bpf_prog_put(old);
2768
2769 for (i = 0; i < priv->num_channels; i++) {
2770 ch = priv->channel[i];
2771 old = xchg(&ch->xdp.prog, prog);
2772 if (old)
2773 bpf_prog_put(old);
2774 }
2775
2776 if (up) {
2777 err = dev_open(dev, NULL);
2778 if (err)
2779 return err;
2780 }
2781
2782 return 0;
2783
2784 out_err:
2785 if (prog)
2786 bpf_prog_sub(prog, priv->num_channels);
2787 if (up)
2788 dev_open(dev, NULL);
2789
2790 return err;
2791 }
2792
2793 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
2794 {
2795 switch (xdp->command) {
2796 case XDP_SETUP_PROG:
2797 return dpaa2_eth_setup_xdp(dev, xdp->prog);
2798 case XDP_SETUP_XSK_POOL:
2799 return dpaa2_xsk_setup_pool(dev, xdp->xsk.pool, xdp->xsk.queue_id);
2800 default:
2801 return -EINVAL;
2802 }
2803
2804 return 0;
2805 }
2806
2807 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
2808 struct xdp_frame *xdpf,
2809 struct dpaa2_fd *fd)
2810 {
2811 struct device *dev = net_dev->dev.parent;
2812 unsigned int needed_headroom;
2813 struct dpaa2_eth_swa *swa;
2814 void *buffer_start, *aligned_start;
2815 dma_addr_t addr;
2816
2817 /* We require a minimum headroom to be able to transmit the frame.
2818 * Otherwise return an error and let the original net_device handle it
2819 */
2820 needed_headroom = dpaa2_eth_needed_headroom(NULL);
2821 if (xdpf->headroom < needed_headroom)
2822 return -EINVAL;
2823
2824 /* Setup the FD fields */
2825 memset(fd, 0, sizeof(*fd));
2826
2827 /* Align FD address, if possible */
2828 buffer_start = xdpf->data - needed_headroom;
2829 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
2830 DPAA2_ETH_TX_BUF_ALIGN);
2831 if (aligned_start >= xdpf->data - xdpf->headroom)
2832 buffer_start = aligned_start;
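/* PTR_ALIGN() on (buffer_start - align) yields an aligned address at most
* DPAA2_ETH_TX_BUF_ALIGN bytes below buffer_start; it is used only while it
* still lies within the frame's own headroom, otherwise the unaligned start
* is kept.
*/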
2833
2834 swa = (struct dpaa2_eth_swa *)buffer_start;
2835 /* fill in necessary fields here */
2836 swa->type = DPAA2_ETH_SWA_XDP;
2837 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
2838 swa->xdp.xdpf = xdpf;
2839
2840 addr = dma_map_single(dev, buffer_start,
2841 swa->xdp.dma_size,
2842 DMA_BIDIRECTIONAL);
2843 if (unlikely(dma_mapping_error(dev, addr)))
2844 return -ENOMEM;
2845
2846 dpaa2_fd_set_addr(fd, addr);
2847 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
2848 dpaa2_fd_set_len(fd, xdpf->len);
2849 dpaa2_fd_set_format(fd, dpaa2_fd_single);
2850 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);
2851
2852 return 0;
2853 }
2854
2855 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
2856 struct xdp_frame **frames, u32 flags)
2857 {
2858 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2859 struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
2860 struct rtnl_link_stats64 *percpu_stats;
2861 struct dpaa2_eth_fq *fq;
2862 struct dpaa2_fd *fds;
2863 int enqueued, i, err;
2864
2865 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
2866 return -EINVAL;
2867
2868 if (!netif_running(net_dev))
2869 return -ENETDOWN;
2870
2871 fq = &priv->fq[smp_processor_id()];
2872 xdp_redirect_fds = &fq->xdp_redirect_fds;
2873 fds = xdp_redirect_fds->fds;
2874
2875 percpu_stats = this_cpu_ptr(priv->percpu_stats);
2876
2877 /* create a FD for each xdp_frame in the list received */
2878 for (i = 0; i < n; i++) {
2879 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
2880 if (err)
2881 break;
2882 }
2883 xdp_redirect_fds->num = i;
2884
2885 /* enqueue all the frame descriptors */
2886 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);
2887
2888 /* update statistics */
2889 percpu_stats->tx_packets += enqueued;
2890 for (i = 0; i < enqueued; i++)
2891 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
2892
2893 return enqueued;
2894 }
2895
2896 static int update_xps(struct dpaa2_eth_priv *priv)
2897 {
2898 struct net_device *net_dev = priv->net_dev;
2899 int i, num_queues, netdev_queues;
2900 struct dpaa2_eth_fq *fq;
2901 cpumask_var_t xps_mask;
2902 int err = 0;
2903
2904 if (!alloc_cpumask_var(&xps_mask, GFP_KERNEL))
2905 return -ENOMEM;
2906
2907 num_queues = dpaa2_eth_queue_count(priv);
2908 netdev_queues = (net_dev->num_tc ? : 1) * num_queues;
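/* One netdev Tx queue per {traffic class, flow} pair; with no traffic
* classes configured there is still one set of num_queues Tx queues.
*/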
2909
2910 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf
2911 * queues, so only process those
2912 */
2913 for (i = 0; i < netdev_queues; i++) {
2914 fq = &priv->fq[i % num_queues];
2915
2916 cpumask_clear(xps_mask);
2917 cpumask_set_cpu(fq->target_cpu, xps_mask);
2918
2919 err = netif_set_xps_queue(net_dev, xps_mask, i);
2920 if (err) {
2921 netdev_warn_once(net_dev, "Error setting XPS queue\n");
2922 break;
2923 }
2924 }
2925
2926 free_cpumask_var(xps_mask);
2927 return err;
2928 }
2929
2930 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev,
2931 struct tc_mqprio_qopt *mqprio)
2932 {
2933 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2934 u8 num_tc, num_queues;
2935 int i;
2936
2937 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
2938 num_queues = dpaa2_eth_queue_count(priv);
2939 num_tc = mqprio->num_tc;
2940
2941 if (num_tc == net_dev->num_tc)
2942 return 0;
2943
2944 if (num_tc > dpaa2_eth_tc_count(priv)) {
2945 netdev_err(net_dev, "Max %d traffic classes supported\n",
2946 dpaa2_eth_tc_count(priv));
2947 return -EOPNOTSUPP;
2948 }
2949
2950 if (!num_tc) {
2951 netdev_reset_tc(net_dev);
2952 netif_set_real_num_tx_queues(net_dev, num_queues);
2953 goto out;
2954 }
2955
2956 netdev_set_num_tc(net_dev, num_tc);
2957 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);
2958
2959 for (i = 0; i < num_tc; i++)
2960 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);
2961
2962 out:
2963 update_xps(priv);
2964
2965 return 0;
2966 }
2967
2968 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8)
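/* Worked example: cfg->rate.rate_bytes_ps = 125000000 (1 Gbit/s) yields
* div_u64(125000000, 1000000) * 8 = 1000 Mbit/s, the unit expected by
* dpni_set_tx_shaping(). The integer division truncates the rate to a
* whole number of Mbytes/s before converting.
*/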
2969
2970 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p)
2971 {
2972 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params;
2973 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
2974 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 };
2975 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 };
2976 int err;
2977
2978 if (p->command == TC_TBF_STATS)
2979 return -EOPNOTSUPP;
2980
2981 /* Only per port Tx shaping */
2982 if (p->parent != TC_H_ROOT)
2983 return -EOPNOTSUPP;
2984
2985 if (p->command == TC_TBF_REPLACE) {
2986 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) {
2987 netdev_err(net_dev, "burst size cannot be greater than %d\n",
2988 DPAA2_ETH_MAX_BURST_SIZE);
2989 return -EINVAL;
2990 }
2991
2992 tx_cr_shaper.max_burst_size = cfg->max_size;
2993 /* The TBF interface is in bytes/s, whereas DPAA2 expects the
2994 * rate in Mbits/s
2995 */
2996 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps);
2997 }
2998
2999 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper,
3000 &tx_er_shaper, 0);
3001 if (err) {
3002 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err);
3003 return err;
3004 }
3005
3006 return 0;
3007 }
3008
3009 static int dpaa2_eth_setup_tc(struct net_device *net_dev,
3010 enum tc_setup_type type, void *type_data)
3011 {
3012 switch (type) {
3013 case TC_SETUP_QDISC_MQPRIO:
3014 return dpaa2_eth_setup_mqprio(net_dev, type_data);
3015 case TC_SETUP_QDISC_TBF:
3016 return dpaa2_eth_setup_tbf(net_dev, type_data);
3017 default:
3018 return -EOPNOTSUPP;
3019 }
3020 }
3021
3022 static const struct net_device_ops dpaa2_eth_ops = {
3023 .ndo_open = dpaa2_eth_open,
3024 .ndo_start_xmit = dpaa2_eth_tx,
3025 .ndo_stop = dpaa2_eth_stop,
3026 .ndo_set_mac_address = dpaa2_eth_set_addr,
3027 .ndo_get_stats64 = dpaa2_eth_get_stats,
3028 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
3029 .ndo_set_features = dpaa2_eth_set_features,
3030 .ndo_eth_ioctl = dpaa2_eth_ioctl,
3031 .ndo_change_mtu = dpaa2_eth_change_mtu,
3032 .ndo_bpf = dpaa2_eth_xdp,
3033 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
3034 .ndo_xsk_wakeup = dpaa2_xsk_wakeup,
3035 .ndo_setup_tc = dpaa2_eth_setup_tc,
3036 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid,
3037 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid
3038 };
3039
3040 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
3041 {
3042 struct dpaa2_eth_channel *ch;
3043
3044 ch = container_of(ctx, struct dpaa2_eth_channel, nctx);
3045
3046 /* Update NAPI statistics */
3047 ch->stats.cdan++;
3048
3049 /* NAPI can also be scheduled from the AF_XDP Tx path. If it is already
3050 * running, mark a missed event so that it gets rescheduled afterwards.
3051 */
3052 if (!napi_if_scheduled_mark_missed(&ch->napi))
3053 napi_schedule(&ch->napi);
3054 }
3055
3056 /* Allocate and configure a DPCON object */
3057 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv)
3058 {
3059 struct fsl_mc_device *dpcon;
3060 struct device *dev = priv->net_dev->dev.parent;
3061 int err;
3062
3063 err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
3064 FSL_MC_POOL_DPCON, &dpcon);
3065 if (err) {
3066 if (err == -ENXIO) {
3067 dev_dbg(dev, "Waiting for DPCON\n");
3068 err = -EPROBE_DEFER;
3069 } else {
3070 dev_info(dev, "Not enough DPCONs, will go on as-is\n");
3071 }
3072 return ERR_PTR(err);
3073 }
3074
3075 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
3076 if (err) {
3077 dev_err(dev, "dpcon_open() failed\n");
3078 goto free;
3079 }
3080
3081 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
3082 if (err) {
3083 dev_err(dev, "dpcon_reset() failed\n");
3084 goto close;
3085 }
3086
3087 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
3088 if (err) {
3089 dev_err(dev, "dpcon_enable() failed\n");
3090 goto close;
3091 }
3092
3093 return dpcon;
3094
3095 close:
3096 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3097 free:
3098 fsl_mc_object_free(dpcon);
3099
3100 return ERR_PTR(err);
3101 }
3102
3103 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv,
3104 struct fsl_mc_device *dpcon)
3105 {
3106 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
3107 dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
3108 fsl_mc_object_free(dpcon);
3109 }
3110
3111 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv)
3112 {
3113 struct dpaa2_eth_channel *channel;
3114 struct dpcon_attr attr;
3115 struct device *dev = priv->net_dev->dev.parent;
3116 int err;
3117
3118 channel = kzalloc(sizeof(*channel), GFP_KERNEL);
3119 if (!channel)
3120 return NULL;
3121
3122 channel->dpcon = dpaa2_eth_setup_dpcon(priv);
3123 if (IS_ERR(channel->dpcon)) {
3124 err = PTR_ERR(channel->dpcon);
3125 goto err_setup;
3126 }
3127
3128 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
3129 &attr);
3130 if (err) {
3131 dev_err(dev, "dpcon_get_attributes() failed\n");
3132 goto err_get_attr;
3133 }
3134
3135 channel->dpcon_id = attr.id;
3136 channel->ch_id = attr.qbman_ch_id;
3137 channel->priv = priv;
3138
3139 return channel;
3140
3141 err_get_attr:
3142 dpaa2_eth_free_dpcon(priv, channel->dpcon);
3143 err_setup:
3144 kfree(channel);
3145 return ERR_PTR(err);
3146 }
3147
3148 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv,
3149 struct dpaa2_eth_channel *channel)
3150 {
3151 dpaa2_eth_free_dpcon(priv, channel->dpcon);
3152 kfree(channel);
3153 }
3154
3155 /* DPIO setup: allocate and configure QBMan channels, setup core affinity
3156 * and register data availability notifications
3157 */
3158 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv)
3159 {
3160 struct dpaa2_io_notification_ctx *nctx;
3161 struct dpaa2_eth_channel *channel;
3162 struct dpcon_notification_cfg dpcon_notif_cfg;
3163 struct device *dev = priv->net_dev->dev.parent;
3164 int i, err;
3165
3166 /* We want the ability to spread ingress traffic (RX, TX conf) to as
3167 * many cores as possible, so we need one channel for each core
3168 * (unless there's fewer queues than cores, in which case the extra
3169 * channels would be wasted).
3170 * Allocate one channel per core and register it to the core's
3171 * affine DPIO. If not enough channels are available for all cores
3172 * or if some cores don't have an affine DPIO, there will be no
3173 * ingress frame processing on those cores.
3174 */
3175 cpumask_clear(&priv->dpio_cpumask);
3176 for_each_online_cpu(i) {
3177 /* Try to allocate a channel */
3178 channel = dpaa2_eth_alloc_channel(priv);
3179 if (IS_ERR_OR_NULL(channel)) {
3180 err = PTR_ERR_OR_ZERO(channel);
3181 if (err == -EPROBE_DEFER)
3182 dev_dbg(dev, "waiting for affine channel\n");
3183 else
3184 dev_info(dev,
3185 "No affine channel for cpu %d and above\n", i);
3186 goto err_alloc_ch;
3187 }
3188
3189 priv->channel[priv->num_channels] = channel;
3190
3191 nctx = &channel->nctx;
3192 nctx->is_cdan = 1;
3193 nctx->cb = dpaa2_eth_cdan_cb;
3194 nctx->id = channel->ch_id;
3195 nctx->desired_cpu = i;
3196
3197 /* Register the new context */
3198 channel->dpio = dpaa2_io_service_select(i);
3199 err = dpaa2_io_service_register(channel->dpio, nctx, dev);
3200 if (err) {
3201 dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
3202 /* If no affine DPIO for this core, there's probably
3203 * none available for next cores either. Signal we want
3204 * to retry later, in case the DPIO devices weren't
3205 * probed yet.
3206 */
3207 err = -EPROBE_DEFER;
3208 goto err_service_reg;
3209 }
3210
3211 /* Register DPCON notification with MC */
3212 dpcon_notif_cfg.dpio_id = nctx->dpio_id;
3213 dpcon_notif_cfg.priority = 0;
3214 dpcon_notif_cfg.user_ctx = nctx->qman64;
3215 err = dpcon_set_notification(priv->mc_io, 0,
3216 channel->dpcon->mc_handle,
3217 &dpcon_notif_cfg);
3218 if (err) {
3219 dev_err(dev, "dpcon_set_notification() failed\n");
3220 goto err_set_cdan;
3221 }
3222
3223 /* If we managed to allocate a channel and also found an affine
3224 * DPIO for this core, add it to the final mask
3225 */
3226 cpumask_set_cpu(i, &priv->dpio_cpumask);
3227 priv->num_channels++;
3228
3229 /* Stop if we already have enough channels to accommodate all
3230 * RX and TX conf queues
3231 */
3232 if (priv->num_channels == priv->dpni_attrs.num_queues)
3233 break;
3234 }
3235
3236 return 0;
3237
3238 err_set_cdan:
3239 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3240 err_service_reg:
3241 dpaa2_eth_free_channel(priv, channel);
3242 err_alloc_ch:
3243 if (err == -EPROBE_DEFER) {
3244 for (i = 0; i < priv->num_channels; i++) {
3245 channel = priv->channel[i];
3246 nctx = &channel->nctx;
3247 dpaa2_io_service_deregister(channel->dpio, nctx, dev);
3248 dpaa2_eth_free_channel(priv, channel);
3249 }
3250 priv->num_channels = 0;
3251 return err;
3252 }
3253
3254 if (cpumask_empty(&priv->dpio_cpumask)) {
3255 dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
3256 return -ENODEV;
3257 }
3258
3259 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
3260 cpumask_pr_args(&priv->dpio_cpumask));
3261
3262 return 0;
3263 }
3264
3265 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv)
3266 {
3267 struct device *dev = priv->net_dev->dev.parent;
3268 struct dpaa2_eth_channel *ch;
3269 int i;
3270
3271 /* deregister CDAN notifications and free channels */
3272 for (i = 0; i < priv->num_channels; i++) {
3273 ch = priv->channel[i];
3274 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
3275 dpaa2_eth_free_channel(priv, ch);
3276 }
3277 }
3278
3279 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv,
3280 int cpu)
3281 {
3282 struct device *dev = priv->net_dev->dev.parent;
3283 int i;
3284
3285 for (i = 0; i < priv->num_channels; i++)
3286 if (priv->channel[i]->nctx.desired_cpu == cpu)
3287 return priv->channel[i];
3288
3289 /* We should never get here. Issue a warning and return
3290 * the first channel, because it's still better than nothing
3291 */
3292 dev_warn(dev, "No affine channel found for cpu %d\n", cpu);
3293
3294 return priv->channel[0];
3295 }
3296
3297 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv)
3298 {
3299 struct device *dev = priv->net_dev->dev.parent;
3300 struct dpaa2_eth_fq *fq;
3301 int rx_cpu, txc_cpu;
3302 int i;
3303
3304 /* For each FQ, pick one channel/CPU to deliver frames to.
3305 * This may well change at runtime, either through irqbalance or
3306 * through direct user intervention.
3307 */
3308 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);
3309
3310 for (i = 0; i < priv->num_fqs; i++) {
3311 fq = &priv->fq[i];
3312 switch (fq->type) {
3313 case DPAA2_RX_FQ:
3314 case DPAA2_RX_ERR_FQ:
3315 fq->target_cpu = rx_cpu;
3316 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
3317 if (rx_cpu >= nr_cpu_ids)
3318 rx_cpu = cpumask_first(&priv->dpio_cpumask);
3319 break;
3320 case DPAA2_TX_CONF_FQ:
3321 fq->target_cpu = txc_cpu;
3322 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
3323 if (txc_cpu >= nr_cpu_ids)
3324 txc_cpu = cpumask_first(&priv->dpio_cpumask);
3325 break;
3326 default:
3327 dev_err(dev, "Unknown FQ type: %d\n", fq->type);
3328 }
3329 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu);
3330 }
3331
3332 update_xps(priv);
3333 }
3334
3335 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv)
3336 {
3337 int i, j;
3338
3339 /* We have one TxConf FQ per Tx flow.
3340 * The number of Tx and Rx queues is the same.
3341 * Tx queues come first in the fq array.
3342 */
3343 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3344 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
3345 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
3346 priv->fq[priv->num_fqs++].flowid = (u16)i;
3347 }
3348
3349 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3350 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
3351 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
3352 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
3353 priv->fq[priv->num_fqs].tc = (u8)j;
3354 priv->fq[priv->num_fqs++].flowid = (u16)i;
3355 }
3356 }
3357
3358 /* We have exactly one Rx error queue per DPNI */
3359 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ;
3360 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err;
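/* The resulting layout of the fq array is: num_queues Tx conf FQs first,
* then num_queues Rx FQs for each traffic class, and one Rx error FQ last.
*/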
3361
3362 /* For each FQ, decide on which core to process incoming frames */
3363 dpaa2_eth_set_fq_affinity(priv);
3364 }
3365
3366 /* Allocate and configure a buffer pool */
3367 struct dpaa2_eth_bp *dpaa2_eth_allocate_dpbp(struct dpaa2_eth_priv *priv)
3368 {
3369 struct device *dev = priv->net_dev->dev.parent;
3370 struct fsl_mc_device *dpbp_dev;
3371 struct dpbp_attr dpbp_attrs;
3372 struct dpaa2_eth_bp *bp;
3373 int err;
3374
3375 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
3376 &dpbp_dev);
3377 if (err) {
3378 if (err == -ENXIO)
3379 err = -EPROBE_DEFER;
3380 else
3381 dev_err(dev, "DPBP device allocation failed\n");
3382 return ERR_PTR(err);
3383 }
3384
3385 bp = kzalloc(sizeof(*bp), GFP_KERNEL);
3386 if (!bp) {
3387 err = -ENOMEM;
3388 goto err_alloc;
3389 }
3390
3391 err = dpbp_open(priv->mc_io, 0, dpbp_dev->obj_desc.id,
3392 &dpbp_dev->mc_handle);
3393 if (err) {
3394 dev_err(dev, "dpbp_open() failed\n");
3395 goto err_open;
3396 }
3397
3398 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
3399 if (err) {
3400 dev_err(dev, "dpbp_reset() failed\n");
3401 goto err_reset;
3402 }
3403
3404 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
3405 if (err) {
3406 dev_err(dev, "dpbp_enable() failed\n");
3407 goto err_enable;
3408 }
3409
3410 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
3411 &dpbp_attrs);
3412 if (err) {
3413 dev_err(dev, "dpbp_get_attributes() failed\n");
3414 goto err_get_attr;
3415 }
3416
3417 bp->dev = dpbp_dev;
3418 bp->bpid = dpbp_attrs.bpid;
3419
3420 return bp;
3421
3422 err_get_attr:
3423 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
3424 err_enable:
3425 err_reset:
3426 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
3427 err_open:
3428 kfree(bp);
3429 err_alloc:
3430 fsl_mc_object_free(dpbp_dev);
3431
3432 return ERR_PTR(err);
3433 }
3434
3435 static int dpaa2_eth_setup_default_dpbp(struct dpaa2_eth_priv *priv)
3436 {
3437 struct dpaa2_eth_bp *bp;
3438 int i;
3439
3440 bp = dpaa2_eth_allocate_dpbp(priv);
3441 if (IS_ERR(bp))
3442 return PTR_ERR(bp);
3443
3444 priv->bp[DPAA2_ETH_DEFAULT_BP_IDX] = bp;
3445 priv->num_bps++;
3446
3447 for (i = 0; i < priv->num_channels; i++)
3448 priv->channel[i]->bp = bp;
3449
3450 return 0;
3451 }
3452
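/* Tear down one buffer pool: drain it, disable and close the DPBP object,
* then compact the priv->bp array so it stays densely packed.
*/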
3453 void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv, struct dpaa2_eth_bp *bp)
3454 {
3455 int idx_bp;
3456
3457 /* Find the index at which this BP is stored */
3458 for (idx_bp = 0; idx_bp < priv->num_bps; idx_bp++)
3459 if (priv->bp[idx_bp] == bp)
3460 break;
3461
3462 /* Drain the pool and disable the associated MC object */
3463 dpaa2_eth_drain_pool(priv, bp->bpid);
3464 dpbp_disable(priv->mc_io, 0, bp->dev->mc_handle);
3465 dpbp_close(priv->mc_io, 0, bp->dev->mc_handle);
3466 fsl_mc_object_free(bp->dev);
3467 kfree(bp);
3468
3469 /* Move the last in use DPBP over in this position */
3470 priv->bp[idx_bp] = priv->bp[priv->num_bps - 1];
3471 priv->num_bps--;
3472 }
3473
3474 static void dpaa2_eth_free_dpbps(struct dpaa2_eth_priv *priv)
3475 {
3476 int i;
3477
3478 for (i = 0; i < priv->num_bps; i++)
3479 dpaa2_eth_free_dpbp(priv, priv->bp[i]);
3480 }
3481
3482 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv)
3483 {
3484 struct device *dev = priv->net_dev->dev.parent;
3485 struct dpni_buffer_layout buf_layout = {0};
3486 u16 rx_buf_align;
3487 int err;
3488
3489 /* We need to check for WRIOP version 1.0.0, but depending on the MC
3490 * version, this number is not always provided correctly on rev1.
3491 * We need to check for both alternatives in this situation.
3492 */
3493 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
3494 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
3495 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
3496 else
3497 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;
3498
3499 /* We need to ensure that the buffer size seen by WRIOP is a multiple
3500 * of 64 or 256 bytes depending on the WRIOP version.
3501 */
3502 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);
3503
3504 /* tx buffer */
3505 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
3506 buf_layout.pass_timestamp = true;
3507 buf_layout.pass_frame_status = true;
3508 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
3509 DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3510 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3511 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3512 DPNI_QUEUE_TX, &buf_layout);
3513 if (err) {
3514 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
3515 return err;
3516 }
3517
3518 /* tx-confirm buffer */
3519 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP |
3520 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS;
3521 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3522 DPNI_QUEUE_TX_CONFIRM, &buf_layout);
3523 if (err) {
3524 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
3525 return err;
3526 }
3527
3528 /* Now that we've set our tx buffer layout, retrieve the minimum
3529 * required tx data offset.
3530 */
3531 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
3532 &priv->tx_data_offset);
3533 if (err) {
3534 dev_err(dev, "dpni_get_tx_data_offset() failed\n");
3535 return err;
3536 }
3537
3538 if ((priv->tx_data_offset % 64) != 0)
3539 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
3540 priv->tx_data_offset);
3541
3542 /* rx buffer */
3543 buf_layout.pass_frame_status = true;
3544 buf_layout.pass_parser_result = true;
3545 buf_layout.data_align = rx_buf_align;
3546 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
3547 buf_layout.private_data_size = 0;
3548 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
3549 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
3550 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
3551 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
3552 DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
3553 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
3554 DPNI_QUEUE_RX, &buf_layout);
3555 if (err) {
3556 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
3557 return err;
3558 }
3559
3560 return 0;
3561 }
3562
3563 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7
3564 #define DPNI_ENQUEUE_FQID_VER_MINOR 9
3565
3566 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
3567 struct dpaa2_eth_fq *fq,
3568 struct dpaa2_fd *fd, u8 prio,
3569 u32 num_frames __always_unused,
3570 int *frames_enqueued)
3571 {
3572 int err;
3573
3574 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
3575 priv->tx_qdid, prio,
3576 fq->tx_qdbin, fd);
3577 if (!err && frames_enqueued)
3578 *frames_enqueued = 1;
3579 return err;
3580 }
3581
3582 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
3583 struct dpaa2_eth_fq *fq,
3584 struct dpaa2_fd *fd,
3585 u8 prio, u32 num_frames,
3586 int *frames_enqueued)
3587 {
3588 int err;
3589
3590 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
3591 fq->tx_fqid[prio],
3592 fd, num_frames);
3593
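/* The DPIO service call returns the number of frames actually enqueued;
 * translate zero into -EBUSY so the caller knows nothing was accepted and
 * can retry.
 */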
3594 if (err == 0)
3595 return -EBUSY;
3596
3597 if (frames_enqueued)
3598 *frames_enqueued = err;
3599 return 0;
3600 }
3601
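/* Select the Tx enqueue method: DPNI API versions >= 7.9 can enqueue
 * directly to Tx frame queues (FQID-based, with multi-frame support),
 * while older firmware must go through the queuing destination (QDID).
 */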
3602 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv)
3603 {
3604 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3605 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3606 priv->enqueue = dpaa2_eth_enqueue_qd;
3607 else
3608 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3609 }
3610
3611 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv)
3612 {
3613 struct device *dev = priv->net_dev->dev.parent;
3614 struct dpni_link_cfg link_cfg = {0};
3615 int err;
3616
3617 /* Get the default link options so we don't override other flags */
3618 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3619 if (err) {
3620 dev_err(dev, "dpni_get_link_cfg() failed\n");
3621 return err;
3622 }
3623
3624 /* By default, enable both Rx and Tx pause frames */
3625 link_cfg.options |= DPNI_LINK_OPT_PAUSE;
3626 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
3627 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
3628 if (err) {
3629 dev_err(dev, "dpni_set_link_cfg() failed\n");
3630 return err;
3631 }
3632
3633 priv->link_state.options = link_cfg.options;
3634
3635 return 0;
3636 }
3637
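/* Invoked from the "endpoint changed" interrupt handler: the Tx FQIDs may
 * have changed, so refresh the cached values or fall back to QDID-based
 * enqueue if they cannot be read.
 */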
3638 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv)
3639 {
3640 struct dpni_queue_id qid = {0};
3641 struct dpaa2_eth_fq *fq;
3642 struct dpni_queue queue;
3643 int i, j, err;
3644
3645 /* We only use Tx FQIDs for FQID-based enqueue, so check
3646 * if DPNI version supports it before updating FQIDs
3647 */
3648 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
3649 DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
3650 return;
3651
3652 for (i = 0; i < priv->num_fqs; i++) {
3653 fq = &priv->fq[i];
3654 if (fq->type != DPAA2_TX_CONF_FQ)
3655 continue;
3656 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
3657 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3658 DPNI_QUEUE_TX, j, fq->flowid,
3659 &queue, &qid);
3660 if (err)
3661 goto out_err;
3662
3663 fq->tx_fqid[j] = qid.fqid;
3664 if (fq->tx_fqid[j] == 0)
3665 goto out_err;
3666 }
3667 }
3668
3669 priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
3670
3671 return;
3672
3673 out_err:
3674 netdev_info(priv->net_dev,
3675 "Error reading Tx FQID, fallback to QDID-based enqueue\n");
3676 priv->enqueue = dpaa2_eth_enqueue_qd;
3677 }
3678
3679 /* Configure ingress classification based on VLAN PCP */
3680 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv)
3681 {
3682 struct device *dev = priv->net_dev->dev.parent;
3683 struct dpkg_profile_cfg kg_cfg = {0};
3684 struct dpni_qos_tbl_cfg qos_cfg = {0};
3685 struct dpni_rule_cfg key_params;
3686 void *dma_mem, *key, *mask;
3687 u8 key_size = 2; /* VLAN TCI field */
3688 int i, pcp, err;
3689
3690 /* VLAN-based classification only makes sense if we have multiple
3691 * traffic classes.
3692 * Also, we need to extract just the 3-bit PCP field from the VLAN
3693 * header and we can only do that by using a mask
3694 */
3695 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
3696 dev_dbg(dev, "VLAN-based QoS classification not supported\n");
3697 return -EOPNOTSUPP;
3698 }
3699
3700 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
3701 if (!dma_mem)
3702 return -ENOMEM;
3703
3704 kg_cfg.num_extracts = 1;
3705 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
3706 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
3707 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
3708 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;
3709
3710 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
3711 if (err) {
3712 dev_err(dev, "dpni_prepare_key_cfg failed\n");
3713 goto out_free_tbl;
3714 }
3715
3716 /* set QoS table */
3717 qos_cfg.default_tc = 0;
3718 qos_cfg.discard_on_miss = 0;
3719 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
3720 DPAA2_CLASSIFIER_DMA_SIZE,
3721 DMA_TO_DEVICE);
3722 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
3723 dev_err(dev, "QoS table DMA mapping failed\n");
3724 err = -ENOMEM;
3725 goto out_free_tbl;
3726 }
3727
3728 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
3729 if (err) {
3730 dev_err(dev, "dpni_set_qos_table failed\n");
3731 goto out_unmap_tbl;
3732 }
3733
3734 /* Add QoS table entries */
3735 key = kzalloc(key_size * 2, GFP_KERNEL);
3736 if (!key) {
3737 err = -ENOMEM;
3738 goto out_unmap_tbl;
3739 }
3740 mask = key + key_size;
3741 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);
3742
3743 key_params.key_iova = dma_map_single(dev, key, key_size * 2,
3744 DMA_TO_DEVICE);
3745 if (dma_mapping_error(dev, key_params.key_iova)) {
3746 dev_err(dev, "Qos table entry DMA mapping failed\n");
3747 err = -ENOMEM;
3748 goto out_free_key;
3749 }
3750
3751 key_params.mask_iova = key_params.key_iova + key_size;
3752 key_params.key_size = key_size;
3753
3754 /* We add rules for PCP-based distribution starting with highest
3755 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
3756 * classes to accommodate all priority levels, the lowest ones end up
3757 * on TC 0, which was configured as the default.
3758 */
3759 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
3760 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
3761 dma_sync_single_for_device(dev, key_params.key_iova,
3762 key_size * 2, DMA_TO_DEVICE);
3763
3764 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
3765 &key_params, i, i);
3766 if (err) {
3767 dev_err(dev, "dpni_add_qos_entry failed\n");
3768 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
3769 goto out_unmap_key;
3770 }
3771 }
3772
3773 priv->vlan_cls_enabled = true;
3774
3775 /* Table and key memory is not persistent, clean everything up after
3776 * configuration is finished
3777 */
3778 out_unmap_key:
3779 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
3780 out_free_key:
3781 kfree(key);
3782 out_unmap_tbl:
3783 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
3784 DMA_TO_DEVICE);
3785 out_free_tbl:
3786 kfree(dma_mem);
3787
3788 return err;
3789 }
3790
3791 /* Configure the DPNI object this interface is associated with */
3792 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev)
3793 {
3794 struct device *dev = &ls_dev->dev;
3795 struct dpaa2_eth_priv *priv;
3796 struct net_device *net_dev;
3797 int err;
3798
3799 net_dev = dev_get_drvdata(dev);
3800 priv = netdev_priv(net_dev);
3801
3802 /* get a handle for the DPNI object */
3803 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
3804 if (err) {
3805 dev_err(dev, "dpni_open() failed\n");
3806 return err;
3807 }
3808
3809 /* Check if we can work with this DPNI object */
3810 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
3811 &priv->dpni_ver_minor);
3812 if (err) {
3813 dev_err(dev, "dpni_get_api_version() failed\n");
3814 goto close;
3815 }
3816 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
3817 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
3818 priv->dpni_ver_major, priv->dpni_ver_minor,
3819 DPNI_VER_MAJOR, DPNI_VER_MINOR);
3820 err = -EOPNOTSUPP;
3821 goto close;
3822 }
3823
3824 ls_dev->mc_io = priv->mc_io;
3825 ls_dev->mc_handle = priv->mc_token;
3826
3827 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3828 if (err) {
3829 dev_err(dev, "dpni_reset() failed\n");
3830 goto close;
3831 }
3832
3833 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
3834 &priv->dpni_attrs);
3835 if (err) {
3836 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
3837 goto close;
3838 }
3839
3840 err = dpaa2_eth_set_buffer_layout(priv);
3841 if (err)
3842 goto close;
3843
3844 dpaa2_eth_set_enqueue_mode(priv);
3845
3846 /* Enable pause frame support */
3847 if (dpaa2_eth_has_pause_support(priv)) {
3848 err = dpaa2_eth_set_pause(priv);
3849 if (err)
3850 goto close;
3851 }
3852
3853 err = dpaa2_eth_set_vlan_qos(priv);
3854 if (err && err != -EOPNOTSUPP)
3855 goto close;
3856
3857 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv),
3858 sizeof(struct dpaa2_eth_cls_rule),
3859 GFP_KERNEL);
3860 if (!priv->cls_rules) {
3861 err = -ENOMEM;
3862 goto close;
3863 }
3864
3865 return 0;
3866
3867 close:
3868 dpni_close(priv->mc_io, 0, priv->mc_token);
3869
3870 return err;
3871 }
3872
3873 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv)
3874 {
3875 int err;
3876
3877 err = dpni_reset(priv->mc_io, 0, priv->mc_token);
3878 if (err)
3879 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
3880 err);
3881
3882 dpni_close(priv->mc_io, 0, priv->mc_token);
3883 }
3884
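/* Configure a Rx frame queue: cache its FQID, have it notify the affine
 * DPCON channel and, once per channel, register the xdp_rxq info needed
 * for XDP processing.
 */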
3885 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv,
3886 struct dpaa2_eth_fq *fq)
3887 {
3888 struct device *dev = priv->net_dev->dev.parent;
3889 struct dpni_queue queue;
3890 struct dpni_queue_id qid;
3891 int err;
3892
3893 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3894 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
3895 if (err) {
3896 dev_err(dev, "dpni_get_queue(RX) failed\n");
3897 return err;
3898 }
3899
3900 fq->fqid = qid.fqid;
3901
3902 queue.destination.id = fq->channel->dpcon_id;
3903 queue.destination.type = DPNI_DEST_DPCON;
3904 queue.destination.priority = 1;
3905 queue.user_context = (u64)(uintptr_t)fq;
3906 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3907 DPNI_QUEUE_RX, fq->tc, fq->flowid,
3908 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3909 &queue);
3910 if (err) {
3911 dev_err(dev, "dpni_set_queue(RX) failed\n");
3912 return err;
3913 }
3914
3915 /* xdp_rxq setup */
3916 /* Register it only once per channel, i.e. on the tc 0 queue */
3917 if (fq->tc > 0)
3918 return 0;
3919
3920 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
3921 fq->flowid, 0);
3922 if (err) {
3923 dev_err(dev, "xdp_rxq_info_reg failed\n");
3924 return err;
3925 }
3926
3927 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
3928 MEM_TYPE_PAGE_ORDER0, NULL);
3929 if (err) {
3930 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
3931 return err;
3932 }
3933
3934 return 0;
3935 }
3936
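/* Configure a Tx flow: cache the Tx FQIDs for all traffic classes and
 * direct the associated Tx confirmation queue to the affine channel.
 */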
3937 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv,
3938 struct dpaa2_eth_fq *fq)
3939 {
3940 struct device *dev = priv->net_dev->dev.parent;
3941 struct dpni_queue queue;
3942 struct dpni_queue_id qid;
3943 int i, err;
3944
3945 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
3946 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3947 DPNI_QUEUE_TX, i, fq->flowid,
3948 &queue, &qid);
3949 if (err) {
3950 dev_err(dev, "dpni_get_queue(TX) failed\n");
3951 return err;
3952 }
3953 fq->tx_fqid[i] = qid.fqid;
3954 }
3955
3956 /* All Tx queues belonging to the same flowid have the same qdbin */
3957 fq->tx_qdbin = qid.qdbin;
3958
3959 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3960 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3961 &queue, &qid);
3962 if (err) {
3963 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
3964 return err;
3965 }
3966
3967 fq->fqid = qid.fqid;
3968
3969 queue.destination.id = fq->channel->dpcon_id;
3970 queue.destination.type = DPNI_DEST_DPCON;
3971 queue.destination.priority = 0;
3972 queue.user_context = (u64)(uintptr_t)fq;
3973 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
3974 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
3975 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
3976 &queue);
3977 if (err) {
3978 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
3979 return err;
3980 }
3981
3982 return 0;
3983 }
3984
3985 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv,
3986 struct dpaa2_eth_fq *fq)
3987 {
3988 struct device *dev = priv->net_dev->dev.parent;
3989 struct dpni_queue q = { { 0 } };
3990 struct dpni_queue_id qid;
3991 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST;
3992 int err;
3993
3994 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
3995 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid);
3996 if (err) {
3997 dev_err(dev, "dpni_get_queue() failed (%d)\n", err);
3998 return err;
3999 }
4000
4001 fq->fqid = qid.fqid;
4002
4003 q.destination.id = fq->channel->dpcon_id;
4004 q.destination.type = DPNI_DEST_DPCON;
4005 q.destination.priority = 1;
4006 q.user_context = (u64)(uintptr_t)fq;
4007 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
4008 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q);
4009 if (err) {
4010 dev_err(dev, "dpni_set_queue() failed (%d)\n", err);
4011 return err;
4012 }
4013
4014 return 0;
4015 }
4016
4017 /* Supported header fields for Rx hash distribution key */
4018 static const struct dpaa2_eth_dist_fields dist_fields[] = {
4019 {
4020 /* L2 header */
4021 .rxnfc_field = RXH_L2DA,
4022 .cls_prot = NET_PROT_ETH,
4023 .cls_field = NH_FLD_ETH_DA,
4024 .id = DPAA2_ETH_DIST_ETHDST,
4025 .size = 6,
4026 }, {
4027 .cls_prot = NET_PROT_ETH,
4028 .cls_field = NH_FLD_ETH_SA,
4029 .id = DPAA2_ETH_DIST_ETHSRC,
4030 .size = 6,
4031 }, {
4032 /* This is the last ethertype field parsed:
4033 * depending on frame format, it can be the MAC ethertype
4034 * or the VLAN etype.
4035 */
4036 .cls_prot = NET_PROT_ETH,
4037 .cls_field = NH_FLD_ETH_TYPE,
4038 .id = DPAA2_ETH_DIST_ETHTYPE,
4039 .size = 2,
4040 }, {
4041 /* VLAN header */
4042 .rxnfc_field = RXH_VLAN,
4043 .cls_prot = NET_PROT_VLAN,
4044 .cls_field = NH_FLD_VLAN_TCI,
4045 .id = DPAA2_ETH_DIST_VLAN,
4046 .size = 2,
4047 }, {
4048 /* IP header */
4049 .rxnfc_field = RXH_IP_SRC,
4050 .cls_prot = NET_PROT_IP,
4051 .cls_field = NH_FLD_IP_SRC,
4052 .id = DPAA2_ETH_DIST_IPSRC,
4053 .size = 4,
4054 }, {
4055 .rxnfc_field = RXH_IP_DST,
4056 .cls_prot = NET_PROT_IP,
4057 .cls_field = NH_FLD_IP_DST,
4058 .id = DPAA2_ETH_DIST_IPDST,
4059 .size = 4,
4060 }, {
4061 .rxnfc_field = RXH_L3_PROTO,
4062 .cls_prot = NET_PROT_IP,
4063 .cls_field = NH_FLD_IP_PROTO,
4064 .id = DPAA2_ETH_DIST_IPPROTO,
4065 .size = 1,
4066 }, {
4067 /* Using UDP ports, this is functionally equivalent to raw
4068 * byte pairs from the L4 header.
4069 */
4070 .rxnfc_field = RXH_L4_B_0_1,
4071 .cls_prot = NET_PROT_UDP,
4072 .cls_field = NH_FLD_UDP_PORT_SRC,
4073 .id = DPAA2_ETH_DIST_L4SRC,
4074 .size = 2,
4075 }, {
4076 .rxnfc_field = RXH_L4_B_2_3,
4077 .cls_prot = NET_PROT_UDP,
4078 .cls_field = NH_FLD_UDP_PORT_DST,
4079 .id = DPAA2_ETH_DIST_L4DST,
4080 .size = 2,
4081 },
4082 };
4083
4084 /* Configure the Rx hash key using the legacy API */
4085 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4086 {
4087 struct device *dev = priv->net_dev->dev.parent;
4088 struct dpni_rx_tc_dist_cfg dist_cfg;
4089 int i, err = 0;
4090
4091 memset(&dist_cfg, 0, sizeof(dist_cfg));
4092
4093 dist_cfg.key_cfg_iova = key;
4094 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4095 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
4096
4097 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4098 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
4099 i, &dist_cfg);
4100 if (err) {
4101 dev_err(dev, "dpni_set_rx_tc_dist failed\n");
4102 break;
4103 }
4104 }
4105
4106 return err;
4107 }
4108
4109 /* Configure the Rx hash key using the new API */
4110 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4111 {
4112 struct device *dev = priv->net_dev->dev.parent;
4113 struct dpni_rx_dist_cfg dist_cfg;
4114 int i, err = 0;
4115
4116 memset(&dist_cfg, 0, sizeof(dist_cfg));
4117
4118 dist_cfg.key_cfg_iova = key;
4119 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4120 dist_cfg.enable = 1;
4121
4122 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4123 dist_cfg.tc = i;
4124 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
4125 &dist_cfg);
4126 if (err) {
4127 dev_err(dev, "dpni_set_rx_hash_dist failed\n");
4128 break;
4129 }
4130
4131 /* If the flow steering / hashing key is shared between all
4132 * traffic classes, install it just once
4133 */
4134 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4135 break;
4136 }
4137
4138 return err;
4139 }
4140
4141 /* Configure the Rx flow classification key */
4142 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
4143 {
4144 struct device *dev = priv->net_dev->dev.parent;
4145 struct dpni_rx_dist_cfg dist_cfg;
4146 int i, err = 0;
4147
4148 memset(&dist_cfg, 0, sizeof(dist_cfg));
4149
4150 dist_cfg.key_cfg_iova = key;
4151 dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
4152 dist_cfg.enable = 1;
4153
4154 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
4155 dist_cfg.tc = i;
4156 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
4157 &dist_cfg);
4158 if (err) {
4159 dev_err(dev, "dpni_set_rx_fs_dist failed\n");
4160 break;
4161 }
4162
4163 /* If the flow steering / hashing key is shared between all
4164 * traffic classes, install it just once
4165 */
4166 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS)
4167 break;
4168 }
4169
4170 return err;
4171 }
4172
4173 /* Size of the Rx flow classification key */
4174 int dpaa2_eth_cls_key_size(u64 fields)
4175 {
4176 int i, size = 0;
4177
4178 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4179 if (!(fields & dist_fields[i].id))
4180 continue;
4181 size += dist_fields[i].size;
4182 }
4183
4184 return size;
4185 }
4186
4187 /* Offset of header field in Rx classification key */
4188 int dpaa2_eth_cls_fld_off(int prot, int field)
4189 {
4190 int i, off = 0;
4191
4192 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4193 if (dist_fields[i].cls_prot == prot &&
4194 dist_fields[i].cls_field == field)
4195 return off;
4196 off += dist_fields[i].size;
4197 }
4198
4199 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
4200 return 0;
4201 }
4202
4203 /* Prune unused fields from the classification rule.
4204 * Used when masking is not supported
4205 */
4206 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
4207 {
4208 int off = 0, new_off = 0;
4209 int i, size;
4210
4211 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4212 size = dist_fields[i].size;
4213 if (dist_fields[i].id & fields) {
4214 memcpy(key_mem + new_off, key_mem + off, size);
4215 new_off += size;
4216 }
4217 off += size;
4218 }
4219 }
4220
4221 /* Set Rx distribution (hash or flow classification) key
4222 * flags is a combination of RXH_ bits
4223 */
4224 static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
4225 enum dpaa2_eth_rx_dist type, u64 flags)
4226 {
4227 struct device *dev = net_dev->dev.parent;
4228 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4229 struct dpkg_profile_cfg cls_cfg;
4230 u32 rx_hash_fields = 0;
4231 dma_addr_t key_iova;
4232 u8 *dma_mem;
4233 int i;
4234 int err = 0;
4235
4236 memset(&cls_cfg, 0, sizeof(cls_cfg));
4237
4238 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
4239 struct dpkg_extract *key =
4240 &cls_cfg.extracts[cls_cfg.num_extracts];
4241
4242 /* For both Rx hashing and classification keys
4243 * we set only the selected fields.
4244 */
4245 if (!(flags & dist_fields[i].id))
4246 continue;
4247 if (type == DPAA2_ETH_RX_DIST_HASH)
4248 rx_hash_fields |= dist_fields[i].rxnfc_field;
4249
4250 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
4251 dev_err(dev, "error adding key extraction rule, too many rules?\n");
4252 return -E2BIG;
4253 }
4254
4255 key->type = DPKG_EXTRACT_FROM_HDR;
4256 key->extract.from_hdr.prot = dist_fields[i].cls_prot;
4257 key->extract.from_hdr.type = DPKG_FULL_FIELD;
4258 key->extract.from_hdr.field = dist_fields[i].cls_field;
4259 cls_cfg.num_extracts++;
4260 }
4261
4262 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
4263 if (!dma_mem)
4264 return -ENOMEM;
4265
4266 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
4267 if (err) {
4268 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
4269 goto free_key;
4270 }
4271
4272 /* Prepare for setting the rx dist */
4273 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
4274 DMA_TO_DEVICE);
4275 if (dma_mapping_error(dev, key_iova)) {
4276 dev_err(dev, "DMA mapping failed\n");
4277 err = -ENOMEM;
4278 goto free_key;
4279 }
4280
4281 if (type == DPAA2_ETH_RX_DIST_HASH) {
4282 if (dpaa2_eth_has_legacy_dist(priv))
4283 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova);
4284 else
4285 err = dpaa2_eth_config_hash_key(priv, key_iova);
4286 } else {
4287 err = dpaa2_eth_config_cls_key(priv, key_iova);
4288 }
4289
4290 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
4291 DMA_TO_DEVICE);
4292 if (!err && type == DPAA2_ETH_RX_DIST_HASH)
4293 priv->rx_hash_fields = rx_hash_fields;
4294
4295 free_key:
4296 kfree(dma_mem);
4297 return err;
4298 }
4299
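/* Set the Rx hash key to cover the header fields selected through the
 * RXH_* flags, e.g. as received on the ethtool rxnfc path.
 */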
4300 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
4301 {
4302 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4303 u64 key = 0;
4304 int i;
4305
4306 if (!dpaa2_eth_hash_enabled(priv))
4307 return -EOPNOTSUPP;
4308
4309 for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
4310 if (dist_fields[i].rxnfc_field & flags)
4311 key |= dist_fields[i].id;
4312
4313 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
4314 }
4315
4316 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
4317 {
4318 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
4319 }
4320
4321 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
4322 {
4323 struct device *dev = priv->net_dev->dev.parent;
4324 int err;
4325
4326 /* Check if we actually support Rx flow classification */
4327 if (dpaa2_eth_has_legacy_dist(priv)) {
4328 dev_dbg(dev, "Rx cls not supported by current MC version\n");
4329 return -EOPNOTSUPP;
4330 }
4331
4332 if (!dpaa2_eth_fs_enabled(priv)) {
4333 dev_dbg(dev, "Rx cls disabled in DPNI options\n");
4334 return -EOPNOTSUPP;
4335 }
4336
4337 if (!dpaa2_eth_hash_enabled(priv)) {
4338 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
4339 return -EOPNOTSUPP;
4340 }
4341
4342 /* If there is no support for masking in the classification table,
4343 * we don't set a default key, as it will depend on the rules
4344 * added by the user at runtime.
4345 */
4346 if (!dpaa2_eth_fs_mask_enabled(priv))
4347 goto out;
4348
4349 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
4350 if (err)
4351 return err;
4352
4353 out:
4354 priv->rx_cls_enabled = 1;
4355
4356 return 0;
4357 }
4358
4359 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
4360 * frame queues and channels
4361 */
4362 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv)
4363 {
4364 struct dpaa2_eth_bp *bp = priv->bp[DPAA2_ETH_DEFAULT_BP_IDX];
4365 struct net_device *net_dev = priv->net_dev;
4366 struct dpni_pools_cfg pools_params = { 0 };
4367 struct device *dev = net_dev->dev.parent;
4368 struct dpni_error_cfg err_cfg;
4369 int err = 0;
4370 int i;
4371
4372 pools_params.num_dpbp = 1;
4373 pools_params.pools[0].dpbp_id = bp->dev->obj_desc.id;
4374 pools_params.pools[0].backup_pool = 0;
4375 pools_params.pools[0].buffer_size = priv->rx_buf_size;
4376 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
4377 if (err) {
4378 dev_err(dev, "dpni_set_pools() failed\n");
4379 return err;
4380 }
4381
4382 /* have the interface implicitly distribute traffic based on
4383 * the default hash key
4384 */
4385 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
4386 if (err && err != -EOPNOTSUPP)
4387 dev_err(dev, "Failed to configure hashing\n");
4388
4389 /* Configure the flow classification key; it includes all
4390 * supported header fields and cannot be modified at runtime
4391 */
4392 err = dpaa2_eth_set_default_cls(priv);
4393 if (err && err != -EOPNOTSUPP)
4394 dev_err(dev, "Failed to configure Rx classification key\n");
4395
4396 /* Configure handling of error frames */
4397 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
4398 err_cfg.set_frame_annotation = 1;
4399 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
4400 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
4401 &err_cfg);
4402 if (err) {
4403 dev_err(dev, "dpni_set_errors_behavior failed\n");
4404 return err;
4405 }
4406
4407 /* Configure Rx and Tx conf queues to generate CDANs */
4408 for (i = 0; i < priv->num_fqs; i++) {
4409 switch (priv->fq[i].type) {
4410 case DPAA2_RX_FQ:
4411 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]);
4412 break;
4413 case DPAA2_TX_CONF_FQ:
4414 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]);
4415 break;
4416 case DPAA2_RX_ERR_FQ:
4417 err = setup_rx_err_flow(priv, &priv->fq[i]);
4418 break;
4419 default:
4420 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
4421 return -EINVAL;
4422 }
4423 if (err)
4424 return err;
4425 }
4426
4427 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
4428 DPNI_QUEUE_TX, &priv->tx_qdid);
4429 if (err) {
4430 dev_err(dev, "dpni_get_qdid() failed\n");
4431 return err;
4432 }
4433
4434 return 0;
4435 }
4436
4437 /* Allocate rings for storing incoming frame descriptors */
4438 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv)
4439 {
4440 struct net_device *net_dev = priv->net_dev;
4441 struct device *dev = net_dev->dev.parent;
4442 int i;
4443
4444 for (i = 0; i < priv->num_channels; i++) {
4445 priv->channel[i]->store =
4446 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
4447 if (!priv->channel[i]->store) {
4448 netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
4449 goto err_ring;
4450 }
4451 }
4452
4453 return 0;
4454
4455 err_ring:
4456 for (i = 0; i < priv->num_channels; i++) {
4457 if (!priv->channel[i]->store)
4458 break;
4459 dpaa2_io_store_destroy(priv->channel[i]->store);
4460 }
4461
4462 return -ENOMEM;
4463 }
4464
4465 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv)
4466 {
4467 int i;
4468
4469 for (i = 0; i < priv->num_channels; i++)
4470 dpaa2_io_store_destroy(priv->channel[i]->store);
4471 }
4472
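/* Choose the netdev MAC address: prefer the address provisioned by the
 * firmware/bootloader on the port, then the one already configured on the
 * DPNI, and generate a random address as a last resort.
 */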
4473 static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
4474 {
4475 struct net_device *net_dev = priv->net_dev;
4476 struct device *dev = net_dev->dev.parent;
4477 u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
4478 int err;
4479
4480 /* Get firmware address, if any */
4481 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
4482 if (err) {
4483 dev_err(dev, "dpni_get_port_mac_addr() failed\n");
4484 return err;
4485 }
4486
4487 /* Get the MAC address currently set on the DPNI, if any */
4488 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4489 dpni_mac_addr);
4490 if (err) {
4491 dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
4492 return err;
4493 }
4494
4495 /* First check if firmware has any address configured by bootloader */
4496 if (!is_zero_ether_addr(mac_addr)) {
4497 /* If the DPMAC addr != DPNI addr, update it */
4498 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
4499 err = dpni_set_primary_mac_addr(priv->mc_io, 0,
4500 priv->mc_token,
4501 mac_addr);
4502 if (err) {
4503 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4504 return err;
4505 }
4506 }
4507 eth_hw_addr_set(net_dev, mac_addr);
4508 } else if (is_zero_ether_addr(dpni_mac_addr)) {
4509 /* No MAC address configured, fill in net_dev->dev_addr
4510 * with a random one
4511 */
4512 eth_hw_addr_random(net_dev);
4513 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");
4514
4515 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
4516 net_dev->dev_addr);
4517 if (err) {
4518 dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
4519 return err;
4520 }
4521
4522 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
4523 * practical purposes, this will be our "permanent" MAC address,
4524 * at least until the next reboot. This also permits
4525 * register_netdevice() to properly fill in net_dev->perm_addr.
4526 */
4527 net_dev->addr_assign_type = NET_ADDR_PERM;
4528 } else {
4529 /* NET_ADDR_PERM is default, all we have to do is
4530 * fill in the device addr.
4531 */
4532 eth_hw_addr_set(net_dev, dpni_mac_addr);
4533 }
4534
4535 return 0;
4536 }
4537
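/* Populate the net_device: ops, MAC and broadcast addresses, MTU limit,
 * real queue counts and the supported netdev/XDP feature flags.
 */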
4538 static int dpaa2_eth_netdev_init(struct net_device *net_dev)
4539 {
4540 struct device *dev = net_dev->dev.parent;
4541 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4542 u32 options = priv->dpni_attrs.options;
4543 u64 supported = 0, not_supported = 0;
4544 u8 bcast_addr[ETH_ALEN];
4545 u8 num_queues;
4546 int err;
4547
4548 net_dev->netdev_ops = &dpaa2_eth_ops;
4549 net_dev->ethtool_ops = &dpaa2_ethtool_ops;
4550
4551 err = dpaa2_eth_set_mac_addr(priv);
4552 if (err)
4553 return err;
4554
4555 /* Explicitly add the broadcast address to the MAC filtering table */
4556 eth_broadcast_addr(bcast_addr);
4557 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
4558 if (err) {
4559 dev_err(dev, "dpni_add_mac_addr() failed\n");
4560 return err;
4561 }
4562
4563 /* Set MTU upper limit; lower limit is 68B (default value) */
4564 net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
4565 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
4566 DPAA2_ETH_MFL);
4567 if (err) {
4568 dev_err(dev, "dpni_set_max_frame_length() failed\n");
4569 return err;
4570 }
4571
4572 /* Set actual number of queues in the net device */
4573 num_queues = dpaa2_eth_queue_count(priv);
4574 err = netif_set_real_num_tx_queues(net_dev, num_queues);
4575 if (err) {
4576 dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
4577 return err;
4578 }
4579 err = netif_set_real_num_rx_queues(net_dev, num_queues);
4580 if (err) {
4581 dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
4582 return err;
4583 }
4584
4585 dpaa2_eth_detect_features(priv);
4586
4587 /* Capabilities listing */
4588 supported |= IFF_LIVE_ADDR_CHANGE;
4589
4590 if (options & DPNI_OPT_NO_MAC_FILTER)
4591 not_supported |= IFF_UNICAST_FLT;
4592 else
4593 supported |= IFF_UNICAST_FLT;
4594
4595 net_dev->priv_flags |= supported;
4596 net_dev->priv_flags &= ~not_supported;
4597
4598 /* Features */
4599 net_dev->features = NETIF_F_RXCSUM |
4600 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4601 NETIF_F_SG | NETIF_F_HIGHDMA |
4602 NETIF_F_LLTX | NETIF_F_HW_TC | NETIF_F_TSO;
4603 net_dev->gso_max_segs = DPAA2_ETH_ENQUEUE_MAX_FDS;
4604 net_dev->hw_features = net_dev->features;
4605 net_dev->xdp_features = NETDEV_XDP_ACT_BASIC |
4606 NETDEV_XDP_ACT_REDIRECT |
4607 NETDEV_XDP_ACT_NDO_XMIT;
4608 if (priv->dpni_attrs.wriop_version >= DPAA2_WRIOP_VERSION(3, 0, 0) &&
4609 priv->dpni_attrs.num_queues <= 8)
4610 net_dev->xdp_features |= NETDEV_XDP_ACT_XSK_ZEROCOPY;
4611
4612 if (priv->dpni_attrs.vlan_filter_entries)
4613 net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4614
4615 return 0;
4616 }
4617
4618 static int dpaa2_eth_poll_link_state(void *arg)
4619 {
4620 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
4621 int err;
4622
4623 while (!kthread_should_stop()) {
4624 err = dpaa2_eth_link_state_update(priv);
4625 if (unlikely(err))
4626 return err;
4627
4628 msleep(DPAA2_ETH_LINK_STATE_REFRESH);
4629 }
4630
4631 return 0;
4632 }
4633
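/* Look up the DPMAC endpoint this DPNI is connected to, if any, and
 * connect to it; -EPROBE_DEFER is returned to the caller so probing can
 * be retried once the MAC becomes available.
 */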
4634 static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
4635 {
4636 struct fsl_mc_device *dpni_dev, *dpmac_dev;
4637 struct dpaa2_mac *mac;
4638 int err;
4639
4640 dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
4641 dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0);
4642
4643 if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) {
4644 netdev_dbg(priv->net_dev, "waiting for mac\n");
4645 return PTR_ERR(dpmac_dev);
4646 }
4647
4648 if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
4649 return 0;
4650
4651 mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
4652 if (!mac)
4653 return -ENOMEM;
4654
4655 mac->mc_dev = dpmac_dev;
4656 mac->mc_io = priv->mc_io;
4657 mac->net_dev = priv->net_dev;
4658
4659 err = dpaa2_mac_open(mac);
4660 if (err)
4661 goto err_free_mac;
4662
4663 if (dpaa2_mac_is_type_phy(mac)) {
4664 err = dpaa2_mac_connect(mac);
4665 if (err) {
4666 if (err == -EPROBE_DEFER)
4667 netdev_dbg(priv->net_dev,
4668 "could not connect to MAC\n");
4669 else
4670 netdev_err(priv->net_dev,
4671 "Error connecting to the MAC endpoint: %pe",
4672 ERR_PTR(err));
4673 goto err_close_mac;
4674 }
4675 }
4676
4677 mutex_lock(&priv->mac_lock);
4678 priv->mac = mac;
4679 mutex_unlock(&priv->mac_lock);
4680
4681 return 0;
4682
4683 err_close_mac:
4684 dpaa2_mac_close(mac);
4685 err_free_mac:
4686 kfree(mac);
4687 return err;
4688 }
4689
4690 static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
4691 {
4692 struct dpaa2_mac *mac;
4693
4694 mutex_lock(&priv->mac_lock);
4695 mac = priv->mac;
4696 priv->mac = NULL;
4697 mutex_unlock(&priv->mac_lock);
4698
4699 if (!mac)
4700 return;
4701
4702 if (dpaa2_mac_is_type_phy(mac))
4703 dpaa2_mac_disconnect(mac);
4704
4705 dpaa2_mac_close(mac);
4706 kfree(mac);
4707 }
4708
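/* Threaded DPNI interrupt handler: refresh the link state on LINK_CHANGED
 * events and, on ENDPOINT_CHANGED, update the MAC address, the Tx FQIDs
 * and the connection to the MAC endpoint.
 */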
4709 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
4710 {
4711 u32 status = ~0;
4712 struct device *dev = (struct device *)arg;
4713 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
4714 struct net_device *net_dev = dev_get_drvdata(dev);
4715 struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
4716 bool had_mac;
4717 int err;
4718
4719 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
4720 DPNI_IRQ_INDEX, &status);
4721 if (unlikely(err)) {
4722 netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
4723 return IRQ_HANDLED;
4724 }
4725
4726 if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
4727 dpaa2_eth_link_state_update(netdev_priv(net_dev));
4728
4729 if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
4730 dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
4731 dpaa2_eth_update_tx_fqids(priv);
4732
4733 /* We can avoid locking because the "endpoint changed" IRQ
4734 * handler is the only one who changes priv->mac at runtime,
4735 * so we are not racing with anyone.
4736 */
4737 had_mac = !!priv->mac;
4738 if (had_mac)
4739 dpaa2_eth_disconnect_mac(priv);
4740 else
4741 dpaa2_eth_connect_mac(priv);
4742 }
4743
4744 return IRQ_HANDLED;
4745 }
4746
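/* Allocate the MC interrupt, install the threaded handler above and
 * enable the link-changed and endpoint-changed event sources.
 */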
4747 static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
4748 {
4749 int err = 0;
4750 struct fsl_mc_device_irq *irq;
4751
4752 err = fsl_mc_allocate_irqs(ls_dev);
4753 if (err) {
4754 dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
4755 return err;
4756 }
4757
4758 irq = ls_dev->irqs[0];
4759 err = devm_request_threaded_irq(&ls_dev->dev, irq->virq,
4760 NULL, dpni_irq0_handler_thread,
4761 IRQF_NO_SUSPEND | IRQF_ONESHOT,
4762 dev_name(&ls_dev->dev), &ls_dev->dev);
4763 if (err < 0) {
4764 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
4765 goto free_mc_irq;
4766 }
4767
4768 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
4769 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
4770 DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
4771 if (err < 0) {
4772 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
4773 goto free_irq;
4774 }
4775
4776 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
4777 DPNI_IRQ_INDEX, 1);
4778 if (err < 0) {
4779 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
4780 goto free_irq;
4781 }
4782
4783 return 0;
4784
4785 free_irq:
4786 devm_free_irq(&ls_dev->dev, irq->virq, &ls_dev->dev);
4787 free_mc_irq:
4788 fsl_mc_free_irqs(ls_dev);
4789
4790 return err;
4791 }
4792
4793 static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
4794 {
4795 int i;
4796 struct dpaa2_eth_channel *ch;
4797
4798 for (i = 0; i < priv->num_channels; i++) {
4799 ch = priv->channel[i];
4800 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
4801 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
4802 }
4803 }
4804
4805 static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
4806 {
4807 int i;
4808 struct dpaa2_eth_channel *ch;
4809
4810 for (i = 0; i < priv->num_channels; i++) {
4811 ch = priv->channel[i];
4812 netif_napi_del(&ch->napi);
4813 }
4814 }
4815
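/* Probe a DPNI object: allocate the net_device and a MC portal, set up
 * the DPNI, DPIO and buffer pool objects, bind the frame queues, then
 * register the net_device and its devlink instance.
 */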
4816 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
4817 {
4818 struct device *dev;
4819 struct net_device *net_dev = NULL;
4820 struct dpaa2_eth_priv *priv = NULL;
4821 int err = 0;
4822
4823 dev = &dpni_dev->dev;
4824
4825 /* Net device */
4826 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
4827 if (!net_dev) {
4828 dev_err(dev, "alloc_etherdev_mq() failed\n");
4829 return -ENOMEM;
4830 }
4831
4832 SET_NETDEV_DEV(net_dev, dev);
4833 dev_set_drvdata(dev, net_dev);
4834
4835 priv = netdev_priv(net_dev);
4836 priv->net_dev = net_dev;
4837 SET_NETDEV_DEVLINK_PORT(net_dev, &priv->devlink_port);
4838
4839 mutex_init(&priv->mac_lock);
4840
4841 priv->iommu_domain = iommu_get_domain_for_dev(dev);
4842
4843 priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
4844 priv->rx_tstamp = false;
4845
4846 priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
4847 if (!priv->dpaa2_ptp_wq) {
4848 err = -ENOMEM;
4849 goto err_wq_alloc;
4850 }
4851
4852 INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);
4853 mutex_init(&priv->onestep_tstamp_lock);
4854 skb_queue_head_init(&priv->tx_skbs);
4855
4856 priv->rx_copybreak = DPAA2_ETH_DEFAULT_COPYBREAK;
4857
4858 /* Obtain a MC portal */
4859 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
4860 &priv->mc_io);
4861 if (err) {
4862 if (err == -ENXIO) {
4863 dev_dbg(dev, "waiting for MC portal\n");
4864 err = -EPROBE_DEFER;
4865 } else {
4866 dev_err(dev, "MC portal allocation failed\n");
4867 }
4868 goto err_portal_alloc;
4869 }
4870
4871 /* MC objects initialization and configuration */
4872 err = dpaa2_eth_setup_dpni(dpni_dev);
4873 if (err)
4874 goto err_dpni_setup;
4875
4876 err = dpaa2_eth_setup_dpio(priv);
4877 if (err)
4878 goto err_dpio_setup;
4879
4880 dpaa2_eth_setup_fqs(priv);
4881
4882 err = dpaa2_eth_setup_default_dpbp(priv);
4883 if (err)
4884 goto err_dpbp_setup;
4885
4886 err = dpaa2_eth_bind_dpni(priv);
4887 if (err)
4888 goto err_bind;
4889
4890 /* Add a NAPI context for each channel */
4891 dpaa2_eth_add_ch_napi(priv);
4892
4893 /* Percpu statistics */
4894 priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
4895 if (!priv->percpu_stats) {
4896 dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
4897 err = -ENOMEM;
4898 goto err_alloc_percpu_stats;
4899 }
4900 priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
4901 if (!priv->percpu_extras) {
4902 dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
4903 err = -ENOMEM;
4904 goto err_alloc_percpu_extras;
4905 }
4906
4907 priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
4908 if (!priv->sgt_cache) {
4909 dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
4910 err = -ENOMEM;
4911 goto err_alloc_sgt_cache;
4912 }
4913
4914 priv->fd = alloc_percpu(*priv->fd);
4915 if (!priv->fd) {
4916 dev_err(dev, "alloc_percpu(fds) failed\n");
4917 err = -ENOMEM;
4918 goto err_alloc_fds;
4919 }
4920
4921 err = dpaa2_eth_netdev_init(net_dev);
4922 if (err)
4923 goto err_netdev_init;
4924
4925 /* Configure checksum offload based on current interface flags */
4926 err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
4927 if (err)
4928 goto err_csum;
4929
4930 err = dpaa2_eth_set_tx_csum(priv,
4931 !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
4932 if (err)
4933 goto err_csum;
4934
4935 err = dpaa2_eth_alloc_rings(priv);
4936 if (err)
4937 goto err_alloc_rings;
4938
4939 #ifdef CONFIG_FSL_DPAA2_ETH_DCB
4940 if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
4941 priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
4942 net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
4943 } else {
4944 dev_dbg(dev, "PFC not supported\n");
4945 }
4946 #endif
4947
4948 err = dpaa2_eth_connect_mac(priv);
4949 if (err)
4950 goto err_connect_mac;
4951
4952 err = dpaa2_eth_setup_irqs(dpni_dev);
4953 if (err) {
4954 netdev_warn(net_dev, "Failed to set link interrupt, falling back to polling\n");
4955 priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
4956 "%s_poll_link", net_dev->name);
4957 if (IS_ERR(priv->poll_thread)) {
4958 dev_err(dev, "Error starting polling thread\n");
4959 goto err_poll_thread;
4960 }
4961 priv->do_link_poll = true;
4962 }
4963
4964 err = dpaa2_eth_dl_alloc(priv);
4965 if (err)
4966 goto err_dl_register;
4967
4968 err = dpaa2_eth_dl_traps_register(priv);
4969 if (err)
4970 goto err_dl_trap_register;
4971
4972 err = dpaa2_eth_dl_port_add(priv);
4973 if (err)
4974 goto err_dl_port_add;
4975
4976 net_dev->needed_headroom = DPAA2_ETH_SWA_SIZE + DPAA2_ETH_TX_BUF_ALIGN;
4977
4978 err = register_netdev(net_dev);
4979 if (err < 0) {
4980 dev_err(dev, "register_netdev() failed\n");
4981 goto err_netdev_reg;
4982 }
4983
4984 #ifdef CONFIG_DEBUG_FS
4985 dpaa2_dbg_add(priv);
4986 #endif
4987
4988 dpaa2_eth_dl_register(priv);
4989 dev_info(dev, "Probed interface %s\n", net_dev->name);
4990 return 0;
4991
4992 err_netdev_reg:
4993 dpaa2_eth_dl_port_del(priv);
4994 err_dl_port_add:
4995 dpaa2_eth_dl_traps_unregister(priv);
4996 err_dl_trap_register:
4997 dpaa2_eth_dl_free(priv);
4998 err_dl_register:
4999 if (priv->do_link_poll)
5000 kthread_stop(priv->poll_thread);
5001 else
5002 fsl_mc_free_irqs(dpni_dev);
5003 err_poll_thread:
5004 dpaa2_eth_disconnect_mac(priv);
5005 err_connect_mac:
5006 dpaa2_eth_free_rings(priv);
5007 err_alloc_rings:
5008 err_csum:
5009 err_netdev_init:
5010 free_percpu(priv->fd);
5011 err_alloc_fds:
5012 free_percpu(priv->sgt_cache);
5013 err_alloc_sgt_cache:
5014 free_percpu(priv->percpu_extras);
5015 err_alloc_percpu_extras:
5016 free_percpu(priv->percpu_stats);
5017 err_alloc_percpu_stats:
5018 dpaa2_eth_del_ch_napi(priv);
5019 err_bind:
5020 dpaa2_eth_free_dpbps(priv);
5021 err_dpbp_setup:
5022 dpaa2_eth_free_dpio(priv);
5023 err_dpio_setup:
5024 dpaa2_eth_free_dpni(priv);
5025 err_dpni_setup:
5026 fsl_mc_portal_free(priv->mc_io);
5027 err_portal_alloc:
5028 destroy_workqueue(priv->dpaa2_ptp_wq);
5029 err_wq_alloc:
5030 dev_set_drvdata(dev, NULL);
5031 free_netdev(net_dev);
5032
5033 return err;
5034 }
5035
5036 static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
5037 {
5038 struct device *dev;
5039 struct net_device *net_dev;
5040 struct dpaa2_eth_priv *priv;
5041
5042 dev = &ls_dev->dev;
5043 net_dev = dev_get_drvdata(dev);
5044 priv = netdev_priv(net_dev);
5045
5046 dpaa2_eth_dl_unregister(priv);
5047
5048 #ifdef CONFIG_DEBUG_FS
5049 dpaa2_dbg_remove(priv);
5050 #endif
5051
5052 unregister_netdev(net_dev);
5053
5054 dpaa2_eth_dl_port_del(priv);
5055 dpaa2_eth_dl_traps_unregister(priv);
5056 dpaa2_eth_dl_free(priv);
5057
5058 if (priv->do_link_poll)
5059 kthread_stop(priv->poll_thread);
5060 else
5061 fsl_mc_free_irqs(ls_dev);
5062
5063 dpaa2_eth_disconnect_mac(priv);
5064 dpaa2_eth_free_rings(priv);
5065 free_percpu(priv->fd);
5066 free_percpu(priv->sgt_cache);
5067 free_percpu(priv->percpu_stats);
5068 free_percpu(priv->percpu_extras);
5069
5070 dpaa2_eth_del_ch_napi(priv);
5071 dpaa2_eth_free_dpbps(priv);
5072 dpaa2_eth_free_dpio(priv);
5073 dpaa2_eth_free_dpni(priv);
5074 if (priv->onestep_reg_base)
5075 iounmap(priv->onestep_reg_base);
5076
5077 fsl_mc_portal_free(priv->mc_io);
5078
5079 destroy_workqueue(priv->dpaa2_ptp_wq);
5080
5081 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
5082
5083 free_netdev(net_dev);
5084 }
5085
5086 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
5087 {
5088 .vendor = FSL_MC_VENDOR_FREESCALE,
5089 .obj_type = "dpni",
5090 },
5091 { .vendor = 0x0 }
5092 };
5093 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
5094
5095 static struct fsl_mc_driver dpaa2_eth_driver = {
5096 .driver = {
5097 .name = KBUILD_MODNAME,
5098 },
5099 .probe = dpaa2_eth_probe,
5100 .remove = dpaa2_eth_remove,
5101 .match_id_table = dpaa2_eth_match_id_table
5102 };
5103
5104 static int __init dpaa2_eth_driver_init(void)
5105 {
5106 int err;
5107
5108 dpaa2_eth_dbg_init();
5109 err = fsl_mc_driver_register(&dpaa2_eth_driver);
5110 if (err) {
5111 dpaa2_eth_dbg_exit();
5112 return err;
5113 }
5114
5115 return 0;
5116 }
5117
5118 static void __exit dpaa2_eth_driver_exit(void)
5119 {
5120 dpaa2_eth_dbg_exit();
5121 fsl_mc_driver_unregister(&dpaa2_eth_driver);
5122 }
5123
5124 module_init(dpaa2_eth_driver_init);
5125 module_exit(dpaa2_eth_driver_exit);
5126