// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/fsl/ptp_qoriq.h>
#include <linux/ptp_classify.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

struct ptp_qoriq *dpaa2_ptp;
EXPORT_SYMBOL(dpaa2_ptp);

static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void dpaa2_eth_validate_rx_csum(struct dpaa2_eth_priv *priv,
				       u32 fd_status,
				       struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void dpaa2_eth_free_rx_fd(struct dpaa2_eth_priv *priv,
				 const struct dpaa2_fd *fd,
				 void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *dpaa2_eth_build_linear_skb(struct dpaa2_eth_channel *ch,
						  const struct dpaa2_fd *fd,
						  void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}
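/* Rx buffers are single pages; the first S/G entry becomes the linear part of
 * the skb and subsequent entries are attached as page fragments. The channel
 * buffer counter is decremented for every buffer taken out of the pool.
 */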
/* Build a non linear (fragmented) skb based on a S/G table */
static struct sk_buff *dpaa2_eth_build_frag_skb(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_channel *ch,
						struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (!dpaa2_sg_is_final(&sgt[i]) &&
				       i < DPAA2_ETH_MAX_SG_ENTRIES)
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				       (PAGE_SIZE - 1)) +
				      (page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void dpaa2_eth_free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array,
				int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

static void dpaa2_eth_xdp_release_buf(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		dpaa2_eth_free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

static void dpaa2_eth_xdp_tx_flush(struct dpaa2_eth_priv *priv,
				   struct dpaa2_eth_channel *ch,
				   struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_fd *fds;
	int enqueued, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	// enqueue the array of XDP_TX frames
	enqueued = dpaa2_eth_xdp_flush(priv, fq, &fq->xdp_tx_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	fds = fq->xdp_tx_fds.fds;
	for (i = 0; i < enqueued; i++) {
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
		ch->stats.xdp_tx++;
	}
	for (i = enqueued; i < fq->xdp_tx_fds.num; i++) {
		dpaa2_eth_xdp_release_buf(priv, ch, dpaa2_fd_get_addr(&fds[i]));
		percpu_stats->tx_errors++;
		ch->stats.xdp_tx_err++;
	}
	fq->xdp_tx_fds.num = 0;
}
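/* Queue an XDP_TX frame on the Tx queue matching the Rx queue it arrived on.
 * Frames are batched in the per-FQ xdp_tx_fds array and flushed to hardware
 * once DEV_MAP_BULK_SIZE descriptors have accumulated.
 */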
static void dpaa2_eth_xdp_enqueue(struct dpaa2_eth_priv *priv,
				  struct dpaa2_eth_channel *ch,
				  struct dpaa2_fd *fd,
				  void *buf_start, u16 queue_id)
{
	struct dpaa2_faead *faead;
	struct dpaa2_fd *dest_fd;
	struct dpaa2_eth_fq *fq;
	u32 ctrl, frc;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	dest_fd = &fq->xdp_tx_fds.fds[fq->xdp_tx_fds.num++];
	memcpy(dest_fd, fd, sizeof(*dest_fd));

	if (fq->xdp_tx_fds.num < DEV_MAP_BULK_SIZE)
		return;

	dpaa2_eth_xdp_tx_flush(priv, ch, fq);
}

static u32 dpaa2_eth_run_xdp(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     struct dpaa2_eth_fq *rx_fq,
			     struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err, offset;

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	offset = dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM;
	xdp_init_buff(&xdp, DPAA2_ETH_RX_BUF_RAW_SIZE - offset, &ch->xdp_rxq);
	xdp_prepare_buff(&xdp, vaddr + offset, XDP_PACKET_HEADROOM,
			 dpaa2_fd_get_len(fd), false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		dpaa2_eth_xdp_enqueue(priv, ch, fd, vaddr, rx_fq->flowid);
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		dpaa2_eth_xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			addr = dma_map_page(priv->net_dev->dev.parent,
					    virt_to_page(vaddr), 0,
					    priv->rx_buf_size, DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(priv->net_dev->dev.parent, addr))) {
				free_pages((unsigned long)vaddr, 0);
			} else {
				ch->buf_count++;
				dpaa2_eth_xdp_release_buf(priv, ch, addr);
			}
			ch->stats.xdp_drop++;
		} else {
			ch->stats.xdp_redirect++;
		}
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = dpaa2_eth_run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		dpaa2_eth_validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	dpaa2_eth_free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Processing of Rx frames received on the error FQ
 * We check and print the error bits and then free the frame
 */
static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv,
			     struct dpaa2_eth_channel *ch,
			     const struct dpaa2_fd *fd,
			     struct dpaa2_eth_fq *fq __always_unused)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_trap_item *trap_item;
	struct dpaa2_fapr *fapr;
	struct sk_buff *skb;
	void *buf_data;
	void *vaddr;

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	buf_data = vaddr + dpaa2_fd_get_offset(fd);

	if (fd_format == dpaa2_fd_single) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = dpaa2_eth_build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
	} else {
		/* We don't support any other format */
		dpaa2_eth_free_rx_fd(priv, fd, vaddr);
		goto err_frame_format;
	}

	fapr = dpaa2_get_fapr(vaddr, false);
	trap_item = dpaa2_eth_dl_get_trap(priv, fapr);
	if (trap_item)
		devlink_trap_report(priv->devlink, skb, trap_item->trap_ctx,
				    &priv->devlink_port, NULL);
	consume_skb(skb);

err_frame_format:
	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_stats->rx_errors++;
	ch->buf_count--;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
				    struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}
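/* Parse an outgoing PTP packet: report its message type, whether it requests
 * two-step timestamping and whether it is UDP-transported, along with the
 * offsets of the correctionField and originTimestamp fields relative to the
 * MAC header. Used by the one-step Tx timestamping path.
 */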
static int dpaa2_eth_ptp_parse(struct sk_buff *skb,
			       u8 *msgtype, u8 *twostep, u8 *udp,
			       u16 *correction_offset,
			       u16 *origintimestamp_offset)
{
	unsigned int ptp_class;
	struct ptp_header *hdr;
	unsigned int type;
	u8 *base;

	ptp_class = ptp_classify_raw(skb);
	if (ptp_class == PTP_CLASS_NONE)
		return -EINVAL;

	hdr = ptp_parse_header(skb, ptp_class);
	if (!hdr)
		return -EINVAL;

	*msgtype = ptp_get_msgtype(hdr, ptp_class);
	*twostep = hdr->flag_field[0] & 0x2;

	type = ptp_class & PTP_CLASS_PMASK;
	if (type == PTP_CLASS_IPV4 ||
	    type == PTP_CLASS_IPV6)
		*udp = 1;
	else
		*udp = 0;

	base = skb_mac_header(skb);
	*correction_offset = (u8 *)&hdr->correction - base;
	*origintimestamp_offset = (u8 *)hdr + sizeof(struct ptp_header) - base;

	return 0;
}

/* Configure the egress frame annotation for timestamp update */
static void dpaa2_eth_enable_tx_tstamp(struct dpaa2_eth_priv *priv,
				       struct dpaa2_fd *fd,
				       void *buf_start,
				       struct sk_buff *skb)
{
	struct ptp_tstamp origin_timestamp;
	struct dpni_single_step_cfg cfg;
	u8 msgtype, twostep, udp;
	struct dpaa2_faead *faead;
	struct dpaa2_fas *fas;
	struct timespec64 ts;
	u16 offset1, offset2;
	u32 ctrl, frc;
	__le64 *ns;
	u8 *data;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* enable UPD (update prepended data) bit in FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);

	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					&offset1, &offset2) ||
		    msgtype != PTP_MSGTYPE_SYNC || twostep) {
			WARN_ONCE(1, "Bad packet for one-step timestamping\n");
			return;
		}

		/* Mark the frame annotation status as valid */
		frc = dpaa2_fd_get_frc(fd);
		dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FASV);

		/* Mark the PTP flag for one step timestamping */
		fas = dpaa2_get_fas(buf_start, true);
		fas->status = cpu_to_le32(DPAA2_FAS_PTP);

		dpaa2_ptp->caps.gettime64(&dpaa2_ptp->caps, &ts);
		ns = dpaa2_get_ts(buf_start, true);
		*ns = cpu_to_le64(timespec64_to_ns(&ts) /
				  DPAA2_PTP_CLK_PERIOD_NS);

		/* Update current time to PTP message originTimestamp field */
		ns_to_ptp_tstamp(&origin_timestamp, le64_to_cpup(ns));
		data = skb_mac_header(skb);
		*(__be16 *)(data + offset2) = htons(origin_timestamp.sec_msb);
		*(__be32 *)(data + offset2 + 2) =
			htonl(origin_timestamp.sec_lsb);
		*(__be32 *)(data + offset2 + 6) = htonl(origin_timestamp.nsec);

		cfg.en = 1;
		cfg.ch_update = udp;
		cfg.offset = offset1;
		cfg.peer_delay = 0;

		if (dpni_set_single_step_cfg(priv->mc_io, 0, priv->mc_token,
					     &cfg))
			WARN_ONCE(1, "Failed to set single step register");
	}
}

/* Create a frame descriptor based on a fragmented skb */
static int dpaa2_eth_build_sg_fd(struct dpaa2_eth_priv *priv,
				 struct sk_buff *skb,
				 struct dpaa2_fd *fd,
				 void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kmalloc_array(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	if (unlikely(num_sg < 0)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag_align(sgt_buf_size, DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 * - offset is 0
	 * - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a SG frame descriptor based on a linear skb.
 *
 * This function is used on the Tx path when the skb headroom is not large
 * enough for the HW requirements, thus instead of realloc-ing the skb we
 * create a SG frame descriptor with only one entry.
 */
static int dpaa2_eth_build_sg_fd_single_buf(struct dpaa2_eth_priv *priv,
					    struct sk_buff *skb,
					    struct dpaa2_fd *fd,
					    void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr, sgt_addr;
	void *sgt_buf = NULL;
	int sgt_buf_size;
	int err;

	/* Prepare the HW SGT structure */
	sgt_cache = this_cpu_ptr(priv->sgt_cache);
	sgt_buf_size = priv->tx_data_offset + sizeof(struct dpaa2_sg_entry);

	if (sgt_cache->count == 0)
		sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN,
				  GFP_ATOMIC);
	else
		sgt_buf = sgt_cache->buf[--sgt_cache->count];
	if (unlikely(!sgt_buf))
		return -ENOMEM;

	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	addr = dma_map_single(dev, skb->data, skb->len, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto data_map_failed;
	}

	/* Fill in the HW SGT structure */
	dpaa2_sg_set_addr(sgt, addr);
	dpaa2_sg_set_len(sgt, skb->len);
	dpaa2_sg_set_final(sgt, true);

	/* Store the skb backpointer in the SGT buffer */
	*swa_addr = (void *)sgt_buf;
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;
	swa->single.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	sgt_addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, sgt_addr))) {
		err = -ENOMEM;
		goto sgt_map_failed;
	}

	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, sgt_addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;

sgt_map_failed:
	dma_unmap_single(dev, addr, skb->len, DMA_BIDIRECTIONAL);
data_map_failed:
	if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
		kfree(sgt_buf);
	else
		sgt_cache->buf[sgt_cache->count++] = sgt_buf;

	return err;
}
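/* For linear skbs, the frame descriptor points needed_headroom bytes before
 * skb->data; that gap holds the software annotation (skb backpointer) and the
 * hardware frame annotation, and is aligned down to DPAA2_ETH_TX_BUF_ALIGN
 * when the skb headroom allows it.
 */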
/* Create a frame descriptor based on a linear skb */
static int dpaa2_eth_build_single_fd(struct dpaa2_eth_priv *priv,
				     struct sk_buff *skb,
				     struct dpaa2_fd *fd,
				     void **swa_addr)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	*swa_addr = (void *)buffer_start;
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void dpaa2_eth_free_tx_fd(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_fq *fq,
				 const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr, sg_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	struct dpaa2_eth_sgt_cache *sgt_cache;
	struct dpaa2_sg_entry *sgt;

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb = swa->sg.skb;

			/* Unmap the scatterlist */
			dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
				     DMA_BIDIRECTIONAL);
			kfree(swa->sg.scl);

			/* Unmap the SGT buffer */
			dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
					 DMA_BIDIRECTIONAL);
		} else {
			skb = swa->single.skb;

			/* Unmap the SGT Buffer */
			dma_unmap_single(dev, fd_addr, swa->single.sgt_size,
					 DMA_BIDIRECTIONAL);

			sgt = (struct dpaa2_sg_entry *)(buffer_start +
							priv->tx_data_offset);
			sg_addr = dpaa2_sg_get_addr(sgt);
			dma_unmap_single(dev, sg_addr, skb->len, DMA_BIDIRECTIONAL);
		}
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (skb->cb[0] == TX_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	} else if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		mutex_unlock(&priv->onestep_tstamp_lock);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single) {
		sgt_cache = this_cpu_ptr(priv->sgt_cache);
		if (swa->type == DPAA2_ETH_SWA_SG) {
			skb_free_frag(buffer_start);
		} else {
			if (sgt_cache->count >= DPAA2_ETH_SGT_CACHE_SIZE)
				kfree(buffer_start);
			else
				sgt_cache->buf[sgt_cache->count++] = buffer_start;
		}
	}

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}
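/* Common Tx path: build a frame descriptor for the skb (a S/G FD for
 * nonlinear skbs, a single-entry S/G FD when the headroom is too small, or a
 * single-buffer FD otherwise) and enqueue it, retrying a bounded number of
 * times if the portal is busy.
 */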
static netdev_tx_t __dpaa2_eth_tx(struct sk_buff *skb,
				  struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;
	void *swa;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(skb);

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = dpaa2_eth_build_sg_fd(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else if (skb_headroom(skb) < needed_headroom) {
		err = dpaa2_eth_build_sg_fd_single_buf(priv, skb, &fd, &swa);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
		percpu_extras->tx_converted_sg_frames++;
		percpu_extras->tx_converted_sg_bytes += skb->len;
	} else {
		err = dpaa2_eth_build_single_fd(priv, skb, &fd, &swa);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	if (skb->cb[0])
		dpaa2_eth_enable_tx_tstamp(priv, &fd, swa, skb);

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueues might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		dpaa2_eth_free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void dpaa2_eth_tx_onestep_tstamp(struct work_struct *work)
{
	struct dpaa2_eth_priv *priv = container_of(work, struct dpaa2_eth_priv,
						   tx_onestep_tstamp);
	struct sk_buff *skb;

	while (true) {
		skb = skb_dequeue(&priv->tx_skbs);
		if (!skb)
			return;

		/* Take the lock just before transmitting a one-step
		 * timestamping packet; it is released in dpaa2_eth_free_tx_fd()
		 * once hardware confirms the packet was sent, or when cleaning
		 * up after a transmit failure.
		 */
		mutex_lock(&priv->onestep_tstamp_lock);
		__dpaa2_eth_tx(skb, priv->net_dev);
	}
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 msgtype, twostep, udp;
	u16 offset1, offset2;

	/* Utilize skb->cb[0] for timestamping request per skb */
	skb->cb[0] = 0;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && dpaa2_ptp) {
		if (priv->tx_tstamp_type == HWTSTAMP_TX_ON)
			skb->cb[0] = TX_TSTAMP;
		else if (priv->tx_tstamp_type == HWTSTAMP_TX_ONESTEP_SYNC)
			skb->cb[0] = TX_TSTAMP_ONESTEP_SYNC;
	}

	/* TX for one-step timestamping PTP Sync packet */
	if (skb->cb[0] == TX_TSTAMP_ONESTEP_SYNC) {
		if (!dpaa2_eth_ptp_parse(skb, &msgtype, &twostep, &udp,
					 &offset1, &offset2))
			if (msgtype == PTP_MSGTYPE_SYNC && twostep == 0) {
				skb_queue_tail(&priv->tx_skbs, skb);
				queue_work(priv->dpaa2_ptp_wq,
					   &priv->tx_onestep_tstamp);
				return NETDEV_TX_OK;
			}
		/* Use two-step timestamping if not one-step timestamping
		 * PTP Sync packet
		 */
		skb->cb[0] = TX_TSTAMP;
	}

	/* TX for other packets */
	return __dpaa2_eth_tx(skb, net_dev);
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	dpaa2_eth_free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int dpaa2_eth_set_rx_vlan_filtering(struct dpaa2_eth_priv *priv,
					   bool enable)
{
	int err;

	err = dpni_enable_vlan_filter(priv->mc_io, 0, priv->mc_token, enable);

	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_enable_vlan_filter failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int dpaa2_eth_add_bufs(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* Allocate buffer visible to WRIOP + skb shared info +
		 * alignment padding
		 */
		/* allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		dpaa2_eth_free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

static int dpaa2_eth_seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = dpaa2_eth_add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/*
 * Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void dpaa2_eth_drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ < DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		dpaa2_eth_free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void dpaa2_eth_drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	dpaa2_eth_drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	dpaa2_eth_drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int dpaa2_eth_refill_pool(struct dpaa2_eth_priv *priv,
				 struct dpaa2_eth_channel *ch,
				 u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = dpaa2_eth_add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static void dpaa2_eth_sgt_cache_drain(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_sgt_cache *sgt_cache;
	u16 count;
	int k, i;

	for_each_possible_cpu(k) {
		sgt_cache = per_cpu_ptr(priv->sgt_cache, k);
		count = sgt_cache->count;

		for (i = 0; i < count; i++)
			kfree(sgt_cache->buf[i]);
		sgt_cache->count = 0;
	}
}
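/* Issue a volatile dequeue command to pull frames from the channel's QMan
 * queues into the per-channel store, retrying while the software portal is
 * busy.
 */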
static int dpaa2_eth_pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	u16 flowid;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = dpaa2_eth_pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		dpaa2_eth_refill_pool(priv, ch, priv->bpid);

		store_cleaned = dpaa2_eth_consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
			flowid = fq->flowid;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();
	else if (rx_cleaned && ch->xdp.res & XDP_TX)
		dpaa2_eth_xdp_tx_flush(priv, ch, &priv->fq[flowid]);

	return work_done;
}

static void dpaa2_eth_enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void dpaa2_eth_disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}
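/* Configure ingress taildrop: per-FQ byte-based taildrop is used when Tx
 * pause frames are disabled, while per-traffic-class congestion group
 * taildrop is used when pause frames are disabled or PFC is enabled.
 */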
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
			       bool tx_pause, bool pfc)
{
	struct dpni_taildrop td = {0};
	struct dpaa2_eth_fq *fq;
	int i, err;

	/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
	 * flow control is disabled (as it might interfere with either the
	 * buffer pool depletion trigger for pause frames or with the group
	 * congestion trigger for PFC frames)
	 */
	td.enable = !tx_pause;
	if (priv->rx_fqtd_enabled == td.enable)
		goto set_cgtd;

	td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
	td.units = DPNI_CONGESTION_UNIT_BYTES;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX,
					fq->tc, fq->flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(FQ) failed\n");
			return;
		}
	}

	priv->rx_fqtd_enabled = td.enable;

set_cgtd:
	/* Congestion group taildrop: threshold is in frames, per group
	 * of FQs belonging to the same traffic class
	 * Enabled if general Tx pause disabled or if PFCs are enabled
	 * (congestion group threshold for PFC generation is lower than the
	 * CG taildrop threshold, so it won't interfere with it; we also
	 * want frames in non-PFC enabled traffic classes to be kept in check)
	 */
	td.enable = !tx_pause || pfc;
	if (priv->rx_cgtd_enabled == td.enable)
		return;

	td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
	td.units = DPNI_CONGESTION_UNIT_FRAMES;
	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_GROUP, DPNI_QUEUE_RX,
					i, 0, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop(CG) failed\n");
			return;
		}
	}

	priv->rx_cgtd_enabled = td.enable;
}

static int dpaa2_eth_link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
	dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (dpaa2_eth_is_type_phy(priv))
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpaa2_eth_seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!dpaa2_eth_is_type_phy(priv)) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	dpaa2_eth_enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (dpaa2_eth_is_type_phy(priv))
		phylink_start(priv->mac->phylink);

	return 0;

enable_err:
	dpaa2_eth_disable_ch_napi(priv);
	dpaa2_eth_drain_pool(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 dpaa2_eth_ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void dpaa2_eth_wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = dpaa2_eth_ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void dpaa2_eth_wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
	msleep(500);
}
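/* Stop the interface: wait for egress frames to drain, disable the DPNI,
 * wait for the remaining Rx and Tx conf frames to be consumed, then disable
 * NAPI and empty the buffer pool and the per-CPU SGT cache.
 */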
static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (dpaa2_eth_is_type_phy(priv)) {
		phylink_stop(priv->mac->phylink);
	} else {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	}

	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
	 */
	dpaa2_eth_wait_for_egress_fq_empty(priv);

	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	dpaa2_eth_wait_for_ingress_fq_empty(priv);
	dpaa2_eth_disable_ch_napi(priv);

	/* Empty the buffer pool */
	dpaa2_eth_drain_pool(priv);

	/* Empty the Scatter-Gather Buffer cache */
	dpaa2_eth_sgt_cache_drain(priv);

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/** Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void dpaa2_eth_add_uc_hw_addr(const struct net_device *net_dev,
				     struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
1959 */ 1960 static void dpaa2_eth_add_mc_hw_addr(const struct net_device *net_dev, 1961 struct dpaa2_eth_priv *priv) 1962 { 1963 struct netdev_hw_addr *ha; 1964 int err; 1965 1966 netdev_for_each_mc_addr(ha, net_dev) { 1967 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 1968 ha->addr); 1969 if (err) 1970 netdev_warn(priv->net_dev, 1971 "Could not add mcast MAC %pM to the filtering table (err %d)\n", 1972 ha->addr, err); 1973 } 1974 } 1975 1976 static int dpaa2_eth_rx_add_vid(struct net_device *net_dev, 1977 __be16 vlan_proto, u16 vid) 1978 { 1979 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1980 int err; 1981 1982 err = dpni_add_vlan_id(priv->mc_io, 0, priv->mc_token, 1983 vid, 0, 0, 0); 1984 1985 if (err) { 1986 netdev_warn(priv->net_dev, 1987 "Could not add the vlan id %u\n", 1988 vid); 1989 return err; 1990 } 1991 1992 return 0; 1993 } 1994 1995 static int dpaa2_eth_rx_kill_vid(struct net_device *net_dev, 1996 __be16 vlan_proto, u16 vid) 1997 { 1998 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1999 int err; 2000 2001 err = dpni_remove_vlan_id(priv->mc_io, 0, priv->mc_token, vid); 2002 2003 if (err) { 2004 netdev_warn(priv->net_dev, 2005 "Could not remove the vlan id %u\n", 2006 vid); 2007 return err; 2008 } 2009 2010 return 0; 2011 } 2012 2013 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 2014 { 2015 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2016 int uc_count = netdev_uc_count(net_dev); 2017 int mc_count = netdev_mc_count(net_dev); 2018 u8 max_mac = priv->dpni_attrs.mac_filter_entries; 2019 u32 options = priv->dpni_attrs.options; 2020 u16 mc_token = priv->mc_token; 2021 struct fsl_mc_io *mc_io = priv->mc_io; 2022 int err; 2023 2024 /* Basic sanity checks; these probably indicate a misconfiguration */ 2025 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 2026 netdev_info(net_dev, 2027 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 2028 max_mac); 2029 2030 /* Force promiscuous if the uc or mc counts exceed our capabilities. */ 2031 if (uc_count > max_mac) { 2032 netdev_info(net_dev, 2033 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 2034 uc_count, max_mac); 2035 goto force_promisc; 2036 } 2037 if (mc_count + uc_count > max_mac) { 2038 netdev_info(net_dev, 2039 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 2040 uc_count + mc_count, max_mac); 2041 goto force_mc_promisc; 2042 } 2043 2044 /* Adjust promisc settings due to flag combinations */ 2045 if (net_dev->flags & IFF_PROMISC) 2046 goto force_promisc; 2047 if (net_dev->flags & IFF_ALLMULTI) { 2048 /* First, rebuild unicast filtering table. This should be done 2049 * in promisc mode, in order to avoid frame loss while we 2050 * progressively add entries to the table. 2051 * We don't know whether we had been in promisc already, and 2052 * making an MC call to find out is expensive; so set uc promisc 2053 * nonetheless. 2054 */ 2055 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 2056 if (err) 2057 netdev_warn(net_dev, "Can't set uc promisc\n"); 2058 2059 /* Actual uc table reconstruction. */ 2060 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 2061 if (err) 2062 netdev_warn(net_dev, "Can't clear uc filters\n"); 2063 dpaa2_eth_add_uc_hw_addr(net_dev, priv); 2064 2065 /* Finally, clear uc promisc and set mc promisc as requested. 
*/ 2066 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 2067 if (err) 2068 netdev_warn(net_dev, "Can't clear uc promisc\n"); 2069 goto force_mc_promisc; 2070 } 2071 2072 /* Neither unicast, nor multicast promisc will be on... eventually. 2073 * For now, rebuild mac filtering tables while forcing both of them on. 2074 */ 2075 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 2076 if (err) 2077 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 2078 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 2079 if (err) 2080 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 2081 2082 /* Actual mac filtering tables reconstruction */ 2083 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 2084 if (err) 2085 netdev_warn(net_dev, "Can't clear mac filters\n"); 2086 dpaa2_eth_add_mc_hw_addr(net_dev, priv); 2087 dpaa2_eth_add_uc_hw_addr(net_dev, priv); 2088 2089 /* Now we can clear both ucast and mcast promisc, without risking 2090 * to drop legitimate frames anymore. 2091 */ 2092 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 2093 if (err) 2094 netdev_warn(net_dev, "Can't clear ucast promisc\n"); 2095 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 2096 if (err) 2097 netdev_warn(net_dev, "Can't clear mcast promisc\n"); 2098 2099 return; 2100 2101 force_promisc: 2102 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 2103 if (err) 2104 netdev_warn(net_dev, "Can't set ucast promisc\n"); 2105 force_mc_promisc: 2106 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 2107 if (err) 2108 netdev_warn(net_dev, "Can't set mcast promisc\n"); 2109 } 2110 2111 static int dpaa2_eth_set_features(struct net_device *net_dev, 2112 netdev_features_t features) 2113 { 2114 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2115 netdev_features_t changed = features ^ net_dev->features; 2116 bool enable; 2117 int err; 2118 2119 if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { 2120 enable = !!(features & NETIF_F_HW_VLAN_CTAG_FILTER); 2121 err = dpaa2_eth_set_rx_vlan_filtering(priv, enable); 2122 if (err) 2123 return err; 2124 } 2125 2126 if (changed & NETIF_F_RXCSUM) { 2127 enable = !!(features & NETIF_F_RXCSUM); 2128 err = dpaa2_eth_set_rx_csum(priv, enable); 2129 if (err) 2130 return err; 2131 } 2132 2133 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 2134 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 2135 err = dpaa2_eth_set_tx_csum(priv, enable); 2136 if (err) 2137 return err; 2138 } 2139 2140 return 0; 2141 } 2142 2143 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2144 { 2145 struct dpaa2_eth_priv *priv = netdev_priv(dev); 2146 struct hwtstamp_config config; 2147 2148 if (!dpaa2_ptp) 2149 return -EINVAL; 2150 2151 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 2152 return -EFAULT; 2153 2154 switch (config.tx_type) { 2155 case HWTSTAMP_TX_OFF: 2156 case HWTSTAMP_TX_ON: 2157 case HWTSTAMP_TX_ONESTEP_SYNC: 2158 priv->tx_tstamp_type = config.tx_type; 2159 break; 2160 default: 2161 return -ERANGE; 2162 } 2163 2164 if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 2165 priv->rx_tstamp = false; 2166 } else { 2167 priv->rx_tstamp = true; 2168 /* TS is set for all frame types, not only those requested */ 2169 config.rx_filter = HWTSTAMP_FILTER_ALL; 2170 } 2171 2172 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
2173 -EFAULT : 0; 2174 } 2175 2176 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 2177 { 2178 struct dpaa2_eth_priv *priv = netdev_priv(dev); 2179 2180 if (cmd == SIOCSHWTSTAMP) 2181 return dpaa2_eth_ts_ioctl(dev, rq, cmd); 2182 2183 if (dpaa2_eth_is_type_phy(priv)) 2184 return phylink_mii_ioctl(priv->mac->phylink, rq, cmd); 2185 2186 return -EOPNOTSUPP; 2187 } 2188 2189 static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) 2190 { 2191 int mfl, linear_mfl; 2192 2193 mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 2194 linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE - 2195 dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; 2196 2197 if (mfl > linear_mfl) { 2198 netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", 2199 linear_mfl - VLAN_ETH_HLEN); 2200 return false; 2201 } 2202 2203 return true; 2204 } 2205 2206 static int dpaa2_eth_set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) 2207 { 2208 int mfl, err; 2209 2210 /* We enforce a maximum Rx frame length based on MTU only if we have 2211 * an XDP program attached (in order to avoid Rx S/G frames). 2212 * Otherwise, we accept all incoming frames as long as they are not 2213 * larger than maximum size supported in hardware 2214 */ 2215 if (has_xdp) 2216 mfl = DPAA2_ETH_L2_MAX_FRM(mtu); 2217 else 2218 mfl = DPAA2_ETH_MFL; 2219 2220 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); 2221 if (err) { 2222 netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); 2223 return err; 2224 } 2225 2226 return 0; 2227 } 2228 2229 static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) 2230 { 2231 struct dpaa2_eth_priv *priv = netdev_priv(dev); 2232 int err; 2233 2234 if (!priv->xdp_prog) 2235 goto out; 2236 2237 if (!xdp_mtu_valid(priv, new_mtu)) 2238 return -EINVAL; 2239 2240 err = dpaa2_eth_set_rx_mfl(priv, new_mtu, true); 2241 if (err) 2242 return err; 2243 2244 out: 2245 dev->mtu = new_mtu; 2246 return 0; 2247 } 2248 2249 static int dpaa2_eth_update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) 2250 { 2251 struct dpni_buffer_layout buf_layout = {0}; 2252 int err; 2253 2254 err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, 2255 DPNI_QUEUE_RX, &buf_layout); 2256 if (err) { 2257 netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); 2258 return err; 2259 } 2260 2261 /* Reserve extra headroom for XDP header size changes */ 2262 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + 2263 (has_xdp ? XDP_PACKET_HEADROOM : 0); 2264 buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; 2265 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 2266 DPNI_QUEUE_RX, &buf_layout); 2267 if (err) { 2268 netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); 2269 return err; 2270 } 2271 2272 return 0; 2273 } 2274 2275 static int dpaa2_eth_setup_xdp(struct net_device *dev, struct bpf_prog *prog) 2276 { 2277 struct dpaa2_eth_priv *priv = netdev_priv(dev); 2278 struct dpaa2_eth_channel *ch; 2279 struct bpf_prog *old; 2280 bool up, need_update; 2281 int i, err; 2282 2283 if (prog && !xdp_mtu_valid(priv, dev->mtu)) 2284 return -EINVAL; 2285 2286 if (prog) 2287 bpf_prog_add(prog, priv->num_channels); 2288 2289 up = netif_running(dev); 2290 need_update = (!!priv->xdp_prog != !!prog); 2291 2292 if (up) 2293 dpaa2_eth_stop(dev); 2294 2295 /* While in xdp mode, enforce a maximum Rx frame size based on MTU. 2296 * Also, when switching between xdp/non-xdp modes we need to reconfigure 2297 * our Rx buffer layout. 
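(The extra XDP_PACKET_HEADROOM reserved by dpaa2_eth_update_rx_buffer_headroom() leaves room for the program to grow packet headers in place.)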
Buffer pool was drained on dpaa2_eth_stop, 2298 * so we are sure no old format buffers will be used from now on. 2299 */ 2300 if (need_update) { 2301 err = dpaa2_eth_set_rx_mfl(priv, dev->mtu, !!prog); 2302 if (err) 2303 goto out_err; 2304 err = dpaa2_eth_update_rx_buffer_headroom(priv, !!prog); 2305 if (err) 2306 goto out_err; 2307 } 2308 2309 old = xchg(&priv->xdp_prog, prog); 2310 if (old) 2311 bpf_prog_put(old); 2312 2313 for (i = 0; i < priv->num_channels; i++) { 2314 ch = priv->channel[i]; 2315 old = xchg(&ch->xdp.prog, prog); 2316 if (old) 2317 bpf_prog_put(old); 2318 } 2319 2320 if (up) { 2321 err = dpaa2_eth_open(dev); 2322 if (err) 2323 return err; 2324 } 2325 2326 return 0; 2327 2328 out_err: 2329 if (prog) 2330 bpf_prog_sub(prog, priv->num_channels); 2331 if (up) 2332 dpaa2_eth_open(dev); 2333 2334 return err; 2335 } 2336 2337 static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) 2338 { 2339 switch (xdp->command) { 2340 case XDP_SETUP_PROG: 2341 return dpaa2_eth_setup_xdp(dev, xdp->prog); 2342 default: 2343 return -EINVAL; 2344 } 2345 2346 return 0; 2347 } 2348 2349 static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev, 2350 struct xdp_frame *xdpf, 2351 struct dpaa2_fd *fd) 2352 { 2353 struct device *dev = net_dev->dev.parent; 2354 unsigned int needed_headroom; 2355 struct dpaa2_eth_swa *swa; 2356 void *buffer_start, *aligned_start; 2357 dma_addr_t addr; 2358 2359 /* We require a minimum headroom to be able to transmit the frame. 2360 * Otherwise return an error and let the original net_device handle it 2361 */ 2362 needed_headroom = dpaa2_eth_needed_headroom(NULL); 2363 if (xdpf->headroom < needed_headroom) 2364 return -EINVAL; 2365 2366 /* Setup the FD fields */ 2367 memset(fd, 0, sizeof(*fd)); 2368 2369 /* Align FD address, if possible */ 2370 buffer_start = xdpf->data - needed_headroom; 2371 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 2372 DPAA2_ETH_TX_BUF_ALIGN); 2373 if (aligned_start >= xdpf->data - xdpf->headroom) 2374 buffer_start = aligned_start; 2375 2376 swa = (struct dpaa2_eth_swa *)buffer_start; 2377 /* fill in necessary fields here */ 2378 swa->type = DPAA2_ETH_SWA_XDP; 2379 swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start; 2380 swa->xdp.xdpf = xdpf; 2381 2382 addr = dma_map_single(dev, buffer_start, 2383 swa->xdp.dma_size, 2384 DMA_BIDIRECTIONAL); 2385 if (unlikely(dma_mapping_error(dev, addr))) 2386 return -ENOMEM; 2387 2388 dpaa2_fd_set_addr(fd, addr); 2389 dpaa2_fd_set_offset(fd, xdpf->data - buffer_start); 2390 dpaa2_fd_set_len(fd, xdpf->len); 2391 dpaa2_fd_set_format(fd, dpaa2_fd_single); 2392 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 2393 2394 return 0; 2395 } 2396 2397 static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n, 2398 struct xdp_frame **frames, u32 flags) 2399 { 2400 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2401 struct dpaa2_eth_xdp_fds *xdp_redirect_fds; 2402 struct rtnl_link_stats64 *percpu_stats; 2403 struct dpaa2_eth_fq *fq; 2404 struct dpaa2_fd *fds; 2405 int enqueued, i, err; 2406 2407 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 2408 return -EINVAL; 2409 2410 if (!netif_running(net_dev)) 2411 return -ENETDOWN; 2412 2413 fq = &priv->fq[smp_processor_id()]; 2414 xdp_redirect_fds = &fq->xdp_redirect_fds; 2415 fds = xdp_redirect_fds->fds; 2416 2417 percpu_stats = this_cpu_ptr(priv->percpu_stats); 2418 2419 /* create a FD for each xdp_frame in the list received */ 2420 for (i = 0; i < n; i++) { 2421 err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]); 2422 if (err) 
2423 break; 2424 } 2425 xdp_redirect_fds->num = i; 2426 2427 /* enqueue all the frame descriptors */ 2428 enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds); 2429 2430 /* update statistics */ 2431 percpu_stats->tx_packets += enqueued; 2432 for (i = 0; i < enqueued; i++) 2433 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]); 2434 for (i = enqueued; i < n; i++) 2435 xdp_return_frame_rx_napi(frames[i]); 2436 2437 return enqueued; 2438 } 2439 2440 static int update_xps(struct dpaa2_eth_priv *priv) 2441 { 2442 struct net_device *net_dev = priv->net_dev; 2443 struct cpumask xps_mask; 2444 struct dpaa2_eth_fq *fq; 2445 int i, num_queues, netdev_queues; 2446 int err = 0; 2447 2448 num_queues = dpaa2_eth_queue_count(priv); 2449 netdev_queues = (net_dev->num_tc ? : 1) * num_queues; 2450 2451 /* The first <num_queues> entries in priv->fq array are Tx/Tx conf 2452 * queues, so only process those 2453 */ 2454 for (i = 0; i < netdev_queues; i++) { 2455 fq = &priv->fq[i % num_queues]; 2456 2457 cpumask_clear(&xps_mask); 2458 cpumask_set_cpu(fq->target_cpu, &xps_mask); 2459 2460 err = netif_set_xps_queue(net_dev, &xps_mask, i); 2461 if (err) { 2462 netdev_warn_once(net_dev, "Error setting XPS queue\n"); 2463 break; 2464 } 2465 } 2466 2467 return err; 2468 } 2469 2470 static int dpaa2_eth_setup_mqprio(struct net_device *net_dev, 2471 struct tc_mqprio_qopt *mqprio) 2472 { 2473 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2474 u8 num_tc, num_queues; 2475 int i; 2476 2477 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; 2478 num_queues = dpaa2_eth_queue_count(priv); 2479 num_tc = mqprio->num_tc; 2480 2481 if (num_tc == net_dev->num_tc) 2482 return 0; 2483 2484 if (num_tc > dpaa2_eth_tc_count(priv)) { 2485 netdev_err(net_dev, "Max %d traffic classes supported\n", 2486 dpaa2_eth_tc_count(priv)); 2487 return -EOPNOTSUPP; 2488 } 2489 2490 if (!num_tc) { 2491 netdev_reset_tc(net_dev); 2492 netif_set_real_num_tx_queues(net_dev, num_queues); 2493 goto out; 2494 } 2495 2496 netdev_set_num_tc(net_dev, num_tc); 2497 netif_set_real_num_tx_queues(net_dev, num_tc * num_queues); 2498 2499 for (i = 0; i < num_tc; i++) 2500 netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues); 2501 2502 out: 2503 update_xps(priv); 2504 2505 return 0; 2506 } 2507 2508 #define bps_to_mbits(rate) (div_u64((rate), 1000000) * 8) 2509 2510 static int dpaa2_eth_setup_tbf(struct net_device *net_dev, struct tc_tbf_qopt_offload *p) 2511 { 2512 struct tc_tbf_qopt_offload_replace_params *cfg = &p->replace_params; 2513 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2514 struct dpni_tx_shaping_cfg tx_cr_shaper = { 0 }; 2515 struct dpni_tx_shaping_cfg tx_er_shaper = { 0 }; 2516 int err; 2517 2518 if (p->command == TC_TBF_STATS) 2519 return -EOPNOTSUPP; 2520 2521 /* Only per port Tx shaping */ 2522 if (p->parent != TC_H_ROOT) 2523 return -EOPNOTSUPP; 2524 2525 if (p->command == TC_TBF_REPLACE) { 2526 if (cfg->max_size > DPAA2_ETH_MAX_BURST_SIZE) { 2527 netdev_err(net_dev, "burst size cannot be greater than %d\n", 2528 DPAA2_ETH_MAX_BURST_SIZE); 2529 return -EINVAL; 2530 } 2531 2532 tx_cr_shaper.max_burst_size = cfg->max_size; 2533 /* The TBF interface is in bytes/s, whereas DPAA2 expects the 2534 * rate in Mbits/s 2535 */ 2536 tx_cr_shaper.rate_limit = bps_to_mbits(cfg->rate.rate_bytes_ps); 2537 } 2538 2539 err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &tx_cr_shaper, 2540 &tx_er_shaper, 0); 2541 if (err) { 2542 netdev_err(net_dev, "dpni_set_tx_shaping() = %d\n", err); 2543 return err; 2544 } 2545 2546 return 0; 2547 } 2548 
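/* Usage example (hypothetical interface name and values): the shaper
 * configured above corresponds to an offloaded TBF root qdisc installed
 * with something like:
 *
 *   tc qdisc add dev eth0 root tbf rate 500mbit burst 64kb latency 1ms
 *
 * The stack hands us the rate in bytes/s, which bps_to_mbits() converts to
 * Mbit/s for the MC firmware; a burst size larger than
 * DPAA2_ETH_MAX_BURST_SIZE is rejected with -EINVAL.
 */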
2549 static int dpaa2_eth_setup_tc(struct net_device *net_dev, 2550 enum tc_setup_type type, void *type_data) 2551 { 2552 switch (type) { 2553 case TC_SETUP_QDISC_MQPRIO: 2554 return dpaa2_eth_setup_mqprio(net_dev, type_data); 2555 case TC_SETUP_QDISC_TBF: 2556 return dpaa2_eth_setup_tbf(net_dev, type_data); 2557 default: 2558 return -EOPNOTSUPP; 2559 } 2560 } 2561 2562 static const struct net_device_ops dpaa2_eth_ops = { 2563 .ndo_open = dpaa2_eth_open, 2564 .ndo_start_xmit = dpaa2_eth_tx, 2565 .ndo_stop = dpaa2_eth_stop, 2566 .ndo_set_mac_address = dpaa2_eth_set_addr, 2567 .ndo_get_stats64 = dpaa2_eth_get_stats, 2568 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 2569 .ndo_set_features = dpaa2_eth_set_features, 2570 .ndo_do_ioctl = dpaa2_eth_ioctl, 2571 .ndo_change_mtu = dpaa2_eth_change_mtu, 2572 .ndo_bpf = dpaa2_eth_xdp, 2573 .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, 2574 .ndo_setup_tc = dpaa2_eth_setup_tc, 2575 .ndo_vlan_rx_add_vid = dpaa2_eth_rx_add_vid, 2576 .ndo_vlan_rx_kill_vid = dpaa2_eth_rx_kill_vid 2577 }; 2578 2579 static void dpaa2_eth_cdan_cb(struct dpaa2_io_notification_ctx *ctx) 2580 { 2581 struct dpaa2_eth_channel *ch; 2582 2583 ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 2584 2585 /* Update NAPI statistics */ 2586 ch->stats.cdan++; 2587 2588 napi_schedule(&ch->napi); 2589 } 2590 2591 /* Allocate and configure a DPCON object */ 2592 static struct fsl_mc_device *dpaa2_eth_setup_dpcon(struct dpaa2_eth_priv *priv) 2593 { 2594 struct fsl_mc_device *dpcon; 2595 struct device *dev = priv->net_dev->dev.parent; 2596 int err; 2597 2598 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 2599 FSL_MC_POOL_DPCON, &dpcon); 2600 if (err) { 2601 if (err == -ENXIO) 2602 err = -EPROBE_DEFER; 2603 else 2604 dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 2605 return ERR_PTR(err); 2606 } 2607 2608 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 2609 if (err) { 2610 dev_err(dev, "dpcon_open() failed\n"); 2611 goto free; 2612 } 2613 2614 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 2615 if (err) { 2616 dev_err(dev, "dpcon_reset() failed\n"); 2617 goto close; 2618 } 2619 2620 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 2621 if (err) { 2622 dev_err(dev, "dpcon_enable() failed\n"); 2623 goto close; 2624 } 2625 2626 return dpcon; 2627 2628 close: 2629 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 2630 free: 2631 fsl_mc_object_free(dpcon); 2632 2633 return ERR_PTR(err); 2634 } 2635 2636 static void dpaa2_eth_free_dpcon(struct dpaa2_eth_priv *priv, 2637 struct fsl_mc_device *dpcon) 2638 { 2639 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 2640 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 2641 fsl_mc_object_free(dpcon); 2642 } 2643 2644 static struct dpaa2_eth_channel *dpaa2_eth_alloc_channel(struct dpaa2_eth_priv *priv) 2645 { 2646 struct dpaa2_eth_channel *channel; 2647 struct dpcon_attr attr; 2648 struct device *dev = priv->net_dev->dev.parent; 2649 int err; 2650 2651 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 2652 if (!channel) 2653 return NULL; 2654 2655 channel->dpcon = dpaa2_eth_setup_dpcon(priv); 2656 if (IS_ERR(channel->dpcon)) { 2657 err = PTR_ERR(channel->dpcon); 2658 goto err_setup; 2659 } 2660 2661 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 2662 &attr); 2663 if (err) { 2664 dev_err(dev, "dpcon_get_attributes() failed\n"); 2665 goto err_get_attr; 2666 } 2667 2668 channel->dpcon_id = attr.id; 2669 channel->ch_id = attr.qbman_ch_id; 2670 channel->priv = priv; 2671 2672 return channel; 2673 2674 
err_get_attr: 2675 dpaa2_eth_free_dpcon(priv, channel->dpcon); 2676 err_setup: 2677 kfree(channel); 2678 return ERR_PTR(err); 2679 } 2680 2681 static void dpaa2_eth_free_channel(struct dpaa2_eth_priv *priv, 2682 struct dpaa2_eth_channel *channel) 2683 { 2684 dpaa2_eth_free_dpcon(priv, channel->dpcon); 2685 kfree(channel); 2686 } 2687 2688 /* DPIO setup: allocate and configure QBMan channels, setup core affinity 2689 * and register data availability notifications 2690 */ 2691 static int dpaa2_eth_setup_dpio(struct dpaa2_eth_priv *priv) 2692 { 2693 struct dpaa2_io_notification_ctx *nctx; 2694 struct dpaa2_eth_channel *channel; 2695 struct dpcon_notification_cfg dpcon_notif_cfg; 2696 struct device *dev = priv->net_dev->dev.parent; 2697 int i, err; 2698 2699 /* We want the ability to spread ingress traffic (RX, TX conf) to as 2700 * many cores as possible, so we need one channel for each core 2701 * (unless there's fewer queues than cores, in which case the extra 2702 * channels would be wasted). 2703 * Allocate one channel per core and register it to the core's 2704 * affine DPIO. If not enough channels are available for all cores 2705 * or if some cores don't have an affine DPIO, there will be no 2706 * ingress frame processing on those cores. 2707 */ 2708 cpumask_clear(&priv->dpio_cpumask); 2709 for_each_online_cpu(i) { 2710 /* Try to allocate a channel */ 2711 channel = dpaa2_eth_alloc_channel(priv); 2712 if (IS_ERR_OR_NULL(channel)) { 2713 err = PTR_ERR_OR_ZERO(channel); 2714 if (err != -EPROBE_DEFER) 2715 dev_info(dev, 2716 "No affine channel for cpu %d and above\n", i); 2717 goto err_alloc_ch; 2718 } 2719 2720 priv->channel[priv->num_channels] = channel; 2721 2722 nctx = &channel->nctx; 2723 nctx->is_cdan = 1; 2724 nctx->cb = dpaa2_eth_cdan_cb; 2725 nctx->id = channel->ch_id; 2726 nctx->desired_cpu = i; 2727 2728 /* Register the new context */ 2729 channel->dpio = dpaa2_io_service_select(i); 2730 err = dpaa2_io_service_register(channel->dpio, nctx, dev); 2731 if (err) { 2732 dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 2733 /* If no affine DPIO for this core, there's probably 2734 * none available for next cores either. Signal we want 2735 * to retry later, in case the DPIO devices weren't 2736 * probed yet. 
2737 */ 2738 err = -EPROBE_DEFER; 2739 goto err_service_reg; 2740 } 2741 2742 /* Register DPCON notification with MC */ 2743 dpcon_notif_cfg.dpio_id = nctx->dpio_id; 2744 dpcon_notif_cfg.priority = 0; 2745 dpcon_notif_cfg.user_ctx = nctx->qman64; 2746 err = dpcon_set_notification(priv->mc_io, 0, 2747 channel->dpcon->mc_handle, 2748 &dpcon_notif_cfg); 2749 if (err) { 2750 dev_err(dev, "dpcon_set_notification failed()\n"); 2751 goto err_set_cdan; 2752 } 2753 2754 /* If we managed to allocate a channel and also found an affine 2755 * DPIO for this core, add it to the final mask 2756 */ 2757 cpumask_set_cpu(i, &priv->dpio_cpumask); 2758 priv->num_channels++; 2759 2760 /* Stop if we already have enough channels to accommodate all 2761 * RX and TX conf queues 2762 */ 2763 if (priv->num_channels == priv->dpni_attrs.num_queues) 2764 break; 2765 } 2766 2767 return 0; 2768 2769 err_set_cdan: 2770 dpaa2_io_service_deregister(channel->dpio, nctx, dev); 2771 err_service_reg: 2772 dpaa2_eth_free_channel(priv, channel); 2773 err_alloc_ch: 2774 if (err == -EPROBE_DEFER) { 2775 for (i = 0; i < priv->num_channels; i++) { 2776 channel = priv->channel[i]; 2777 nctx = &channel->nctx; 2778 dpaa2_io_service_deregister(channel->dpio, nctx, dev); 2779 dpaa2_eth_free_channel(priv, channel); 2780 } 2781 priv->num_channels = 0; 2782 return err; 2783 } 2784 2785 if (cpumask_empty(&priv->dpio_cpumask)) { 2786 dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 2787 return -ENODEV; 2788 } 2789 2790 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 2791 cpumask_pr_args(&priv->dpio_cpumask)); 2792 2793 return 0; 2794 } 2795 2796 static void dpaa2_eth_free_dpio(struct dpaa2_eth_priv *priv) 2797 { 2798 struct device *dev = priv->net_dev->dev.parent; 2799 struct dpaa2_eth_channel *ch; 2800 int i; 2801 2802 /* deregister CDAN notifications and free channels */ 2803 for (i = 0; i < priv->num_channels; i++) { 2804 ch = priv->channel[i]; 2805 dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev); 2806 dpaa2_eth_free_channel(priv, ch); 2807 } 2808 } 2809 2810 static struct dpaa2_eth_channel *dpaa2_eth_get_affine_channel(struct dpaa2_eth_priv *priv, 2811 int cpu) 2812 { 2813 struct device *dev = priv->net_dev->dev.parent; 2814 int i; 2815 2816 for (i = 0; i < priv->num_channels; i++) 2817 if (priv->channel[i]->nctx.desired_cpu == cpu) 2818 return priv->channel[i]; 2819 2820 /* We should never get here. Issue a warning and return 2821 * the first channel, because it's still better than nothing 2822 */ 2823 dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 2824 2825 return priv->channel[0]; 2826 } 2827 2828 static void dpaa2_eth_set_fq_affinity(struct dpaa2_eth_priv *priv) 2829 { 2830 struct device *dev = priv->net_dev->dev.parent; 2831 struct dpaa2_eth_fq *fq; 2832 int rx_cpu, txc_cpu; 2833 int i; 2834 2835 /* For each FQ, pick one channel/CPU to deliver frames to. 2836 * This may well change at runtime, either through irqbalance or 2837 * through direct user intervention. 
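 * Rx and Tx confirmation queues are spread round-robin over the CPUs in
 * priv->dpio_cpumask, each queue type starting again from the first CPU
 * in the mask.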
2838 */ 2839 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 2840 2841 for (i = 0; i < priv->num_fqs; i++) { 2842 fq = &priv->fq[i]; 2843 switch (fq->type) { 2844 case DPAA2_RX_FQ: 2845 case DPAA2_RX_ERR_FQ: 2846 fq->target_cpu = rx_cpu; 2847 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 2848 if (rx_cpu >= nr_cpu_ids) 2849 rx_cpu = cpumask_first(&priv->dpio_cpumask); 2850 break; 2851 case DPAA2_TX_CONF_FQ: 2852 fq->target_cpu = txc_cpu; 2853 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 2854 if (txc_cpu >= nr_cpu_ids) 2855 txc_cpu = cpumask_first(&priv->dpio_cpumask); 2856 break; 2857 default: 2858 dev_err(dev, "Unknown FQ type: %d\n", fq->type); 2859 } 2860 fq->channel = dpaa2_eth_get_affine_channel(priv, fq->target_cpu); 2861 } 2862 2863 update_xps(priv); 2864 } 2865 2866 static void dpaa2_eth_setup_fqs(struct dpaa2_eth_priv *priv) 2867 { 2868 int i, j; 2869 2870 /* We have one TxConf FQ per Tx flow. 2871 * The number of Tx and Rx queues is the same. 2872 * Tx queues come first in the fq array. 2873 */ 2874 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 2875 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 2876 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 2877 priv->fq[priv->num_fqs++].flowid = (u16)i; 2878 } 2879 2880 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 2881 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 2882 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 2883 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 2884 priv->fq[priv->num_fqs].tc = (u8)j; 2885 priv->fq[priv->num_fqs++].flowid = (u16)i; 2886 } 2887 } 2888 2889 /* We have exactly one Rx error queue per DPNI */ 2890 priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; 2891 priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; 2892 2893 /* For each FQ, decide on which core to process incoming frames */ 2894 dpaa2_eth_set_fq_affinity(priv); 2895 } 2896 2897 /* Allocate and configure one buffer pool for each interface */ 2898 static int dpaa2_eth_setup_dpbp(struct dpaa2_eth_priv *priv) 2899 { 2900 int err; 2901 struct fsl_mc_device *dpbp_dev; 2902 struct device *dev = priv->net_dev->dev.parent; 2903 struct dpbp_attr dpbp_attrs; 2904 2905 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 2906 &dpbp_dev); 2907 if (err) { 2908 if (err == -ENXIO) 2909 err = -EPROBE_DEFER; 2910 else 2911 dev_err(dev, "DPBP device allocation failed\n"); 2912 return err; 2913 } 2914 2915 priv->dpbp_dev = dpbp_dev; 2916 2917 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, 2918 &dpbp_dev->mc_handle); 2919 if (err) { 2920 dev_err(dev, "dpbp_open() failed\n"); 2921 goto err_open; 2922 } 2923 2924 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 2925 if (err) { 2926 dev_err(dev, "dpbp_reset() failed\n"); 2927 goto err_reset; 2928 } 2929 2930 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 2931 if (err) { 2932 dev_err(dev, "dpbp_enable() failed\n"); 2933 goto err_enable; 2934 } 2935 2936 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 2937 &dpbp_attrs); 2938 if (err) { 2939 dev_err(dev, "dpbp_get_attributes() failed\n"); 2940 goto err_get_attr; 2941 } 2942 priv->bpid = dpbp_attrs.bpid; 2943 2944 return 0; 2945 2946 err_get_attr: 2947 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 2948 err_enable: 2949 err_reset: 2950 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 2951 err_open: 2952 fsl_mc_object_free(dpbp_dev); 2953 2954 return err; 2955 } 2956 2957 static void dpaa2_eth_free_dpbp(struct dpaa2_eth_priv *priv) 2958 { 2959 dpaa2_eth_drain_pool(priv); 
2960 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 2961 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 2962 fsl_mc_object_free(priv->dpbp_dev); 2963 } 2964 2965 static int dpaa2_eth_set_buffer_layout(struct dpaa2_eth_priv *priv) 2966 { 2967 struct device *dev = priv->net_dev->dev.parent; 2968 struct dpni_buffer_layout buf_layout = {0}; 2969 u16 rx_buf_align; 2970 int err; 2971 2972 /* We need to check for WRIOP version 1.0.0, but depending on the MC 2973 * version, this number is not always provided correctly on rev1. 2974 * We need to check for both alternatives in this situation. 2975 */ 2976 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 2977 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 2978 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 2979 else 2980 rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 2981 2982 /* We need to ensure that the buffer size seen by WRIOP is a multiple 2983 * of 64 or 256 bytes depending on the WRIOP version. 2984 */ 2985 priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align); 2986 2987 /* tx buffer */ 2988 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 2989 buf_layout.pass_timestamp = true; 2990 buf_layout.pass_frame_status = true; 2991 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 2992 DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 2993 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 2994 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 2995 DPNI_QUEUE_TX, &buf_layout); 2996 if (err) { 2997 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 2998 return err; 2999 } 3000 3001 /* tx-confirm buffer */ 3002 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP | 3003 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS; 3004 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 3005 DPNI_QUEUE_TX_CONFIRM, &buf_layout); 3006 if (err) { 3007 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 3008 return err; 3009 } 3010 3011 /* Now that we've set our tx buffer layout, retrieve the minimum 3012 * required tx data offset. 
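 * (The driver only warns below if the offset reported by firmware is not
 * 64B aligned.)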
3013 */ 3014 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 3015 &priv->tx_data_offset); 3016 if (err) { 3017 dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 3018 return err; 3019 } 3020 3021 if ((priv->tx_data_offset % 64) != 0) 3022 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 3023 priv->tx_data_offset); 3024 3025 /* rx buffer */ 3026 buf_layout.pass_frame_status = true; 3027 buf_layout.pass_parser_result = true; 3028 buf_layout.data_align = rx_buf_align; 3029 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 3030 buf_layout.private_data_size = 0; 3031 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 3032 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 3033 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 3034 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 3035 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 3036 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 3037 DPNI_QUEUE_RX, &buf_layout); 3038 if (err) { 3039 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 3040 return err; 3041 } 3042 3043 return 0; 3044 } 3045 3046 #define DPNI_ENQUEUE_FQID_VER_MAJOR 7 3047 #define DPNI_ENQUEUE_FQID_VER_MINOR 9 3048 3049 static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv, 3050 struct dpaa2_eth_fq *fq, 3051 struct dpaa2_fd *fd, u8 prio, 3052 u32 num_frames __always_unused, 3053 int *frames_enqueued) 3054 { 3055 int err; 3056 3057 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 3058 priv->tx_qdid, prio, 3059 fq->tx_qdbin, fd); 3060 if (!err && frames_enqueued) 3061 *frames_enqueued = 1; 3062 return err; 3063 } 3064 3065 static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv, 3066 struct dpaa2_eth_fq *fq, 3067 struct dpaa2_fd *fd, 3068 u8 prio, u32 num_frames, 3069 int *frames_enqueued) 3070 { 3071 int err; 3072 3073 err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio, 3074 fq->tx_fqid[prio], 3075 fd, num_frames); 3076 3077 if (err == 0) 3078 return -EBUSY; 3079 3080 if (frames_enqueued) 3081 *frames_enqueued = err; 3082 return 0; 3083 } 3084 3085 static void dpaa2_eth_set_enqueue_mode(struct dpaa2_eth_priv *priv) 3086 { 3087 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR, 3088 DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 3089 priv->enqueue = dpaa2_eth_enqueue_qd; 3090 else 3091 priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 3092 } 3093 3094 static int dpaa2_eth_set_pause(struct dpaa2_eth_priv *priv) 3095 { 3096 struct device *dev = priv->net_dev->dev.parent; 3097 struct dpni_link_cfg link_cfg = {0}; 3098 int err; 3099 3100 /* Get the default link options so we don't override other flags */ 3101 err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 3102 if (err) { 3103 dev_err(dev, "dpni_get_link_cfg() failed\n"); 3104 return err; 3105 } 3106 3107 /* By default, enable both Rx and Tx pause frames */ 3108 link_cfg.options |= DPNI_LINK_OPT_PAUSE; 3109 link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE; 3110 err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg); 3111 if (err) { 3112 dev_err(dev, "dpni_set_link_cfg() failed\n"); 3113 return err; 3114 } 3115 3116 priv->link_state.options = link_cfg.options; 3117 3118 return 0; 3119 } 3120 3121 static void dpaa2_eth_update_tx_fqids(struct dpaa2_eth_priv *priv) 3122 { 3123 struct dpni_queue_id qid = {0}; 3124 struct dpaa2_eth_fq *fq; 3125 struct dpni_queue queue; 3126 int i, j, err; 3127 3128 /* We only use Tx FQIDs for FQID-based enqueue, so check 3129 * if DPNI version supports it before updating FQIDs 3130 */ 3131 if (dpaa2_eth_cmp_dpni_ver(priv, 
DPNI_ENQUEUE_FQID_VER_MAJOR, 3132 DPNI_ENQUEUE_FQID_VER_MINOR) < 0) 3133 return; 3134 3135 for (i = 0; i < priv->num_fqs; i++) { 3136 fq = &priv->fq[i]; 3137 if (fq->type != DPAA2_TX_CONF_FQ) 3138 continue; 3139 for (j = 0; j < dpaa2_eth_tc_count(priv); j++) { 3140 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3141 DPNI_QUEUE_TX, j, fq->flowid, 3142 &queue, &qid); 3143 if (err) 3144 goto out_err; 3145 3146 fq->tx_fqid[j] = qid.fqid; 3147 if (fq->tx_fqid[j] == 0) 3148 goto out_err; 3149 } 3150 } 3151 3152 priv->enqueue = dpaa2_eth_enqueue_fq_multiple; 3153 3154 return; 3155 3156 out_err: 3157 netdev_info(priv->net_dev, 3158 "Error reading Tx FQID, fallback to QDID-based enqueue\n"); 3159 priv->enqueue = dpaa2_eth_enqueue_qd; 3160 } 3161 3162 /* Configure ingress classification based on VLAN PCP */ 3163 static int dpaa2_eth_set_vlan_qos(struct dpaa2_eth_priv *priv) 3164 { 3165 struct device *dev = priv->net_dev->dev.parent; 3166 struct dpkg_profile_cfg kg_cfg = {0}; 3167 struct dpni_qos_tbl_cfg qos_cfg = {0}; 3168 struct dpni_rule_cfg key_params; 3169 void *dma_mem, *key, *mask; 3170 u8 key_size = 2; /* VLAN TCI field */ 3171 int i, pcp, err; 3172 3173 /* VLAN-based classification only makes sense if we have multiple 3174 * traffic classes. 3175 * Also, we need to extract just the 3-bit PCP field from the VLAN 3176 * header and we can only do that by using a mask 3177 */ 3178 if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) { 3179 dev_dbg(dev, "VLAN-based QoS classification not supported\n"); 3180 return -EOPNOTSUPP; 3181 } 3182 3183 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 3184 if (!dma_mem) 3185 return -ENOMEM; 3186 3187 kg_cfg.num_extracts = 1; 3188 kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR; 3189 kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN; 3190 kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD; 3191 kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI; 3192 3193 err = dpni_prepare_key_cfg(&kg_cfg, dma_mem); 3194 if (err) { 3195 dev_err(dev, "dpni_prepare_key_cfg failed\n"); 3196 goto out_free_tbl; 3197 } 3198 3199 /* set QoS table */ 3200 qos_cfg.default_tc = 0; 3201 qos_cfg.discard_on_miss = 0; 3202 qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, 3203 DPAA2_CLASSIFIER_DMA_SIZE, 3204 DMA_TO_DEVICE); 3205 if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) { 3206 dev_err(dev, "QoS table DMA mapping failed\n"); 3207 err = -ENOMEM; 3208 goto out_free_tbl; 3209 } 3210 3211 err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg); 3212 if (err) { 3213 dev_err(dev, "dpni_set_qos_table failed\n"); 3214 goto out_unmap_tbl; 3215 } 3216 3217 /* Add QoS table entries */ 3218 key = kzalloc(key_size * 2, GFP_KERNEL); 3219 if (!key) { 3220 err = -ENOMEM; 3221 goto out_unmap_tbl; 3222 } 3223 mask = key + key_size; 3224 *(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK); 3225 3226 key_params.key_iova = dma_map_single(dev, key, key_size * 2, 3227 DMA_TO_DEVICE); 3228 if (dma_mapping_error(dev, key_params.key_iova)) { 3229 dev_err(dev, "Qos table entry DMA mapping failed\n"); 3230 err = -ENOMEM; 3231 goto out_free_key; 3232 } 3233 3234 key_params.mask_iova = key_params.key_iova + key_size; 3235 key_params.key_size = key_size; 3236 3237 /* We add rules for PCP-based distribution starting with highest 3238 * priority (VLAN PCP = 7). 
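(For example, with four traffic classes, PCP values 7..4 map to TC 3..0.)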
If this DPNI doesn't have enough traffic 3239 * classes to accommodate all priority levels, the lowest ones end up 3240 * on TC 0 which was configured as default 3241 */ 3242 for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) { 3243 *(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT); 3244 dma_sync_single_for_device(dev, key_params.key_iova, 3245 key_size * 2, DMA_TO_DEVICE); 3246 3247 err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token, 3248 &key_params, i, i); 3249 if (err) { 3250 dev_err(dev, "dpni_add_qos_entry failed\n"); 3251 dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token); 3252 goto out_unmap_key; 3253 } 3254 } 3255 3256 priv->vlan_cls_enabled = true; 3257 3258 /* Table and key memory is not persistent, clean everything up after 3259 * configuration is finished 3260 */ 3261 out_unmap_key: 3262 dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE); 3263 out_free_key: 3264 kfree(key); 3265 out_unmap_tbl: 3266 dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE, 3267 DMA_TO_DEVICE); 3268 out_free_tbl: 3269 kfree(dma_mem); 3270 3271 return err; 3272 } 3273 3274 /* Configure the DPNI object this interface is associated with */ 3275 static int dpaa2_eth_setup_dpni(struct fsl_mc_device *ls_dev) 3276 { 3277 struct device *dev = &ls_dev->dev; 3278 struct dpaa2_eth_priv *priv; 3279 struct net_device *net_dev; 3280 int err; 3281 3282 net_dev = dev_get_drvdata(dev); 3283 priv = netdev_priv(net_dev); 3284 3285 /* get a handle for the DPNI object */ 3286 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 3287 if (err) { 3288 dev_err(dev, "dpni_open() failed\n"); 3289 return err; 3290 } 3291 3292 /* Check if we can work with this DPNI object */ 3293 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 3294 &priv->dpni_ver_minor); 3295 if (err) { 3296 dev_err(dev, "dpni_get_api_version() failed\n"); 3297 goto close; 3298 } 3299 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 3300 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 3301 priv->dpni_ver_major, priv->dpni_ver_minor, 3302 DPNI_VER_MAJOR, DPNI_VER_MINOR); 3303 err = -ENOTSUPP; 3304 goto close; 3305 } 3306 3307 ls_dev->mc_io = priv->mc_io; 3308 ls_dev->mc_handle = priv->mc_token; 3309 3310 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 3311 if (err) { 3312 dev_err(dev, "dpni_reset() failed\n"); 3313 goto close; 3314 } 3315 3316 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 3317 &priv->dpni_attrs); 3318 if (err) { 3319 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 3320 goto close; 3321 } 3322 3323 err = dpaa2_eth_set_buffer_layout(priv); 3324 if (err) 3325 goto close; 3326 3327 dpaa2_eth_set_enqueue_mode(priv); 3328 3329 /* Enable pause frame support */ 3330 if (dpaa2_eth_has_pause_support(priv)) { 3331 err = dpaa2_eth_set_pause(priv); 3332 if (err) 3333 goto close; 3334 } 3335 3336 err = dpaa2_eth_set_vlan_qos(priv); 3337 if (err && err != -EOPNOTSUPP) 3338 goto close; 3339 3340 priv->cls_rules = devm_kcalloc(dev, dpaa2_eth_fs_count(priv), 3341 sizeof(struct dpaa2_eth_cls_rule), 3342 GFP_KERNEL); 3343 if (!priv->cls_rules) { 3344 err = -ENOMEM; 3345 goto close; 3346 } 3347 3348 return 0; 3349 3350 close: 3351 dpni_close(priv->mc_io, 0, priv->mc_token); 3352 3353 return err; 3354 } 3355 3356 static void dpaa2_eth_free_dpni(struct dpaa2_eth_priv *priv) 3357 { 3358 int err; 3359 3360 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 3361 if (err) 3362 netdev_warn(priv->net_dev, 
"dpni_reset() failed (err %d)\n", 3363 err); 3364 3365 dpni_close(priv->mc_io, 0, priv->mc_token); 3366 } 3367 3368 static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, 3369 struct dpaa2_eth_fq *fq) 3370 { 3371 struct device *dev = priv->net_dev->dev.parent; 3372 struct dpni_queue queue; 3373 struct dpni_queue_id qid; 3374 int err; 3375 3376 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3377 DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid); 3378 if (err) { 3379 dev_err(dev, "dpni_get_queue(RX) failed\n"); 3380 return err; 3381 } 3382 3383 fq->fqid = qid.fqid; 3384 3385 queue.destination.id = fq->channel->dpcon_id; 3386 queue.destination.type = DPNI_DEST_DPCON; 3387 queue.destination.priority = 1; 3388 queue.user_context = (u64)(uintptr_t)fq; 3389 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3390 DPNI_QUEUE_RX, fq->tc, fq->flowid, 3391 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 3392 &queue); 3393 if (err) { 3394 dev_err(dev, "dpni_set_queue(RX) failed\n"); 3395 return err; 3396 } 3397 3398 /* xdp_rxq setup */ 3399 /* only once for each channel */ 3400 if (fq->tc > 0) 3401 return 0; 3402 3403 err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev, 3404 fq->flowid, 0); 3405 if (err) { 3406 dev_err(dev, "xdp_rxq_info_reg failed\n"); 3407 return err; 3408 } 3409 3410 err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq, 3411 MEM_TYPE_PAGE_ORDER0, NULL); 3412 if (err) { 3413 dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); 3414 return err; 3415 } 3416 3417 return 0; 3418 } 3419 3420 static int dpaa2_eth_setup_tx_flow(struct dpaa2_eth_priv *priv, 3421 struct dpaa2_eth_fq *fq) 3422 { 3423 struct device *dev = priv->net_dev->dev.parent; 3424 struct dpni_queue queue; 3425 struct dpni_queue_id qid; 3426 int i, err; 3427 3428 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3429 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3430 DPNI_QUEUE_TX, i, fq->flowid, 3431 &queue, &qid); 3432 if (err) { 3433 dev_err(dev, "dpni_get_queue(TX) failed\n"); 3434 return err; 3435 } 3436 fq->tx_fqid[i] = qid.fqid; 3437 } 3438 3439 /* All Tx queues belonging to the same flowid have the same qdbin */ 3440 fq->tx_qdbin = qid.qdbin; 3441 3442 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3443 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 3444 &queue, &qid); 3445 if (err) { 3446 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 3447 return err; 3448 } 3449 3450 fq->fqid = qid.fqid; 3451 3452 queue.destination.id = fq->channel->dpcon_id; 3453 queue.destination.type = DPNI_DEST_DPCON; 3454 queue.destination.priority = 0; 3455 queue.user_context = (u64)(uintptr_t)fq; 3456 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3457 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 3458 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 3459 &queue); 3460 if (err) { 3461 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 3462 return err; 3463 } 3464 3465 return 0; 3466 } 3467 3468 static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, 3469 struct dpaa2_eth_fq *fq) 3470 { 3471 struct device *dev = priv->net_dev->dev.parent; 3472 struct dpni_queue q = { { 0 } }; 3473 struct dpni_queue_id qid; 3474 u8 q_opt = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; 3475 int err; 3476 3477 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 3478 DPNI_QUEUE_RX_ERR, 0, 0, &q, &qid); 3479 if (err) { 3480 dev_err(dev, "dpni_get_queue() failed (%d)\n", err); 3481 return err; 3482 } 3483 3484 fq->fqid = qid.fqid; 3485 3486 q.destination.id = fq->channel->dpcon_id; 3487 q.destination.type = DPNI_DEST_DPCON; 3488 
q.destination.priority = 1; 3489 q.user_context = (u64)(uintptr_t)fq; 3490 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 3491 DPNI_QUEUE_RX_ERR, 0, 0, q_opt, &q); 3492 if (err) { 3493 dev_err(dev, "dpni_set_queue() failed (%d)\n", err); 3494 return err; 3495 } 3496 3497 return 0; 3498 } 3499 3500 /* Supported header fields for Rx hash distribution key */ 3501 static const struct dpaa2_eth_dist_fields dist_fields[] = { 3502 { 3503 /* L2 header */ 3504 .rxnfc_field = RXH_L2DA, 3505 .cls_prot = NET_PROT_ETH, 3506 .cls_field = NH_FLD_ETH_DA, 3507 .id = DPAA2_ETH_DIST_ETHDST, 3508 .size = 6, 3509 }, { 3510 .cls_prot = NET_PROT_ETH, 3511 .cls_field = NH_FLD_ETH_SA, 3512 .id = DPAA2_ETH_DIST_ETHSRC, 3513 .size = 6, 3514 }, { 3515 /* This is the last ethertype field parsed: 3516 * depending on frame format, it can be the MAC ethertype 3517 * or the VLAN etype. 3518 */ 3519 .cls_prot = NET_PROT_ETH, 3520 .cls_field = NH_FLD_ETH_TYPE, 3521 .id = DPAA2_ETH_DIST_ETHTYPE, 3522 .size = 2, 3523 }, { 3524 /* VLAN header */ 3525 .rxnfc_field = RXH_VLAN, 3526 .cls_prot = NET_PROT_VLAN, 3527 .cls_field = NH_FLD_VLAN_TCI, 3528 .id = DPAA2_ETH_DIST_VLAN, 3529 .size = 2, 3530 }, { 3531 /* IP header */ 3532 .rxnfc_field = RXH_IP_SRC, 3533 .cls_prot = NET_PROT_IP, 3534 .cls_field = NH_FLD_IP_SRC, 3535 .id = DPAA2_ETH_DIST_IPSRC, 3536 .size = 4, 3537 }, { 3538 .rxnfc_field = RXH_IP_DST, 3539 .cls_prot = NET_PROT_IP, 3540 .cls_field = NH_FLD_IP_DST, 3541 .id = DPAA2_ETH_DIST_IPDST, 3542 .size = 4, 3543 }, { 3544 .rxnfc_field = RXH_L3_PROTO, 3545 .cls_prot = NET_PROT_IP, 3546 .cls_field = NH_FLD_IP_PROTO, 3547 .id = DPAA2_ETH_DIST_IPPROTO, 3548 .size = 1, 3549 }, { 3550 /* Using UDP ports, this is functionally equivalent to raw 3551 * byte pairs from L4 header. 3552 */ 3553 .rxnfc_field = RXH_L4_B_0_1, 3554 .cls_prot = NET_PROT_UDP, 3555 .cls_field = NH_FLD_UDP_PORT_SRC, 3556 .id = DPAA2_ETH_DIST_L4SRC, 3557 .size = 2, 3558 }, { 3559 .rxnfc_field = RXH_L4_B_2_3, 3560 .cls_prot = NET_PROT_UDP, 3561 .cls_field = NH_FLD_UDP_PORT_DST, 3562 .id = DPAA2_ETH_DIST_L4DST, 3563 .size = 2, 3564 }, 3565 }; 3566 3567 /* Configure the Rx hash key using the legacy API */ 3568 static int dpaa2_eth_config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 3569 { 3570 struct device *dev = priv->net_dev->dev.parent; 3571 struct dpni_rx_tc_dist_cfg dist_cfg; 3572 int i, err = 0; 3573 3574 memset(&dist_cfg, 0, sizeof(dist_cfg)); 3575 3576 dist_cfg.key_cfg_iova = key; 3577 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 3578 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 3579 3580 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3581 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 3582 i, &dist_cfg); 3583 if (err) { 3584 dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 3585 break; 3586 } 3587 } 3588 3589 return err; 3590 } 3591 3592 /* Configure the Rx hash key using the new API */ 3593 static int dpaa2_eth_config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 3594 { 3595 struct device *dev = priv->net_dev->dev.parent; 3596 struct dpni_rx_dist_cfg dist_cfg; 3597 int i, err = 0; 3598 3599 memset(&dist_cfg, 0, sizeof(dist_cfg)); 3600 3601 dist_cfg.key_cfg_iova = key; 3602 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 3603 dist_cfg.enable = 1; 3604 3605 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3606 dist_cfg.tc = i; 3607 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, 3608 &dist_cfg); 3609 if (err) { 3610 dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 3611 break; 3612 } 3613 3614 /* If the 
flow steering / hashing key is shared between all 3615 * traffic classes, install it just once 3616 */ 3617 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 3618 break; 3619 } 3620 3621 return err; 3622 } 3623 3624 /* Configure the Rx flow classification key */ 3625 static int dpaa2_eth_config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 3626 { 3627 struct device *dev = priv->net_dev->dev.parent; 3628 struct dpni_rx_dist_cfg dist_cfg; 3629 int i, err = 0; 3630 3631 memset(&dist_cfg, 0, sizeof(dist_cfg)); 3632 3633 dist_cfg.key_cfg_iova = key; 3634 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 3635 dist_cfg.enable = 1; 3636 3637 for (i = 0; i < dpaa2_eth_tc_count(priv); i++) { 3638 dist_cfg.tc = i; 3639 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, 3640 &dist_cfg); 3641 if (err) { 3642 dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 3643 break; 3644 } 3645 3646 /* If the flow steering / hashing key is shared between all 3647 * traffic classes, install it just once 3648 */ 3649 if (priv->dpni_attrs.options & DPNI_OPT_SHARED_FS) 3650 break; 3651 } 3652 3653 return err; 3654 } 3655 3656 /* Size of the Rx flow classification key */ 3657 int dpaa2_eth_cls_key_size(u64 fields) 3658 { 3659 int i, size = 0; 3660 3661 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3662 if (!(fields & dist_fields[i].id)) 3663 continue; 3664 size += dist_fields[i].size; 3665 } 3666 3667 return size; 3668 } 3669 3670 /* Offset of header field in Rx classification key */ 3671 int dpaa2_eth_cls_fld_off(int prot, int field) 3672 { 3673 int i, off = 0; 3674 3675 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3676 if (dist_fields[i].cls_prot == prot && 3677 dist_fields[i].cls_field == field) 3678 return off; 3679 off += dist_fields[i].size; 3680 } 3681 3682 WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 3683 return 0; 3684 } 3685 3686 /* Prune unused fields from the classification rule. 3687 * Used when masking is not supported 3688 */ 3689 void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields) 3690 { 3691 int off = 0, new_off = 0; 3692 int i, size; 3693 3694 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3695 size = dist_fields[i].size; 3696 if (dist_fields[i].id & fields) { 3697 memcpy(key_mem + new_off, key_mem + off, size); 3698 new_off += size; 3699 } 3700 off += size; 3701 } 3702 } 3703 3704 /* Set Rx distribution (hash or flow classification) key 3705 * flags is a combination of RXH_ bits 3706 */ 3707 static int dpaa2_eth_set_dist_key(struct net_device *net_dev, 3708 enum dpaa2_eth_rx_dist type, u64 flags) 3709 { 3710 struct device *dev = net_dev->dev.parent; 3711 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 3712 struct dpkg_profile_cfg cls_cfg; 3713 u32 rx_hash_fields = 0; 3714 dma_addr_t key_iova; 3715 u8 *dma_mem; 3716 int i; 3717 int err = 0; 3718 3719 memset(&cls_cfg, 0, sizeof(cls_cfg)); 3720 3721 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 3722 struct dpkg_extract *key = 3723 &cls_cfg.extracts[cls_cfg.num_extracts]; 3724 3725 /* For both Rx hashing and classification keys 3726 * we set only the selected fields. 
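 * For hash keys we also accumulate the matching RXH_* flags, which are
 * saved in priv->rx_hash_fields once the key is successfully applied.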
3727 */ 3728 if (!(flags & dist_fields[i].id)) 3729 continue; 3730 if (type == DPAA2_ETH_RX_DIST_HASH) 3731 rx_hash_fields |= dist_fields[i].rxnfc_field; 3732 3733 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 3734 dev_err(dev, "error adding key extraction rule, too many rules?\n"); 3735 return -E2BIG; 3736 } 3737 3738 key->type = DPKG_EXTRACT_FROM_HDR; 3739 key->extract.from_hdr.prot = dist_fields[i].cls_prot; 3740 key->extract.from_hdr.type = DPKG_FULL_FIELD; 3741 key->extract.from_hdr.field = dist_fields[i].cls_field; 3742 cls_cfg.num_extracts++; 3743 } 3744 3745 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 3746 if (!dma_mem) 3747 return -ENOMEM; 3748 3749 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); 3750 if (err) { 3751 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); 3752 goto free_key; 3753 } 3754 3755 /* Prepare for setting the rx dist */ 3756 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, 3757 DMA_TO_DEVICE); 3758 if (dma_mapping_error(dev, key_iova)) { 3759 dev_err(dev, "DMA mapping failed\n"); 3760 err = -ENOMEM; 3761 goto free_key; 3762 } 3763 3764 if (type == DPAA2_ETH_RX_DIST_HASH) { 3765 if (dpaa2_eth_has_legacy_dist(priv)) 3766 err = dpaa2_eth_config_legacy_hash_key(priv, key_iova); 3767 else 3768 err = dpaa2_eth_config_hash_key(priv, key_iova); 3769 } else { 3770 err = dpaa2_eth_config_cls_key(priv, key_iova); 3771 } 3772 3773 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, 3774 DMA_TO_DEVICE); 3775 if (!err && type == DPAA2_ETH_RX_DIST_HASH) 3776 priv->rx_hash_fields = rx_hash_fields; 3777 3778 free_key: 3779 kfree(dma_mem); 3780 return err; 3781 } 3782 3783 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) 3784 { 3785 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 3786 u64 key = 0; 3787 int i; 3788 3789 if (!dpaa2_eth_hash_enabled(priv)) 3790 return -EOPNOTSUPP; 3791 3792 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) 3793 if (dist_fields[i].rxnfc_field & flags) 3794 key |= dist_fields[i].id; 3795 3796 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key); 3797 } 3798 3799 int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags) 3800 { 3801 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags); 3802 } 3803 3804 static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv) 3805 { 3806 struct device *dev = priv->net_dev->dev.parent; 3807 int err; 3808 3809 /* Check if we actually support Rx flow classification */ 3810 if (dpaa2_eth_has_legacy_dist(priv)) { 3811 dev_dbg(dev, "Rx cls not supported by current MC version\n"); 3812 return -EOPNOTSUPP; 3813 } 3814 3815 if (!dpaa2_eth_fs_enabled(priv)) { 3816 dev_dbg(dev, "Rx cls disabled in DPNI options\n"); 3817 return -EOPNOTSUPP; 3818 } 3819 3820 if (!dpaa2_eth_hash_enabled(priv)) { 3821 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); 3822 return -EOPNOTSUPP; 3823 } 3824 3825 /* If there is no support for masking in the classification table, 3826 * we don't set a default key, as it will depend on the rules 3827 * added by the user at runtime. 
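 * Classification is still marked as enabled below, so such rules can be
 * installed later.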
3828 */ 3829 if (!dpaa2_eth_fs_mask_enabled(priv)) 3830 goto out; 3831 3832 err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); 3833 if (err) 3834 return err; 3835 3836 out: 3837 priv->rx_cls_enabled = 1; 3838 3839 return 0; 3840 } 3841 3842 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 3843 * frame queues and channels 3844 */ 3845 static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) 3846 { 3847 struct net_device *net_dev = priv->net_dev; 3848 struct device *dev = net_dev->dev.parent; 3849 struct dpni_pools_cfg pools_params; 3850 struct dpni_error_cfg err_cfg; 3851 int err = 0; 3852 int i; 3853 3854 pools_params.num_dpbp = 1; 3855 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 3856 pools_params.pools[0].backup_pool = 0; 3857 pools_params.pools[0].buffer_size = priv->rx_buf_size; 3858 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); 3859 if (err) { 3860 dev_err(dev, "dpni_set_pools() failed\n"); 3861 return err; 3862 } 3863 3864 /* have the interface implicitly distribute traffic based on 3865 * the default hash key 3866 */ 3867 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 3868 if (err && err != -EOPNOTSUPP) 3869 dev_err(dev, "Failed to configure hashing\n"); 3870 3871 /* Configure the flow classification key; it includes all 3872 * supported header fields and cannot be modified at runtime 3873 */ 3874 err = dpaa2_eth_set_default_cls(priv); 3875 if (err && err != -EOPNOTSUPP) 3876 dev_err(dev, "Failed to configure Rx classification key\n"); 3877 3878 /* Configure handling of error frames */ 3879 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 3880 err_cfg.set_frame_annotation = 1; 3881 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 3882 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 3883 &err_cfg); 3884 if (err) { 3885 dev_err(dev, "dpni_set_errors_behavior failed\n"); 3886 return err; 3887 } 3888 3889 /* Configure Rx and Tx conf queues to generate CDANs */ 3890 for (i = 0; i < priv->num_fqs; i++) { 3891 switch (priv->fq[i].type) { 3892 case DPAA2_RX_FQ: 3893 err = dpaa2_eth_setup_rx_flow(priv, &priv->fq[i]); 3894 break; 3895 case DPAA2_TX_CONF_FQ: 3896 err = dpaa2_eth_setup_tx_flow(priv, &priv->fq[i]); 3897 break; 3898 case DPAA2_RX_ERR_FQ: 3899 err = setup_rx_err_flow(priv, &priv->fq[i]); 3900 break; 3901 default: 3902 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 3903 return -EINVAL; 3904 } 3905 if (err) 3906 return err; 3907 } 3908 3909 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 3910 DPNI_QUEUE_TX, &priv->tx_qdid); 3911 if (err) { 3912 dev_err(dev, "dpni_get_qdid() failed\n"); 3913 return err; 3914 } 3915 3916 return 0; 3917 } 3918 3919 /* Allocate rings for storing incoming frame descriptors */ 3920 static int dpaa2_eth_alloc_rings(struct dpaa2_eth_priv *priv) 3921 { 3922 struct net_device *net_dev = priv->net_dev; 3923 struct device *dev = net_dev->dev.parent; 3924 int i; 3925 3926 for (i = 0; i < priv->num_channels; i++) { 3927 priv->channel[i]->store = 3928 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 3929 if (!priv->channel[i]->store) { 3930 netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 3931 goto err_ring; 3932 } 3933 } 3934 3935 return 0; 3936 3937 err_ring: 3938 for (i = 0; i < priv->num_channels; i++) { 3939 if (!priv->channel[i]->store) 3940 break; 3941 dpaa2_io_store_destroy(priv->channel[i]->store); 3942 } 3943 3944 return -ENOMEM; 3945 } 3946 3947 static void dpaa2_eth_free_rings(struct dpaa2_eth_priv *priv) 3948 { 3949 int i; 3950 3951 for (i = 
static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get the address currently stored in the DPNI attributes, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}

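/* Initialize the net_device: install the netdev and ethtool ops, program the
 * MAC and broadcast addresses, set the maximum frame length, size the real
 * Rx/Tx queue counts and advertise only the features the DPNI supports.
 */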
static int dpaa2_eth_netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = dpaa2_eth_set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX | NETIF_F_HW_TC;
	net_dev->hw_features = net_dev->features;

	if (priv->dpni_attrs.vlan_filter_entries)
		net_dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	return 0;
}

/* Fallback link-state monitor, used when MC interrupts are not available:
 * periodically poll the DPNI link state from a kernel thread.
 */
static int dpaa2_eth_poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = dpaa2_eth_link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

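/* Look up the DPMAC endpoint connected to our DPNI and, if one exists, open
 * it; for PHY-backed MACs also establish the MAC connection. Returns
 * -EPROBE_DEFER while the endpoint is not yet available; a DPNI with no
 * DPMAC connected is not treated as an error.
 */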
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);

	if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER)
		return PTR_ERR(dpmac_dev);

	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_open(mac);
	if (err)
		goto err_free_mac;
	priv->mac = mac;

	if (dpaa2_eth_is_type_phy(priv)) {
		err = dpaa2_mac_connect(mac);
		if (err) {
			netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
			goto err_close_mac;
		}
	}

	return 0;

err_close_mac:
	dpaa2_mac_close(mac);
	priv->mac = NULL;
err_free_mac:
	kfree(mac);
	return err;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_is_type_phy(priv))
		dpaa2_mac_disconnect(priv->mac);

	if (!dpaa2_eth_has_mac(priv))
		return;

	dpaa2_mac_close(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

/* Threaded handler for the DPNI MC interrupt: reacts to link state changes
 * and to the DPNI being connected to/disconnected from a DPMAC endpoint.
 */
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		dpaa2_eth_link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		dpaa2_eth_set_mac_addr(netdev_priv(net_dev));
		dpaa2_eth_update_tx_fqids(priv);

		rtnl_lock();
		if (dpaa2_eth_has_mac(priv))
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

static int dpaa2_eth_setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

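/* Probe a DPNI object: allocate the net_device and an MC portal, set up the
 * DPNI, DPIO, buffer pool and frame queues, then register the netdev,
 * devlink and (optionally) debugfs entries. On failure, everything acquired
 * so far is released again via the error labels at the end of the function.
 */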
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	priv->tx_tstamp_type = HWTSTAMP_TX_OFF;
	priv->rx_tstamp = false;

	priv->dpaa2_ptp_wq = alloc_workqueue("dpaa2_ptp_wq", 0, 0);
	if (!priv->dpaa2_ptp_wq) {
		err = -ENOMEM;
		goto err_wq_alloc;
	}

	INIT_WORK(&priv->tx_onestep_tstamp, dpaa2_eth_tx_onestep_tstamp);

	skb_queue_head_init(&priv->tx_skbs);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = dpaa2_eth_setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = dpaa2_eth_setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	dpaa2_eth_setup_fqs(priv);

	err = dpaa2_eth_setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = dpaa2_eth_bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	dpaa2_eth_add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	priv->sgt_cache = alloc_percpu(*priv->sgt_cache);
	if (!priv->sgt_cache) {
		dev_err(dev, "alloc_percpu(sgt_cache) failed\n");
		err = -ENOMEM;
		goto err_alloc_sgt_cache;
	}

	err = dpaa2_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = dpaa2_eth_set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = dpaa2_eth_set_tx_csum(priv,
				    !!(net_dev->features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = dpaa2_eth_alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

#ifdef CONFIG_FSL_DPAA2_ETH_DCB
	if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
		priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
		net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
	} else {
		dev_dbg(dev, "PFC not supported\n");
	}
#endif

	err = dpaa2_eth_setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(dpaa2_eth_poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = dpaa2_eth_dl_register(priv);
	if (err)
		goto err_dl_register;

	err = dpaa2_eth_dl_traps_register(priv);
	if (err)
		goto err_dl_trap_register;

	err = dpaa2_eth_dl_port_add(priv);
	if (err)
		goto err_dl_port_add;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

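	/* Error unwind: each label below releases the resources acquired
	 * before the failing step, in reverse order of the setup sequence.
	 */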
thread\n"); 4395 goto err_poll_thread; 4396 } 4397 priv->do_link_poll = true; 4398 } 4399 4400 err = dpaa2_eth_connect_mac(priv); 4401 if (err) 4402 goto err_connect_mac; 4403 4404 err = dpaa2_eth_dl_register(priv); 4405 if (err) 4406 goto err_dl_register; 4407 4408 err = dpaa2_eth_dl_traps_register(priv); 4409 if (err) 4410 goto err_dl_trap_register; 4411 4412 err = dpaa2_eth_dl_port_add(priv); 4413 if (err) 4414 goto err_dl_port_add; 4415 4416 err = register_netdev(net_dev); 4417 if (err < 0) { 4418 dev_err(dev, "register_netdev() failed\n"); 4419 goto err_netdev_reg; 4420 } 4421 4422 #ifdef CONFIG_DEBUG_FS 4423 dpaa2_dbg_add(priv); 4424 #endif 4425 4426 dev_info(dev, "Probed interface %s\n", net_dev->name); 4427 return 0; 4428 4429 err_netdev_reg: 4430 dpaa2_eth_dl_port_del(priv); 4431 err_dl_port_add: 4432 dpaa2_eth_dl_traps_unregister(priv); 4433 err_dl_trap_register: 4434 dpaa2_eth_dl_unregister(priv); 4435 err_dl_register: 4436 dpaa2_eth_disconnect_mac(priv); 4437 err_connect_mac: 4438 if (priv->do_link_poll) 4439 kthread_stop(priv->poll_thread); 4440 else 4441 fsl_mc_free_irqs(dpni_dev); 4442 err_poll_thread: 4443 dpaa2_eth_free_rings(priv); 4444 err_alloc_rings: 4445 err_csum: 4446 err_netdev_init: 4447 free_percpu(priv->sgt_cache); 4448 err_alloc_sgt_cache: 4449 free_percpu(priv->percpu_extras); 4450 err_alloc_percpu_extras: 4451 free_percpu(priv->percpu_stats); 4452 err_alloc_percpu_stats: 4453 dpaa2_eth_del_ch_napi(priv); 4454 err_bind: 4455 dpaa2_eth_free_dpbp(priv); 4456 err_dpbp_setup: 4457 dpaa2_eth_free_dpio(priv); 4458 err_dpio_setup: 4459 dpaa2_eth_free_dpni(priv); 4460 err_dpni_setup: 4461 fsl_mc_portal_free(priv->mc_io); 4462 err_portal_alloc: 4463 destroy_workqueue(priv->dpaa2_ptp_wq); 4464 err_wq_alloc: 4465 dev_set_drvdata(dev, NULL); 4466 free_netdev(net_dev); 4467 4468 return err; 4469 } 4470 4471 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) 4472 { 4473 struct device *dev; 4474 struct net_device *net_dev; 4475 struct dpaa2_eth_priv *priv; 4476 4477 dev = &ls_dev->dev; 4478 net_dev = dev_get_drvdata(dev); 4479 priv = netdev_priv(net_dev); 4480 4481 #ifdef CONFIG_DEBUG_FS 4482 dpaa2_dbg_remove(priv); 4483 #endif 4484 rtnl_lock(); 4485 dpaa2_eth_disconnect_mac(priv); 4486 rtnl_unlock(); 4487 4488 unregister_netdev(net_dev); 4489 4490 dpaa2_eth_dl_port_del(priv); 4491 dpaa2_eth_dl_traps_unregister(priv); 4492 dpaa2_eth_dl_unregister(priv); 4493 4494 if (priv->do_link_poll) 4495 kthread_stop(priv->poll_thread); 4496 else 4497 fsl_mc_free_irqs(ls_dev); 4498 4499 dpaa2_eth_free_rings(priv); 4500 free_percpu(priv->sgt_cache); 4501 free_percpu(priv->percpu_stats); 4502 free_percpu(priv->percpu_extras); 4503 4504 dpaa2_eth_del_ch_napi(priv); 4505 dpaa2_eth_free_dpbp(priv); 4506 dpaa2_eth_free_dpio(priv); 4507 dpaa2_eth_free_dpni(priv); 4508 4509 fsl_mc_portal_free(priv->mc_io); 4510 4511 free_netdev(net_dev); 4512 4513 dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); 4514 4515 return 0; 4516 } 4517 4518 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = { 4519 { 4520 .vendor = FSL_MC_VENDOR_FREESCALE, 4521 .obj_type = "dpni", 4522 }, 4523 { .vendor = 0x0 } 4524 }; 4525 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table); 4526 4527 static struct fsl_mc_driver dpaa2_eth_driver = { 4528 .driver = { 4529 .name = KBUILD_MODNAME, 4530 .owner = THIS_MODULE, 4531 }, 4532 .probe = dpaa2_eth_probe, 4533 .remove = dpaa2_eth_remove, 4534 .match_id_table = dpaa2_eth_match_id_table 4535 }; 4536 4537 static int __init 
static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);