1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2014-2016 Freescale Semiconductor Inc. 3 * Copyright 2016-2017 NXP 4 */ 5 #include <linux/init.h> 6 #include <linux/module.h> 7 #include <linux/platform_device.h> 8 #include <linux/etherdevice.h> 9 #include <linux/of_net.h> 10 #include <linux/interrupt.h> 11 #include <linux/msi.h> 12 #include <linux/kthread.h> 13 #include <linux/iommu.h> 14 #include <linux/net_tstamp.h> 15 #include <linux/fsl/mc.h> 16 17 #include <net/sock.h> 18 19 #include "dpaa2-eth.h" 20 21 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files 22 * using trace events only need to #include <trace/events/sched.h> 23 */ 24 #define CREATE_TRACE_POINTS 25 #include "dpaa2-eth-trace.h" 26 27 MODULE_LICENSE("Dual BSD/GPL"); 28 MODULE_AUTHOR("Freescale Semiconductor, Inc"); 29 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 30 31 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 32 dma_addr_t iova_addr) 33 { 34 phys_addr_t phys_addr; 35 36 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 37 38 return phys_to_virt(phys_addr); 39 } 40 41 static void validate_rx_csum(struct dpaa2_eth_priv *priv, 42 u32 fd_status, 43 struct sk_buff *skb) 44 { 45 skb_checksum_none_assert(skb); 46 47 /* HW checksum validation is disabled, nothing to do here */ 48 if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 49 return; 50 51 /* Read checksum validation bits */ 52 if (!((fd_status & DPAA2_FAS_L3CV) && 53 (fd_status & DPAA2_FAS_L4CV))) 54 return; 55 56 /* Inform the stack there's no need to compute L3/L4 csum anymore */ 57 skb->ip_summed = CHECKSUM_UNNECESSARY; 58 } 59 60 /* Free a received FD. 61 * Not to be used for Tx conf FDs or on any other paths. 62 */ 63 static void free_rx_fd(struct dpaa2_eth_priv *priv, 64 const struct dpaa2_fd *fd, 65 void *vaddr) 66 { 67 struct device *dev = priv->net_dev->dev.parent; 68 dma_addr_t addr = dpaa2_fd_get_addr(fd); 69 u8 fd_format = dpaa2_fd_get_format(fd); 70 struct dpaa2_sg_entry *sgt; 71 void *sg_vaddr; 72 int i; 73 74 /* If single buffer frame, just free the data buffer */ 75 if (fd_format == dpaa2_fd_single) 76 goto free_buf; 77 else if (fd_format != dpaa2_fd_sg) 78 /* We don't support any other format */ 79 return; 80 81 /* For S/G frames, we first need to free all SG entries 82 * except the first one, which was taken care of already 83 */ 84 sgt = vaddr + dpaa2_fd_get_offset(fd); 85 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 86 addr = dpaa2_sg_get_addr(&sgt[i]); 87 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 88 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 89 DMA_FROM_DEVICE); 90 91 skb_free_frag(sg_vaddr); 92 if (dpaa2_sg_is_final(&sgt[i])) 93 break; 94 } 95 96 free_buf: 97 skb_free_frag(vaddr); 98 } 99 100 /* Build a linear skb based on a single-buffer frame descriptor */ 101 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, 102 const struct dpaa2_fd *fd, 103 void *fd_vaddr) 104 { 105 struct sk_buff *skb = NULL; 106 u16 fd_offset = dpaa2_fd_get_offset(fd); 107 u32 fd_length = dpaa2_fd_get_len(fd); 108 109 ch->buf_count--; 110 111 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); 112 if (unlikely(!skb)) 113 return NULL; 114 115 skb_reserve(skb, fd_offset); 116 skb_put(skb, fd_length); 117 118 return skb; 119 } 120 121 /* Build a non linear (fragmented) skb based on a S/G table */ 122 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, 123 struct dpaa2_eth_channel *ch, 124 struct dpaa2_sg_entry *sgt) 125 { 126 struct 
sk_buff *skb = NULL; 127 struct device *dev = priv->net_dev->dev.parent; 128 void *sg_vaddr; 129 dma_addr_t sg_addr; 130 u16 sg_offset; 131 u32 sg_length; 132 struct page *page, *head_page; 133 int page_offset; 134 int i; 135 136 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 137 struct dpaa2_sg_entry *sge = &sgt[i]; 138 139 /* NOTE: We only support SG entries in dpaa2_sg_single format, 140 * but this is the only format we may receive from HW anyway 141 */ 142 143 /* Get the address and length from the S/G entry */ 144 sg_addr = dpaa2_sg_get_addr(sge); 145 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 146 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, 147 DMA_FROM_DEVICE); 148 149 sg_length = dpaa2_sg_get_len(sge); 150 151 if (i == 0) { 152 /* We build the skb around the first data buffer */ 153 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); 154 if (unlikely(!skb)) { 155 /* Free the first SG entry now, since we already 156 * unmapped it and obtained the virtual address 157 */ 158 skb_free_frag(sg_vaddr); 159 160 /* We still need to subtract the buffers used 161 * by this FD from our software counter 162 */ 163 while (!dpaa2_sg_is_final(&sgt[i]) && 164 i < DPAA2_ETH_MAX_SG_ENTRIES) 165 i++; 166 break; 167 } 168 169 sg_offset = dpaa2_sg_get_offset(sge); 170 skb_reserve(skb, sg_offset); 171 skb_put(skb, sg_length); 172 } else { 173 /* Rest of the data buffers are stored as skb frags */ 174 page = virt_to_page(sg_vaddr); 175 head_page = virt_to_head_page(sg_vaddr); 176 177 /* Offset in page (which may be compound). 178 * Data in subsequent SG entries is stored from the 179 * beginning of the buffer, so we don't need to add the 180 * sg_offset. 181 */ 182 page_offset = ((unsigned long)sg_vaddr & 183 (PAGE_SIZE - 1)) + 184 (page_address(page) - page_address(head_page)); 185 186 skb_add_rx_frag(skb, i - 1, head_page, page_offset, 187 sg_length, DPAA2_ETH_RX_BUF_SIZE); 188 } 189 190 if (dpaa2_sg_is_final(sge)) 191 break; 192 } 193 194 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 195 196 /* Count all data buffers + SG table buffer */ 197 ch->buf_count -= i + 2; 198 199 return skb; 200 } 201 202 /* Main Rx frame processing routine */ 203 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 204 struct dpaa2_eth_channel *ch, 205 const struct dpaa2_fd *fd, 206 struct dpaa2_eth_fq *fq) 207 { 208 dma_addr_t addr = dpaa2_fd_get_addr(fd); 209 u8 fd_format = dpaa2_fd_get_format(fd); 210 void *vaddr; 211 struct sk_buff *skb; 212 struct rtnl_link_stats64 *percpu_stats; 213 struct dpaa2_eth_drv_stats *percpu_extras; 214 struct device *dev = priv->net_dev->dev.parent; 215 struct dpaa2_fas *fas; 216 void *buf_data; 217 u32 status = 0; 218 219 /* Tracing point */ 220 trace_dpaa2_rx_fd(priv->net_dev, fd); 221 222 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 223 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); 224 225 fas = dpaa2_get_fas(vaddr, false); 226 prefetch(fas); 227 buf_data = vaddr + dpaa2_fd_get_offset(fd); 228 prefetch(buf_data); 229 230 percpu_stats = this_cpu_ptr(priv->percpu_stats); 231 percpu_extras = this_cpu_ptr(priv->percpu_extras); 232 233 if (fd_format == dpaa2_fd_single) { 234 skb = build_linear_skb(ch, fd, vaddr); 235 } else if (fd_format == dpaa2_fd_sg) { 236 skb = build_frag_skb(priv, ch, buf_data); 237 skb_free_frag(vaddr); 238 percpu_extras->rx_sg_frames++; 239 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); 240 } else { 241 /* We don't support any other format */ 242 goto err_frame_format; 243 } 244 245 if 
(unlikely(!skb)) 246 goto err_build_skb; 247 248 prefetch(skb->data); 249 250 /* Get the timestamp value */ 251 if (priv->rx_tstamp) { 252 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 253 __le64 *ts = dpaa2_get_ts(vaddr, false); 254 u64 ns; 255 256 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 257 258 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 259 shhwtstamps->hwtstamp = ns_to_ktime(ns); 260 } 261 262 /* Check if we need to validate the L4 csum */ 263 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 264 status = le32_to_cpu(fas->status); 265 validate_rx_csum(priv, status, skb); 266 } 267 268 skb->protocol = eth_type_trans(skb, priv->net_dev); 269 skb_record_rx_queue(skb, fq->flowid); 270 271 percpu_stats->rx_packets++; 272 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 273 274 napi_gro_receive(&ch->napi, skb); 275 276 return; 277 278 err_build_skb: 279 free_rx_fd(priv, fd, vaddr); 280 err_frame_format: 281 percpu_stats->rx_dropped++; 282 } 283 284 /* Consume all frames pull-dequeued into the store. This is the simplest way to 285 * make sure we don't accidentally issue another volatile dequeue which would 286 * overwrite (leak) frames already in the store. 287 * 288 * Observance of NAPI budget is not our concern, leaving that to the caller. 289 */ 290 static int consume_frames(struct dpaa2_eth_channel *ch, 291 struct dpaa2_eth_fq **src) 292 { 293 struct dpaa2_eth_priv *priv = ch->priv; 294 struct dpaa2_eth_fq *fq = NULL; 295 struct dpaa2_dq *dq; 296 const struct dpaa2_fd *fd; 297 int cleaned = 0; 298 int is_last; 299 300 do { 301 dq = dpaa2_io_store_next(ch->store, &is_last); 302 if (unlikely(!dq)) { 303 /* If we're here, we *must* have placed a 304 * volatile dequeue comnmand, so keep reading through 305 * the store until we get some sort of valid response 306 * token (either a valid frame or an "empty dequeue") 307 */ 308 continue; 309 } 310 311 fd = dpaa2_dq_fd(dq); 312 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 313 314 fq->consume(priv, ch, fd, fq); 315 cleaned++; 316 } while (!is_last); 317 318 if (!cleaned) 319 return 0; 320 321 fq->stats.frames += cleaned; 322 ch->stats.frames += cleaned; 323 324 /* A dequeue operation only pulls frames from a single queue 325 * into the store. Return the frame queue as an out param. 
326 */ 327 if (src) 328 *src = fq; 329 330 return cleaned; 331 } 332 333 /* Configure the egress frame annotation for timestamp update */ 334 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) 335 { 336 struct dpaa2_faead *faead; 337 u32 ctrl, frc; 338 339 /* Mark the egress frame annotation area as valid */ 340 frc = dpaa2_fd_get_frc(fd); 341 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 342 343 /* Set hardware annotation size */ 344 ctrl = dpaa2_fd_get_ctrl(fd); 345 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 346 347 /* enable UPD (update prepanded data) bit in FAEAD field of 348 * hardware frame annotation area 349 */ 350 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 351 faead = dpaa2_get_faead(buf_start, true); 352 faead->ctrl = cpu_to_le32(ctrl); 353 } 354 355 /* Create a frame descriptor based on a fragmented skb */ 356 static int build_sg_fd(struct dpaa2_eth_priv *priv, 357 struct sk_buff *skb, 358 struct dpaa2_fd *fd) 359 { 360 struct device *dev = priv->net_dev->dev.parent; 361 void *sgt_buf = NULL; 362 dma_addr_t addr; 363 int nr_frags = skb_shinfo(skb)->nr_frags; 364 struct dpaa2_sg_entry *sgt; 365 int i, err; 366 int sgt_buf_size; 367 struct scatterlist *scl, *crt_scl; 368 int num_sg; 369 int num_dma_bufs; 370 struct dpaa2_eth_swa *swa; 371 372 /* Create and map scatterlist. 373 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 374 * to go beyond nr_frags+1. 375 * Note: We don't support chained scatterlists 376 */ 377 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 378 return -EINVAL; 379 380 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 381 if (unlikely(!scl)) 382 return -ENOMEM; 383 384 sg_init_table(scl, nr_frags + 1); 385 num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 386 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 387 if (unlikely(!num_dma_bufs)) { 388 err = -ENOMEM; 389 goto dma_map_sg_failed; 390 } 391 392 /* Prepare the HW SGT structure */ 393 sgt_buf_size = priv->tx_data_offset + 394 sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 395 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); 396 if (unlikely(!sgt_buf)) { 397 err = -ENOMEM; 398 goto sgt_buf_alloc_failed; 399 } 400 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 401 memset(sgt_buf, 0, sgt_buf_size); 402 403 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 404 405 /* Fill in the HW SGT structure. 406 * 407 * sgt_buf is zeroed out, so the following fields are implicit 408 * in all sgt entries: 409 * - offset is 0 410 * - format is 'dpaa2_sg_single' 411 */ 412 for_each_sg(scl, crt_scl, num_dma_bufs, i) { 413 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 414 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 415 } 416 dpaa2_sg_set_final(&sgt[i - 1], true); 417 418 /* Store the skb backpointer in the SGT buffer. 419 * Fit the scatterlist and the number of buffers alongside the 420 * skb backpointer in the software annotation area. We'll need 421 * all of them on Tx Conf. 
422 */ 423 swa = (struct dpaa2_eth_swa *)sgt_buf; 424 swa->skb = skb; 425 swa->scl = scl; 426 swa->num_sg = num_sg; 427 swa->sgt_size = sgt_buf_size; 428 429 /* Separately map the SGT buffer */ 430 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 431 if (unlikely(dma_mapping_error(dev, addr))) { 432 err = -ENOMEM; 433 goto dma_map_single_failed; 434 } 435 dpaa2_fd_set_offset(fd, priv->tx_data_offset); 436 dpaa2_fd_set_format(fd, dpaa2_fd_sg); 437 dpaa2_fd_set_addr(fd, addr); 438 dpaa2_fd_set_len(fd, skb->len); 439 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 440 441 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 442 enable_tx_tstamp(fd, sgt_buf); 443 444 return 0; 445 446 dma_map_single_failed: 447 skb_free_frag(sgt_buf); 448 sgt_buf_alloc_failed: 449 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 450 dma_map_sg_failed: 451 kfree(scl); 452 return err; 453 } 454 455 /* Create a frame descriptor based on a linear skb */ 456 static int build_single_fd(struct dpaa2_eth_priv *priv, 457 struct sk_buff *skb, 458 struct dpaa2_fd *fd) 459 { 460 struct device *dev = priv->net_dev->dev.parent; 461 u8 *buffer_start, *aligned_start; 462 struct sk_buff **skbh; 463 dma_addr_t addr; 464 465 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); 466 467 /* If there's enough room to align the FD address, do it. 468 * It will help hardware optimize accesses. 469 */ 470 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 471 DPAA2_ETH_TX_BUF_ALIGN); 472 if (aligned_start >= skb->head) 473 buffer_start = aligned_start; 474 475 /* Store a backpointer to the skb at the beginning of the buffer 476 * (in the private data area) such that we can release it 477 * on Tx confirm 478 */ 479 skbh = (struct sk_buff **)buffer_start; 480 *skbh = skb; 481 482 addr = dma_map_single(dev, buffer_start, 483 skb_tail_pointer(skb) - buffer_start, 484 DMA_BIDIRECTIONAL); 485 if (unlikely(dma_mapping_error(dev, addr))) 486 return -ENOMEM; 487 488 dpaa2_fd_set_addr(fd, addr); 489 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); 490 dpaa2_fd_set_len(fd, skb->len); 491 dpaa2_fd_set_format(fd, dpaa2_fd_single); 492 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA); 493 494 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 495 enable_tx_tstamp(fd, buffer_start); 496 497 return 0; 498 } 499 500 /* FD freeing routine on the Tx path 501 * 502 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 503 * back-pointed to is also freed. 504 * This can be called either from dpaa2_eth_tx_conf() or on the error path of 505 * dpaa2_eth_tx(). 506 */ 507 static void free_tx_fd(const struct dpaa2_eth_priv *priv, 508 const struct dpaa2_fd *fd) 509 { 510 struct device *dev = priv->net_dev->dev.parent; 511 dma_addr_t fd_addr; 512 struct sk_buff **skbh, *skb; 513 unsigned char *buffer_start; 514 struct dpaa2_eth_swa *swa; 515 u8 fd_format = dpaa2_fd_get_format(fd); 516 517 fd_addr = dpaa2_fd_get_addr(fd); 518 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 519 520 if (fd_format == dpaa2_fd_single) { 521 skb = *skbh; 522 buffer_start = (unsigned char *)skbh; 523 /* Accessing the skb buffer is safe before dma unmap, because 524 * we didn't map the actual skb shell. 
525 */ 526 dma_unmap_single(dev, fd_addr, 527 skb_tail_pointer(skb) - buffer_start, 528 DMA_BIDIRECTIONAL); 529 } else if (fd_format == dpaa2_fd_sg) { 530 swa = (struct dpaa2_eth_swa *)skbh; 531 skb = swa->skb; 532 533 /* Unmap the scatterlist */ 534 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); 535 kfree(swa->scl); 536 537 /* Unmap the SGT buffer */ 538 dma_unmap_single(dev, fd_addr, swa->sgt_size, 539 DMA_BIDIRECTIONAL); 540 } else { 541 netdev_dbg(priv->net_dev, "Invalid FD format\n"); 542 return; 543 } 544 545 /* Get the timestamp value */ 546 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 547 struct skb_shared_hwtstamps shhwtstamps; 548 __le64 *ts = dpaa2_get_ts(skbh, true); 549 u64 ns; 550 551 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 552 553 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 554 shhwtstamps.hwtstamp = ns_to_ktime(ns); 555 skb_tstamp_tx(skb, &shhwtstamps); 556 } 557 558 /* Free SGT buffer allocated on tx */ 559 if (fd_format != dpaa2_fd_single) 560 skb_free_frag(skbh); 561 562 /* Move on with skb release */ 563 dev_kfree_skb(skb); 564 } 565 566 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 567 { 568 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 569 struct dpaa2_fd fd; 570 struct rtnl_link_stats64 *percpu_stats; 571 struct dpaa2_eth_drv_stats *percpu_extras; 572 struct dpaa2_eth_fq *fq; 573 struct netdev_queue *nq; 574 u16 queue_mapping; 575 unsigned int needed_headroom; 576 u32 fd_len; 577 int err, i; 578 579 percpu_stats = this_cpu_ptr(priv->percpu_stats); 580 percpu_extras = this_cpu_ptr(priv->percpu_extras); 581 582 needed_headroom = dpaa2_eth_needed_headroom(priv, skb); 583 if (skb_headroom(skb) < needed_headroom) { 584 struct sk_buff *ns; 585 586 ns = skb_realloc_headroom(skb, needed_headroom); 587 if (unlikely(!ns)) { 588 percpu_stats->tx_dropped++; 589 goto err_alloc_headroom; 590 } 591 percpu_extras->tx_reallocs++; 592 593 if (skb->sk) 594 skb_set_owner_w(ns, skb->sk); 595 596 dev_kfree_skb(skb); 597 skb = ns; 598 } 599 600 /* We'll be holding a back-reference to the skb until Tx Confirmation; 601 * we don't want that overwritten by a concurrent Tx with a cloned skb. 602 */ 603 skb = skb_unshare(skb, GFP_ATOMIC); 604 if (unlikely(!skb)) { 605 /* skb_unshare() has already freed the skb */ 606 percpu_stats->tx_dropped++; 607 return NETDEV_TX_OK; 608 } 609 610 /* Setup the FD fields */ 611 memset(&fd, 0, sizeof(fd)); 612 613 if (skb_is_nonlinear(skb)) { 614 err = build_sg_fd(priv, skb, &fd); 615 percpu_extras->tx_sg_frames++; 616 percpu_extras->tx_sg_bytes += skb->len; 617 } else { 618 err = build_single_fd(priv, skb, &fd); 619 } 620 621 if (unlikely(err)) { 622 percpu_stats->tx_dropped++; 623 goto err_build_fd; 624 } 625 626 /* Tracing point */ 627 trace_dpaa2_tx_fd(net_dev, &fd); 628 629 /* TxConf FQ selection relies on queue id from the stack. 
630 * In case of a forwarded frame from another DPNI interface, we choose 631 * a queue affined to the same core that processed the Rx frame 632 */ 633 queue_mapping = skb_get_queue_mapping(skb); 634 fq = &priv->fq[queue_mapping]; 635 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 636 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 637 priv->tx_qdid, 0, 638 fq->tx_qdbin, &fd); 639 if (err != -EBUSY) 640 break; 641 } 642 percpu_extras->tx_portal_busy += i; 643 if (unlikely(err < 0)) { 644 percpu_stats->tx_errors++; 645 /* Clean up everything, including freeing the skb */ 646 free_tx_fd(priv, &fd); 647 } else { 648 fd_len = dpaa2_fd_get_len(&fd); 649 percpu_stats->tx_packets++; 650 percpu_stats->tx_bytes += fd_len; 651 652 nq = netdev_get_tx_queue(net_dev, queue_mapping); 653 netdev_tx_sent_queue(nq, fd_len); 654 } 655 656 return NETDEV_TX_OK; 657 658 err_build_fd: 659 err_alloc_headroom: 660 dev_kfree_skb(skb); 661 662 return NETDEV_TX_OK; 663 } 664 665 /* Tx confirmation frame processing routine */ 666 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 667 struct dpaa2_eth_channel *ch __always_unused, 668 const struct dpaa2_fd *fd, 669 struct dpaa2_eth_fq *fq) 670 { 671 struct rtnl_link_stats64 *percpu_stats; 672 struct dpaa2_eth_drv_stats *percpu_extras; 673 u32 fd_len = dpaa2_fd_get_len(fd); 674 u32 fd_errors; 675 676 /* Tracing point */ 677 trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 678 679 percpu_extras = this_cpu_ptr(priv->percpu_extras); 680 percpu_extras->tx_conf_frames++; 681 percpu_extras->tx_conf_bytes += fd_len; 682 683 fq->dq_frames++; 684 fq->dq_bytes += fd_len; 685 686 /* Check frame errors in the FD field */ 687 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 688 free_tx_fd(priv, fd); 689 690 if (likely(!fd_errors)) 691 return; 692 693 if (net_ratelimit()) 694 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 695 fd_errors); 696 697 percpu_stats = this_cpu_ptr(priv->percpu_stats); 698 /* Tx-conf logically pertains to the egress path. 
*/ 699 percpu_stats->tx_errors++; 700 } 701 702 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 703 { 704 int err; 705 706 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 707 DPNI_OFF_RX_L3_CSUM, enable); 708 if (err) { 709 netdev_err(priv->net_dev, 710 "dpni_set_offload(RX_L3_CSUM) failed\n"); 711 return err; 712 } 713 714 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 715 DPNI_OFF_RX_L4_CSUM, enable); 716 if (err) { 717 netdev_err(priv->net_dev, 718 "dpni_set_offload(RX_L4_CSUM) failed\n"); 719 return err; 720 } 721 722 return 0; 723 } 724 725 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 726 { 727 int err; 728 729 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 730 DPNI_OFF_TX_L3_CSUM, enable); 731 if (err) { 732 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 733 return err; 734 } 735 736 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 737 DPNI_OFF_TX_L4_CSUM, enable); 738 if (err) { 739 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 740 return err; 741 } 742 743 return 0; 744 } 745 746 /* Free buffers acquired from the buffer pool or which were meant to 747 * be released in the pool 748 */ 749 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) 750 { 751 struct device *dev = priv->net_dev->dev.parent; 752 void *vaddr; 753 int i; 754 755 for (i = 0; i < count; i++) { 756 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 757 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, 758 DMA_FROM_DEVICE); 759 skb_free_frag(vaddr); 760 } 761 } 762 763 /* Perform a single release command to add buffers 764 * to the specified buffer pool 765 */ 766 static int add_bufs(struct dpaa2_eth_priv *priv, 767 struct dpaa2_eth_channel *ch, u16 bpid) 768 { 769 struct device *dev = priv->net_dev->dev.parent; 770 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 771 void *buf; 772 dma_addr_t addr; 773 int i, err; 774 775 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 776 /* Allocate buffer visible to WRIOP + skb shared info + 777 * alignment padding 778 */ 779 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); 780 if (unlikely(!buf)) 781 goto err_alloc; 782 783 buf = PTR_ALIGN(buf, priv->rx_buf_align); 784 785 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, 786 DMA_FROM_DEVICE); 787 if (unlikely(dma_mapping_error(dev, addr))) 788 goto err_map; 789 790 buf_array[i] = addr; 791 792 /* tracing point */ 793 trace_dpaa2_eth_buf_seed(priv->net_dev, 794 buf, dpaa2_eth_buf_raw_size(priv), 795 addr, DPAA2_ETH_RX_BUF_SIZE, 796 bpid); 797 } 798 799 release_bufs: 800 /* In case the portal is busy, retry until successful */ 801 while ((err = dpaa2_io_service_release(ch->dpio, bpid, 802 buf_array, i)) == -EBUSY) 803 cpu_relax(); 804 805 /* If release command failed, clean up and bail out; 806 * not much else we can do about it 807 */ 808 if (err) { 809 free_bufs(priv, buf_array, i); 810 return 0; 811 } 812 813 return i; 814 815 err_map: 816 skb_free_frag(buf); 817 err_alloc: 818 /* If we managed to allocate at least some buffers, 819 * release them to hardware 820 */ 821 if (i) 822 goto release_bufs; 823 824 return 0; 825 } 826 827 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 828 { 829 int i, j; 830 int new_count; 831 832 /* This is the lazy seeding of Rx buffer pools. 833 * dpaa2_add_bufs() is also used on the Rx hotpath and calls 834 * napi_alloc_frag(). 
The trouble with that is that it in turn ends up 835 * calling this_cpu_ptr(), which mandates execution in atomic context. 836 * Rather than splitting up the code, do a one-off preempt disable. 837 */ 838 preempt_disable(); 839 for (j = 0; j < priv->num_channels; j++) { 840 for (i = 0; i < DPAA2_ETH_NUM_BUFS; 841 i += DPAA2_ETH_BUFS_PER_CMD) { 842 new_count = add_bufs(priv, priv->channel[j], bpid); 843 priv->channel[j]->buf_count += new_count; 844 845 if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 846 preempt_enable(); 847 return -ENOMEM; 848 } 849 } 850 } 851 preempt_enable(); 852 853 return 0; 854 } 855 856 /** 857 * Drain the specified number of buffers from the DPNI's private buffer pool. 858 * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD 859 */ 860 static void drain_bufs(struct dpaa2_eth_priv *priv, int count) 861 { 862 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 863 int ret; 864 865 do { 866 ret = dpaa2_io_service_acquire(NULL, priv->bpid, 867 buf_array, count); 868 if (ret < 0) { 869 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 870 return; 871 } 872 free_bufs(priv, buf_array, ret); 873 } while (ret); 874 } 875 876 static void drain_pool(struct dpaa2_eth_priv *priv) 877 { 878 int i; 879 880 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 881 drain_bufs(priv, 1); 882 883 for (i = 0; i < priv->num_channels; i++) 884 priv->channel[i]->buf_count = 0; 885 } 886 887 /* Function is called from softirq context only, so we don't need to guard 888 * the access to percpu count 889 */ 890 static int refill_pool(struct dpaa2_eth_priv *priv, 891 struct dpaa2_eth_channel *ch, 892 u16 bpid) 893 { 894 int new_count; 895 896 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 897 return 0; 898 899 do { 900 new_count = add_bufs(priv, ch, bpid); 901 if (unlikely(!new_count)) { 902 /* Out of memory; abort for now, we'll try later on */ 903 break; 904 } 905 ch->buf_count += new_count; 906 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 907 908 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 909 return -ENOMEM; 910 911 return 0; 912 } 913 914 static int pull_channel(struct dpaa2_eth_channel *ch) 915 { 916 int err; 917 int dequeues = -1; 918 919 /* Retry while portal is busy */ 920 do { 921 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 922 ch->store); 923 dequeues++; 924 cpu_relax(); 925 } while (err == -EBUSY); 926 927 ch->stats.dequeue_portal_busy += dequeues; 928 if (unlikely(err)) 929 ch->stats.pull_err++; 930 931 return err; 932 } 933 934 /* NAPI poll routine 935 * 936 * Frames are dequeued from the QMan channel associated with this NAPI context. 937 * Rx, Tx confirmation and (if configured) Rx error frames all count 938 * towards the NAPI budget. 
939 */ 940 static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 941 { 942 struct dpaa2_eth_channel *ch; 943 struct dpaa2_eth_priv *priv; 944 int rx_cleaned = 0, txconf_cleaned = 0; 945 struct dpaa2_eth_fq *fq, *txc_fq = NULL; 946 struct netdev_queue *nq; 947 int store_cleaned, work_done; 948 int err; 949 950 ch = container_of(napi, struct dpaa2_eth_channel, napi); 951 priv = ch->priv; 952 953 do { 954 err = pull_channel(ch); 955 if (unlikely(err)) 956 break; 957 958 /* Refill pool if appropriate */ 959 refill_pool(priv, ch, priv->bpid); 960 961 store_cleaned = consume_frames(ch, &fq); 962 if (!store_cleaned) 963 break; 964 if (fq->type == DPAA2_RX_FQ) { 965 rx_cleaned += store_cleaned; 966 } else { 967 txconf_cleaned += store_cleaned; 968 /* We have a single Tx conf FQ on this channel */ 969 txc_fq = fq; 970 } 971 972 /* If we either consumed the whole NAPI budget with Rx frames 973 * or we reached the Tx confirmations threshold, we're done. 974 */ 975 if (rx_cleaned >= budget || 976 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { 977 work_done = budget; 978 goto out; 979 } 980 } while (store_cleaned); 981 982 /* We didn't consume the entire budget, so finish napi and 983 * re-enable data availability notifications 984 */ 985 napi_complete_done(napi, rx_cleaned); 986 do { 987 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 988 cpu_relax(); 989 } while (err == -EBUSY); 990 WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 991 ch->nctx.desired_cpu); 992 993 work_done = max(rx_cleaned, 1); 994 995 out: 996 if (txc_fq) { 997 nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); 998 netdev_tx_completed_queue(nq, txc_fq->dq_frames, 999 txc_fq->dq_bytes); 1000 txc_fq->dq_frames = 0; 1001 txc_fq->dq_bytes = 0; 1002 } 1003 1004 return work_done; 1005 } 1006 1007 static void enable_ch_napi(struct dpaa2_eth_priv *priv) 1008 { 1009 struct dpaa2_eth_channel *ch; 1010 int i; 1011 1012 for (i = 0; i < priv->num_channels; i++) { 1013 ch = priv->channel[i]; 1014 napi_enable(&ch->napi); 1015 } 1016 } 1017 1018 static void disable_ch_napi(struct dpaa2_eth_priv *priv) 1019 { 1020 struct dpaa2_eth_channel *ch; 1021 int i; 1022 1023 for (i = 0; i < priv->num_channels; i++) { 1024 ch = priv->channel[i]; 1025 napi_disable(&ch->napi); 1026 } 1027 } 1028 1029 static int link_state_update(struct dpaa2_eth_priv *priv) 1030 { 1031 struct dpni_link_state state = {0}; 1032 int err; 1033 1034 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 1035 if (unlikely(err)) { 1036 netdev_err(priv->net_dev, 1037 "dpni_get_link_state() failed\n"); 1038 return err; 1039 } 1040 1041 /* Chech link state; speed / duplex changes are not treated yet */ 1042 if (priv->link_state.up == state.up) 1043 return 0; 1044 1045 priv->link_state = state; 1046 if (state.up) { 1047 netif_carrier_on(priv->net_dev); 1048 netif_tx_start_all_queues(priv->net_dev); 1049 } else { 1050 netif_tx_stop_all_queues(priv->net_dev); 1051 netif_carrier_off(priv->net_dev); 1052 } 1053 1054 netdev_info(priv->net_dev, "Link Event: state %s\n", 1055 state.up ? "up" : "down"); 1056 1057 return 0; 1058 } 1059 1060 static int dpaa2_eth_open(struct net_device *net_dev) 1061 { 1062 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1063 int err; 1064 1065 err = seed_pool(priv, priv->bpid); 1066 if (err) { 1067 /* Not much to do; the buffer pool, though not filled up, 1068 * may still contain some buffers which would enable us 1069 * to limp on. 
1070 */ 1071 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 1072 priv->dpbp_dev->obj_desc.id, priv->bpid); 1073 } 1074 1075 /* We'll only start the txqs when the link is actually ready; make sure 1076 * we don't race against the link up notification, which may come 1077 * immediately after dpni_enable(); 1078 */ 1079 netif_tx_stop_all_queues(net_dev); 1080 enable_ch_napi(priv); 1081 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will 1082 * return true and cause 'ip link show' to report the LOWER_UP flag, 1083 * even though the link notification wasn't even received. 1084 */ 1085 netif_carrier_off(net_dev); 1086 1087 err = dpni_enable(priv->mc_io, 0, priv->mc_token); 1088 if (err < 0) { 1089 netdev_err(net_dev, "dpni_enable() failed\n"); 1090 goto enable_err; 1091 } 1092 1093 /* If the DPMAC object has already processed the link up interrupt, 1094 * we have to learn the link state ourselves. 1095 */ 1096 err = link_state_update(priv); 1097 if (err < 0) { 1098 netdev_err(net_dev, "Can't update link state\n"); 1099 goto link_state_err; 1100 } 1101 1102 return 0; 1103 1104 link_state_err: 1105 enable_err: 1106 disable_ch_napi(priv); 1107 drain_pool(priv); 1108 return err; 1109 } 1110 1111 /* The DPIO store must be empty when we call this, 1112 * at the end of every NAPI cycle. 1113 */ 1114 static u32 drain_channel(struct dpaa2_eth_channel *ch) 1115 { 1116 u32 drained = 0, total = 0; 1117 1118 do { 1119 pull_channel(ch); 1120 drained = consume_frames(ch, NULL); 1121 total += drained; 1122 } while (drained); 1123 1124 return total; 1125 } 1126 1127 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) 1128 { 1129 struct dpaa2_eth_channel *ch; 1130 int i; 1131 u32 drained = 0; 1132 1133 for (i = 0; i < priv->num_channels; i++) { 1134 ch = priv->channel[i]; 1135 drained += drain_channel(ch); 1136 } 1137 1138 return drained; 1139 } 1140 1141 static int dpaa2_eth_stop(struct net_device *net_dev) 1142 { 1143 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1144 int dpni_enabled = 0; 1145 int retries = 10; 1146 u32 drained; 1147 1148 netif_tx_stop_all_queues(net_dev); 1149 netif_carrier_off(net_dev); 1150 1151 /* Loop while dpni_disable() attempts to drain the egress FQs 1152 * and confirm them back to us. 1153 */ 1154 do { 1155 dpni_disable(priv->mc_io, 0, priv->mc_token); 1156 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 1157 if (dpni_enabled) 1158 /* Allow the hardware some slack */ 1159 msleep(100); 1160 } while (dpni_enabled && --retries); 1161 if (!retries) { 1162 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 1163 /* Must go on and disable NAPI nonetheless, so we don't crash at 1164 * the next "ifconfig up" 1165 */ 1166 } 1167 1168 /* Wait for NAPI to complete on every core and disable it. 1169 * In particular, this will also prevent NAPI from being rescheduled if 1170 * a new CDAN is serviced, effectively discarding the CDAN. We therefore 1171 * don't even need to disarm the channels, except perhaps for the case 1172 * of a huge coalescing value. 
1173 */ 1174 disable_ch_napi(priv); 1175 1176 /* Manually drain the Rx and TxConf queues */ 1177 drained = drain_ingress_frames(priv); 1178 if (drained) 1179 netdev_dbg(net_dev, "Drained %d frames.\n", drained); 1180 1181 /* Empty the buffer pool */ 1182 drain_pool(priv); 1183 1184 return 0; 1185 } 1186 1187 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 1188 { 1189 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1190 struct device *dev = net_dev->dev.parent; 1191 int err; 1192 1193 err = eth_mac_addr(net_dev, addr); 1194 if (err < 0) { 1195 dev_err(dev, "eth_mac_addr() failed (%d)\n", err); 1196 return err; 1197 } 1198 1199 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 1200 net_dev->dev_addr); 1201 if (err) { 1202 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 1203 return err; 1204 } 1205 1206 return 0; 1207 } 1208 1209 /** Fill in counters maintained by the GPP driver. These may be different from 1210 * the hardware counters obtained by ethtool. 1211 */ 1212 static void dpaa2_eth_get_stats(struct net_device *net_dev, 1213 struct rtnl_link_stats64 *stats) 1214 { 1215 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1216 struct rtnl_link_stats64 *percpu_stats; 1217 u64 *cpustats; 1218 u64 *netstats = (u64 *)stats; 1219 int i, j; 1220 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 1221 1222 for_each_possible_cpu(i) { 1223 percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 1224 cpustats = (u64 *)percpu_stats; 1225 for (j = 0; j < num; j++) 1226 netstats[j] += cpustats[j]; 1227 } 1228 } 1229 1230 /* Copy mac unicast addresses from @net_dev to @priv. 1231 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 1232 */ 1233 static void add_uc_hw_addr(const struct net_device *net_dev, 1234 struct dpaa2_eth_priv *priv) 1235 { 1236 struct netdev_hw_addr *ha; 1237 int err; 1238 1239 netdev_for_each_uc_addr(ha, net_dev) { 1240 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 1241 ha->addr); 1242 if (err) 1243 netdev_warn(priv->net_dev, 1244 "Could not add ucast MAC %pM to the filtering table (err %d)\n", 1245 ha->addr, err); 1246 } 1247 } 1248 1249 /* Copy mac multicast addresses from @net_dev to @priv 1250 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 1251 */ 1252 static void add_mc_hw_addr(const struct net_device *net_dev, 1253 struct dpaa2_eth_priv *priv) 1254 { 1255 struct netdev_hw_addr *ha; 1256 int err; 1257 1258 netdev_for_each_mc_addr(ha, net_dev) { 1259 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 1260 ha->addr); 1261 if (err) 1262 netdev_warn(priv->net_dev, 1263 "Could not add mcast MAC %pM to the filtering table (err %d)\n", 1264 ha->addr, err); 1265 } 1266 } 1267 1268 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 1269 { 1270 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1271 int uc_count = netdev_uc_count(net_dev); 1272 int mc_count = netdev_mc_count(net_dev); 1273 u8 max_mac = priv->dpni_attrs.mac_filter_entries; 1274 u32 options = priv->dpni_attrs.options; 1275 u16 mc_token = priv->mc_token; 1276 struct fsl_mc_io *mc_io = priv->mc_io; 1277 int err; 1278 1279 /* Basic sanity checks; these probably indicate a misconfiguration */ 1280 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 1281 netdev_info(net_dev, 1282 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 1283 max_mac); 1284 1285 /* Force promiscuous if the uc or mc counts exceed our capabilities. 
*/ 1286 if (uc_count > max_mac) { 1287 netdev_info(net_dev, 1288 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 1289 uc_count, max_mac); 1290 goto force_promisc; 1291 } 1292 if (mc_count + uc_count > max_mac) { 1293 netdev_info(net_dev, 1294 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 1295 uc_count + mc_count, max_mac); 1296 goto force_mc_promisc; 1297 } 1298 1299 /* Adjust promisc settings due to flag combinations */ 1300 if (net_dev->flags & IFF_PROMISC) 1301 goto force_promisc; 1302 if (net_dev->flags & IFF_ALLMULTI) { 1303 /* First, rebuild unicast filtering table. This should be done 1304 * in promisc mode, in order to avoid frame loss while we 1305 * progressively add entries to the table. 1306 * We don't know whether we had been in promisc already, and 1307 * making an MC call to find out is expensive; so set uc promisc 1308 * nonetheless. 1309 */ 1310 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1311 if (err) 1312 netdev_warn(net_dev, "Can't set uc promisc\n"); 1313 1314 /* Actual uc table reconstruction. */ 1315 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 1316 if (err) 1317 netdev_warn(net_dev, "Can't clear uc filters\n"); 1318 add_uc_hw_addr(net_dev, priv); 1319 1320 /* Finally, clear uc promisc and set mc promisc as requested. */ 1321 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 1322 if (err) 1323 netdev_warn(net_dev, "Can't clear uc promisc\n"); 1324 goto force_mc_promisc; 1325 } 1326 1327 /* Neither unicast, nor multicast promisc will be on... eventually. 1328 * For now, rebuild mac filtering tables while forcing both of them on. 1329 */ 1330 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1331 if (err) 1332 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 1333 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 1334 if (err) 1335 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 1336 1337 /* Actual mac filtering tables reconstruction */ 1338 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 1339 if (err) 1340 netdev_warn(net_dev, "Can't clear mac filters\n"); 1341 add_mc_hw_addr(net_dev, priv); 1342 add_uc_hw_addr(net_dev, priv); 1343 1344 /* Now we can clear both ucast and mcast promisc, without risking 1345 * to drop legitimate frames anymore. 
1346 */ 1347 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 1348 if (err) 1349 netdev_warn(net_dev, "Can't clear ucast promisc\n"); 1350 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 1351 if (err) 1352 netdev_warn(net_dev, "Can't clear mcast promisc\n"); 1353 1354 return; 1355 1356 force_promisc: 1357 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1358 if (err) 1359 netdev_warn(net_dev, "Can't set ucast promisc\n"); 1360 force_mc_promisc: 1361 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 1362 if (err) 1363 netdev_warn(net_dev, "Can't set mcast promisc\n"); 1364 } 1365 1366 static int dpaa2_eth_set_features(struct net_device *net_dev, 1367 netdev_features_t features) 1368 { 1369 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1370 netdev_features_t changed = features ^ net_dev->features; 1371 bool enable; 1372 int err; 1373 1374 if (changed & NETIF_F_RXCSUM) { 1375 enable = !!(features & NETIF_F_RXCSUM); 1376 err = set_rx_csum(priv, enable); 1377 if (err) 1378 return err; 1379 } 1380 1381 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 1382 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 1383 err = set_tx_csum(priv, enable); 1384 if (err) 1385 return err; 1386 } 1387 1388 return 0; 1389 } 1390 1391 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1392 { 1393 struct dpaa2_eth_priv *priv = netdev_priv(dev); 1394 struct hwtstamp_config config; 1395 1396 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 1397 return -EFAULT; 1398 1399 switch (config.tx_type) { 1400 case HWTSTAMP_TX_OFF: 1401 priv->tx_tstamp = false; 1402 break; 1403 case HWTSTAMP_TX_ON: 1404 priv->tx_tstamp = true; 1405 break; 1406 default: 1407 return -ERANGE; 1408 } 1409 1410 if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 1411 priv->rx_tstamp = false; 1412 } else { 1413 priv->rx_tstamp = true; 1414 /* TS is set for all frame types, not only those requested */ 1415 config.rx_filter = HWTSTAMP_FILTER_ALL; 1416 } 1417 1418 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
1419 -EFAULT : 0; 1420 } 1421 1422 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1423 { 1424 if (cmd == SIOCSHWTSTAMP) 1425 return dpaa2_eth_ts_ioctl(dev, rq, cmd); 1426 1427 return -EINVAL; 1428 } 1429 1430 static const struct net_device_ops dpaa2_eth_ops = { 1431 .ndo_open = dpaa2_eth_open, 1432 .ndo_start_xmit = dpaa2_eth_tx, 1433 .ndo_stop = dpaa2_eth_stop, 1434 .ndo_set_mac_address = dpaa2_eth_set_addr, 1435 .ndo_get_stats64 = dpaa2_eth_get_stats, 1436 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 1437 .ndo_set_features = dpaa2_eth_set_features, 1438 .ndo_do_ioctl = dpaa2_eth_ioctl, 1439 }; 1440 1441 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) 1442 { 1443 struct dpaa2_eth_channel *ch; 1444 1445 ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 1446 1447 /* Update NAPI statistics */ 1448 ch->stats.cdan++; 1449 1450 napi_schedule_irqoff(&ch->napi); 1451 } 1452 1453 /* Allocate and configure a DPCON object */ 1454 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) 1455 { 1456 struct fsl_mc_device *dpcon; 1457 struct device *dev = priv->net_dev->dev.parent; 1458 struct dpcon_attr attrs; 1459 int err; 1460 1461 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 1462 FSL_MC_POOL_DPCON, &dpcon); 1463 if (err) { 1464 if (err == -ENXIO) 1465 err = -EPROBE_DEFER; 1466 else 1467 dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 1468 return ERR_PTR(err); 1469 } 1470 1471 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 1472 if (err) { 1473 dev_err(dev, "dpcon_open() failed\n"); 1474 goto free; 1475 } 1476 1477 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 1478 if (err) { 1479 dev_err(dev, "dpcon_reset() failed\n"); 1480 goto close; 1481 } 1482 1483 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); 1484 if (err) { 1485 dev_err(dev, "dpcon_get_attributes() failed\n"); 1486 goto close; 1487 } 1488 1489 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 1490 if (err) { 1491 dev_err(dev, "dpcon_enable() failed\n"); 1492 goto close; 1493 } 1494 1495 return dpcon; 1496 1497 close: 1498 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 1499 free: 1500 fsl_mc_object_free(dpcon); 1501 1502 return NULL; 1503 } 1504 1505 static void free_dpcon(struct dpaa2_eth_priv *priv, 1506 struct fsl_mc_device *dpcon) 1507 { 1508 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 1509 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 1510 fsl_mc_object_free(dpcon); 1511 } 1512 1513 static struct dpaa2_eth_channel * 1514 alloc_channel(struct dpaa2_eth_priv *priv) 1515 { 1516 struct dpaa2_eth_channel *channel; 1517 struct dpcon_attr attr; 1518 struct device *dev = priv->net_dev->dev.parent; 1519 int err; 1520 1521 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 1522 if (!channel) 1523 return NULL; 1524 1525 channel->dpcon = setup_dpcon(priv); 1526 if (IS_ERR_OR_NULL(channel->dpcon)) { 1527 err = PTR_ERR(channel->dpcon); 1528 goto err_setup; 1529 } 1530 1531 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 1532 &attr); 1533 if (err) { 1534 dev_err(dev, "dpcon_get_attributes() failed\n"); 1535 goto err_get_attr; 1536 } 1537 1538 channel->dpcon_id = attr.id; 1539 channel->ch_id = attr.qbman_ch_id; 1540 channel->priv = priv; 1541 1542 return channel; 1543 1544 err_get_attr: 1545 free_dpcon(priv, channel->dpcon); 1546 err_setup: 1547 kfree(channel); 1548 return ERR_PTR(err); 1549 } 1550 1551 static void free_channel(struct dpaa2_eth_priv *priv, 1552 struct dpaa2_eth_channel *channel) 
1553 { 1554 free_dpcon(priv, channel->dpcon); 1555 kfree(channel); 1556 } 1557 1558 /* DPIO setup: allocate and configure QBMan channels, setup core affinity 1559 * and register data availability notifications 1560 */ 1561 static int setup_dpio(struct dpaa2_eth_priv *priv) 1562 { 1563 struct dpaa2_io_notification_ctx *nctx; 1564 struct dpaa2_eth_channel *channel; 1565 struct dpcon_notification_cfg dpcon_notif_cfg; 1566 struct device *dev = priv->net_dev->dev.parent; 1567 int i, err; 1568 1569 /* We want the ability to spread ingress traffic (RX, TX conf) to as 1570 * many cores as possible, so we need one channel for each core 1571 * (unless there's fewer queues than cores, in which case the extra 1572 * channels would be wasted). 1573 * Allocate one channel per core and register it to the core's 1574 * affine DPIO. If not enough channels are available for all cores 1575 * or if some cores don't have an affine DPIO, there will be no 1576 * ingress frame processing on those cores. 1577 */ 1578 cpumask_clear(&priv->dpio_cpumask); 1579 for_each_online_cpu(i) { 1580 /* Try to allocate a channel */ 1581 channel = alloc_channel(priv); 1582 if (IS_ERR_OR_NULL(channel)) { 1583 err = PTR_ERR(channel); 1584 if (err != -EPROBE_DEFER) 1585 dev_info(dev, 1586 "No affine channel for cpu %d and above\n", i); 1587 goto err_alloc_ch; 1588 } 1589 1590 priv->channel[priv->num_channels] = channel; 1591 1592 nctx = &channel->nctx; 1593 nctx->is_cdan = 1; 1594 nctx->cb = cdan_cb; 1595 nctx->id = channel->ch_id; 1596 nctx->desired_cpu = i; 1597 1598 /* Register the new context */ 1599 channel->dpio = dpaa2_io_service_select(i); 1600 err = dpaa2_io_service_register(channel->dpio, nctx); 1601 if (err) { 1602 dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 1603 /* If no affine DPIO for this core, there's probably 1604 * none available for next cores either. Signal we want 1605 * to retry later, in case the DPIO devices weren't 1606 * probed yet. 
1607 */ 1608 err = -EPROBE_DEFER; 1609 goto err_service_reg; 1610 } 1611 1612 /* Register DPCON notification with MC */ 1613 dpcon_notif_cfg.dpio_id = nctx->dpio_id; 1614 dpcon_notif_cfg.priority = 0; 1615 dpcon_notif_cfg.user_ctx = nctx->qman64; 1616 err = dpcon_set_notification(priv->mc_io, 0, 1617 channel->dpcon->mc_handle, 1618 &dpcon_notif_cfg); 1619 if (err) { 1620 dev_err(dev, "dpcon_set_notification failed()\n"); 1621 goto err_set_cdan; 1622 } 1623 1624 /* If we managed to allocate a channel and also found an affine 1625 * DPIO for this core, add it to the final mask 1626 */ 1627 cpumask_set_cpu(i, &priv->dpio_cpumask); 1628 priv->num_channels++; 1629 1630 /* Stop if we already have enough channels to accommodate all 1631 * RX and TX conf queues 1632 */ 1633 if (priv->num_channels == priv->dpni_attrs.num_queues) 1634 break; 1635 } 1636 1637 return 0; 1638 1639 err_set_cdan: 1640 dpaa2_io_service_deregister(channel->dpio, nctx); 1641 err_service_reg: 1642 free_channel(priv, channel); 1643 err_alloc_ch: 1644 if (err == -EPROBE_DEFER) 1645 return err; 1646 1647 if (cpumask_empty(&priv->dpio_cpumask)) { 1648 dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 1649 return -ENODEV; 1650 } 1651 1652 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 1653 cpumask_pr_args(&priv->dpio_cpumask)); 1654 1655 return 0; 1656 } 1657 1658 static void free_dpio(struct dpaa2_eth_priv *priv) 1659 { 1660 int i; 1661 struct dpaa2_eth_channel *ch; 1662 1663 /* deregister CDAN notifications and free channels */ 1664 for (i = 0; i < priv->num_channels; i++) { 1665 ch = priv->channel[i]; 1666 dpaa2_io_service_deregister(ch->dpio, &ch->nctx); 1667 free_channel(priv, ch); 1668 } 1669 } 1670 1671 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, 1672 int cpu) 1673 { 1674 struct device *dev = priv->net_dev->dev.parent; 1675 int i; 1676 1677 for (i = 0; i < priv->num_channels; i++) 1678 if (priv->channel[i]->nctx.desired_cpu == cpu) 1679 return priv->channel[i]; 1680 1681 /* We should never get here. Issue a warning and return 1682 * the first channel, because it's still better than nothing 1683 */ 1684 dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 1685 1686 return priv->channel[0]; 1687 } 1688 1689 static void set_fq_affinity(struct dpaa2_eth_priv *priv) 1690 { 1691 struct device *dev = priv->net_dev->dev.parent; 1692 struct cpumask xps_mask; 1693 struct dpaa2_eth_fq *fq; 1694 int rx_cpu, txc_cpu; 1695 int i, err; 1696 1697 /* For each FQ, pick one channel/CPU to deliver frames to. 1698 * This may well change at runtime, either through irqbalance or 1699 * through direct user intervention. 
1700 */ 1701 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 1702 1703 for (i = 0; i < priv->num_fqs; i++) { 1704 fq = &priv->fq[i]; 1705 switch (fq->type) { 1706 case DPAA2_RX_FQ: 1707 fq->target_cpu = rx_cpu; 1708 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 1709 if (rx_cpu >= nr_cpu_ids) 1710 rx_cpu = cpumask_first(&priv->dpio_cpumask); 1711 break; 1712 case DPAA2_TX_CONF_FQ: 1713 fq->target_cpu = txc_cpu; 1714 1715 /* Tell the stack to affine to txc_cpu the Tx queue 1716 * associated with the confirmation one 1717 */ 1718 cpumask_clear(&xps_mask); 1719 cpumask_set_cpu(txc_cpu, &xps_mask); 1720 err = netif_set_xps_queue(priv->net_dev, &xps_mask, 1721 fq->flowid); 1722 if (err) 1723 dev_err(dev, "Error setting XPS queue\n"); 1724 1725 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 1726 if (txc_cpu >= nr_cpu_ids) 1727 txc_cpu = cpumask_first(&priv->dpio_cpumask); 1728 break; 1729 default: 1730 dev_err(dev, "Unknown FQ type: %d\n", fq->type); 1731 } 1732 fq->channel = get_affine_channel(priv, fq->target_cpu); 1733 } 1734 } 1735 1736 static void setup_fqs(struct dpaa2_eth_priv *priv) 1737 { 1738 int i; 1739 1740 /* We have one TxConf FQ per Tx flow. 1741 * The number of Tx and Rx queues is the same. 1742 * Tx queues come first in the fq array. 1743 */ 1744 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 1745 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 1746 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 1747 priv->fq[priv->num_fqs++].flowid = (u16)i; 1748 } 1749 1750 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 1751 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 1752 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 1753 priv->fq[priv->num_fqs++].flowid = (u16)i; 1754 } 1755 1756 /* For each FQ, decide on which core to process incoming frames */ 1757 set_fq_affinity(priv); 1758 } 1759 1760 /* Allocate and configure one buffer pool for each interface */ 1761 static int setup_dpbp(struct dpaa2_eth_priv *priv) 1762 { 1763 int err; 1764 struct fsl_mc_device *dpbp_dev; 1765 struct device *dev = priv->net_dev->dev.parent; 1766 struct dpbp_attr dpbp_attrs; 1767 1768 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 1769 &dpbp_dev); 1770 if (err) { 1771 if (err == -ENXIO) 1772 err = -EPROBE_DEFER; 1773 else 1774 dev_err(dev, "DPBP device allocation failed\n"); 1775 return err; 1776 } 1777 1778 priv->dpbp_dev = dpbp_dev; 1779 1780 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, 1781 &dpbp_dev->mc_handle); 1782 if (err) { 1783 dev_err(dev, "dpbp_open() failed\n"); 1784 goto err_open; 1785 } 1786 1787 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 1788 if (err) { 1789 dev_err(dev, "dpbp_reset() failed\n"); 1790 goto err_reset; 1791 } 1792 1793 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 1794 if (err) { 1795 dev_err(dev, "dpbp_enable() failed\n"); 1796 goto err_enable; 1797 } 1798 1799 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 1800 &dpbp_attrs); 1801 if (err) { 1802 dev_err(dev, "dpbp_get_attributes() failed\n"); 1803 goto err_get_attr; 1804 } 1805 priv->bpid = dpbp_attrs.bpid; 1806 1807 return 0; 1808 1809 err_get_attr: 1810 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 1811 err_enable: 1812 err_reset: 1813 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 1814 err_open: 1815 fsl_mc_object_free(dpbp_dev); 1816 1817 return err; 1818 } 1819 1820 static void free_dpbp(struct dpaa2_eth_priv *priv) 1821 { 1822 drain_pool(priv); 1823 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 1824 
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 1825 fsl_mc_object_free(priv->dpbp_dev); 1826 } 1827 1828 static int set_buffer_layout(struct dpaa2_eth_priv *priv) 1829 { 1830 struct device *dev = priv->net_dev->dev.parent; 1831 struct dpni_buffer_layout buf_layout = {0}; 1832 int err; 1833 1834 /* We need to check for WRIOP version 1.0.0, but depending on the MC 1835 * version, this number is not always provided correctly on rev1. 1836 * We need to check for both alternatives in this situation. 1837 */ 1838 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 1839 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 1840 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 1841 else 1842 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 1843 1844 /* tx buffer */ 1845 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 1846 buf_layout.pass_timestamp = true; 1847 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 1848 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1849 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1850 DPNI_QUEUE_TX, &buf_layout); 1851 if (err) { 1852 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 1853 return err; 1854 } 1855 1856 /* tx-confirm buffer */ 1857 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1858 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1859 DPNI_QUEUE_TX_CONFIRM, &buf_layout); 1860 if (err) { 1861 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 1862 return err; 1863 } 1864 1865 /* Now that we've set our tx buffer layout, retrieve the minimum 1866 * required tx data offset. 1867 */ 1868 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 1869 &priv->tx_data_offset); 1870 if (err) { 1871 dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 1872 return err; 1873 } 1874 1875 if ((priv->tx_data_offset % 64) != 0) 1876 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 1877 priv->tx_data_offset); 1878 1879 /* rx buffer */ 1880 buf_layout.pass_frame_status = true; 1881 buf_layout.pass_parser_result = true; 1882 buf_layout.data_align = priv->rx_buf_align; 1883 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 1884 buf_layout.private_data_size = 0; 1885 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 1886 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 1887 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 1888 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 1889 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1890 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1891 DPNI_QUEUE_RX, &buf_layout); 1892 if (err) { 1893 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 1894 return err; 1895 } 1896 1897 return 0; 1898 } 1899 1900 /* Configure the DPNI object this interface is associated with */ 1901 static int setup_dpni(struct fsl_mc_device *ls_dev) 1902 { 1903 struct device *dev = &ls_dev->dev; 1904 struct dpaa2_eth_priv *priv; 1905 struct net_device *net_dev; 1906 int err; 1907 1908 net_dev = dev_get_drvdata(dev); 1909 priv = netdev_priv(net_dev); 1910 1911 /* get a handle for the DPNI object */ 1912 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 1913 if (err) { 1914 dev_err(dev, "dpni_open() failed\n"); 1915 return err; 1916 } 1917 1918 /* Check if we can work with this DPNI object */ 1919 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 1920 &priv->dpni_ver_minor); 1921 if (err) { 1922 dev_err(dev, "dpni_get_api_version() failed\n"); 1923 goto close; 1924 } 1925 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 1926 dev_err(dev, "DPNI 
version %u.%u not supported, need >= %u.%u\n", 1927 priv->dpni_ver_major, priv->dpni_ver_minor, 1928 DPNI_VER_MAJOR, DPNI_VER_MINOR); 1929 err = -ENOTSUPP; 1930 goto close; 1931 } 1932 1933 ls_dev->mc_io = priv->mc_io; 1934 ls_dev->mc_handle = priv->mc_token; 1935 1936 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 1937 if (err) { 1938 dev_err(dev, "dpni_reset() failed\n"); 1939 goto close; 1940 } 1941 1942 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 1943 &priv->dpni_attrs); 1944 if (err) { 1945 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 1946 goto close; 1947 } 1948 1949 err = set_buffer_layout(priv); 1950 if (err) 1951 goto close; 1952 1953 priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * 1954 dpaa2_eth_fs_count(priv), GFP_KERNEL); 1955 if (!priv->cls_rules) 1956 goto close; 1957 1958 return 0; 1959 1960 close: 1961 dpni_close(priv->mc_io, 0, priv->mc_token); 1962 1963 return err; 1964 } 1965 1966 static void free_dpni(struct dpaa2_eth_priv *priv) 1967 { 1968 int err; 1969 1970 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 1971 if (err) 1972 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 1973 err); 1974 1975 dpni_close(priv->mc_io, 0, priv->mc_token); 1976 } 1977 1978 static int setup_rx_flow(struct dpaa2_eth_priv *priv, 1979 struct dpaa2_eth_fq *fq) 1980 { 1981 struct device *dev = priv->net_dev->dev.parent; 1982 struct dpni_queue queue; 1983 struct dpni_queue_id qid; 1984 struct dpni_taildrop td; 1985 int err; 1986 1987 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 1988 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); 1989 if (err) { 1990 dev_err(dev, "dpni_get_queue(RX) failed\n"); 1991 return err; 1992 } 1993 1994 fq->fqid = qid.fqid; 1995 1996 queue.destination.id = fq->channel->dpcon_id; 1997 queue.destination.type = DPNI_DEST_DPCON; 1998 queue.destination.priority = 1; 1999 queue.user_context = (u64)(uintptr_t)fq; 2000 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 2001 DPNI_QUEUE_RX, 0, fq->flowid, 2002 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 2003 &queue); 2004 if (err) { 2005 dev_err(dev, "dpni_set_queue(RX) failed\n"); 2006 return err; 2007 } 2008 2009 td.enable = 1; 2010 td.threshold = DPAA2_ETH_TAILDROP_THRESH; 2011 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, 2012 DPNI_QUEUE_RX, 0, fq->flowid, &td); 2013 if (err) { 2014 dev_err(dev, "dpni_set_threshold() failed\n"); 2015 return err; 2016 } 2017 2018 return 0; 2019 } 2020 2021 static int setup_tx_flow(struct dpaa2_eth_priv *priv, 2022 struct dpaa2_eth_fq *fq) 2023 { 2024 struct device *dev = priv->net_dev->dev.parent; 2025 struct dpni_queue queue; 2026 struct dpni_queue_id qid; 2027 int err; 2028 2029 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 2030 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); 2031 if (err) { 2032 dev_err(dev, "dpni_get_queue(TX) failed\n"); 2033 return err; 2034 } 2035 2036 fq->tx_qdbin = qid.qdbin; 2037 2038 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 2039 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 2040 &queue, &qid); 2041 if (err) { 2042 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 2043 return err; 2044 } 2045 2046 fq->fqid = qid.fqid; 2047 2048 queue.destination.id = fq->channel->dpcon_id; 2049 queue.destination.type = DPNI_DEST_DPCON; 2050 queue.destination.priority = 0; 2051 queue.user_context = (u64)(uintptr_t)fq; 2052 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 2053 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 2054 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 2055 
static int setup_tx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX) failed\n");
		return err;
	}

	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.size = 2,
	},
};

/* Configure the Rx hash key using the legacy API */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist failed\n");

	return err;
}

/* Configure the Rx hash key using the new API */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_hash_dist failed\n");

	return err;
}

/* Configure the Rx flow classification key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_fs_dist failed\n");

	return err;
}
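/* Illustrative layout of the full classification key built from dist_fields[]
 * above (offsets as returned by dpaa2_eth_cls_fld_off(), total size as
 * returned by dpaa2_eth_cls_key_size()):
 *
 *	offset	size	field
 *	0	6	NH_FLD_ETH_DA
 *	6	6	NH_FLD_ETH_SA
 *	12	2	NH_FLD_ETH_TYPE
 *	14	2	NH_FLD_VLAN_TCI
 *	16	4	NH_FLD_IP_SRC
 *	20	4	NH_FLD_IP_DST
 *	24	1	NH_FLD_IP_PROTO
 *	25	2	NH_FLD_UDP_PORT_SRC
 *	27	2	NH_FLD_UDP_PORT_DST
 *			=> 29 bytes total
 */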
/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(void)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		size += dist_fields[i].size;

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For Rx hashing key we set only the selected fields.
		 * For Rx flow classification key we set all supported fields
		 */
		if (type == DPAA2_ETH_RX_DIST_HASH) {
			if (!(flags & dist_fields[i].rxnfc_field))
				continue;
			rx_hash_fields |= dist_fields[i].rxnfc_field;
		}

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = config_legacy_hash_key(priv, key_iova);
		else
			err = config_hash_key(priv, key_iova);
	} else {
		err = config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags);
}
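/* Illustrative only: an ethtool rx-flow-hash request covering the UDP/IP
 * 4-tuple would reach this code roughly as
 *
 *	dpaa2_eth_set_hash(net_dev, RXH_IP_SRC | RXH_IP_DST |
 *				    RXH_L4_B_0_1 | RXH_L4_B_2_3);
 *
 * which selects the IP source/destination and UDP port entries from
 * dist_fields[] and programs a four-field hash key.
 */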
static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (priv->dpni_attrs.options & DPNI_OPT_NO_FS ||
	    !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	priv->rx_cls_enabled = 1;

	return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0);
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* Have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}
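/* Each channel owns a software store of DPAA2_ETH_STORE_SIZE frame descriptor
 * slots; the DPIO service places dequeued frames into it and the channel's
 * NAPI handler (dpaa2_eth_poll(), registered in add_ch_napi() below) drains
 * it when servicing the channel.
 */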
/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

	return 0;
}
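/* In short, the MAC address policy implemented above is:
 *   1. a non-zero address reported by firmware (e.g. set by the bootloader
 *      on the physical port) wins and is pushed into the DPNI if needed;
 *   2. otherwise a non-zero DPNI primary address is reused;
 *   3. otherwise a random address is generated, written to the DPNI and
 *      treated as permanent until the next reboot.
 */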
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	return IRQ_HANDLED;
}
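/* The DPNI interrupt is an MSI delivered through the fsl-mc bus. Only the
 * link state change event is unmasked below; the handler runs as a threaded
 * interrupt and re-reads the interrupt status from the MC before acting on it.
 */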
static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}
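/* Probe sequence: allocate the net_device and an MC portal, set up the MC
 * objects (DPNI, DPIOs, frame queues, DPBP), bind them together, add the
 * per-channel NAPI contexts and per-CPU statistics, initialize the netdev,
 * configure checksum offloads, allocate the dequeue stores, then hook up the
 * link-state interrupt (falling back to a polling kthread) before registering
 * the net device. The error labels at the end unwind these steps in reverse.
 */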
static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	/* Log before free_netdev(); touching net_dev afterwards would be a
	 * use-after-free.
	 */
	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	free_netdev(net_dev);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

module_fsl_mc_driver(dpaa2_eth_driver);