1 // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) 2 /* Copyright 2014-2016 Freescale Semiconductor Inc. 3 * Copyright 2016-2017 NXP 4 */ 5 #include <linux/init.h> 6 #include <linux/module.h> 7 #include <linux/platform_device.h> 8 #include <linux/etherdevice.h> 9 #include <linux/of_net.h> 10 #include <linux/interrupt.h> 11 #include <linux/msi.h> 12 #include <linux/kthread.h> 13 #include <linux/iommu.h> 14 #include <linux/net_tstamp.h> 15 #include <linux/fsl/mc.h> 16 17 #include <net/sock.h> 18 19 #include "dpaa2-eth.h" 20 21 /* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files 22 * using trace events only need to #include <trace/events/sched.h> 23 */ 24 #define CREATE_TRACE_POINTS 25 #include "dpaa2-eth-trace.h" 26 27 MODULE_LICENSE("Dual BSD/GPL"); 28 MODULE_AUTHOR("Freescale Semiconductor, Inc"); 29 MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); 30 31 static void *dpaa2_iova_to_virt(struct iommu_domain *domain, 32 dma_addr_t iova_addr) 33 { 34 phys_addr_t phys_addr; 35 36 phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr; 37 38 return phys_to_virt(phys_addr); 39 } 40 41 static void validate_rx_csum(struct dpaa2_eth_priv *priv, 42 u32 fd_status, 43 struct sk_buff *skb) 44 { 45 skb_checksum_none_assert(skb); 46 47 /* HW checksum validation is disabled, nothing to do here */ 48 if (!(priv->net_dev->features & NETIF_F_RXCSUM)) 49 return; 50 51 /* Read checksum validation bits */ 52 if (!((fd_status & DPAA2_FAS_L3CV) && 53 (fd_status & DPAA2_FAS_L4CV))) 54 return; 55 56 /* Inform the stack there's no need to compute L3/L4 csum anymore */ 57 skb->ip_summed = CHECKSUM_UNNECESSARY; 58 } 59 60 /* Free a received FD. 61 * Not to be used for Tx conf FDs or on any other paths. 62 */ 63 static void free_rx_fd(struct dpaa2_eth_priv *priv, 64 const struct dpaa2_fd *fd, 65 void *vaddr) 66 { 67 struct device *dev = priv->net_dev->dev.parent; 68 dma_addr_t addr = dpaa2_fd_get_addr(fd); 69 u8 fd_format = dpaa2_fd_get_format(fd); 70 struct dpaa2_sg_entry *sgt; 71 void *sg_vaddr; 72 int i; 73 74 /* If single buffer frame, just free the data buffer */ 75 if (fd_format == dpaa2_fd_single) 76 goto free_buf; 77 else if (fd_format != dpaa2_fd_sg) 78 /* We don't support any other format */ 79 return; 80 81 /* For S/G frames, we first need to free all SG entries 82 * except the first one, which was taken care of already 83 */ 84 sgt = vaddr + dpaa2_fd_get_offset(fd); 85 for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 86 addr = dpaa2_sg_get_addr(&sgt[i]); 87 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 88 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, 89 DMA_FROM_DEVICE); 90 91 skb_free_frag(sg_vaddr); 92 if (dpaa2_sg_is_final(&sgt[i])) 93 break; 94 } 95 96 free_buf: 97 skb_free_frag(vaddr); 98 } 99 100 /* Build a linear skb based on a single-buffer frame descriptor */ 101 static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch, 102 const struct dpaa2_fd *fd, 103 void *fd_vaddr) 104 { 105 struct sk_buff *skb = NULL; 106 u16 fd_offset = dpaa2_fd_get_offset(fd); 107 u32 fd_length = dpaa2_fd_get_len(fd); 108 109 ch->buf_count--; 110 111 skb = build_skb(fd_vaddr, DPAA2_ETH_SKB_SIZE); 112 if (unlikely(!skb)) 113 return NULL; 114 115 skb_reserve(skb, fd_offset); 116 skb_put(skb, fd_length); 117 118 return skb; 119 } 120 121 /* Build a non linear (fragmented) skb based on a S/G table */ 122 static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, 123 struct dpaa2_eth_channel *ch, 124 struct dpaa2_sg_entry *sgt) 125 { 126 struct 
sk_buff *skb = NULL; 127 struct device *dev = priv->net_dev->dev.parent; 128 void *sg_vaddr; 129 dma_addr_t sg_addr; 130 u16 sg_offset; 131 u32 sg_length; 132 struct page *page, *head_page; 133 int page_offset; 134 int i; 135 136 for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { 137 struct dpaa2_sg_entry *sge = &sgt[i]; 138 139 /* NOTE: We only support SG entries in dpaa2_sg_single format, 140 * but this is the only format we may receive from HW anyway 141 */ 142 143 /* Get the address and length from the S/G entry */ 144 sg_addr = dpaa2_sg_get_addr(sge); 145 sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); 146 dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, 147 DMA_FROM_DEVICE); 148 149 sg_length = dpaa2_sg_get_len(sge); 150 151 if (i == 0) { 152 /* We build the skb around the first data buffer */ 153 skb = build_skb(sg_vaddr, DPAA2_ETH_SKB_SIZE); 154 if (unlikely(!skb)) { 155 /* Free the first SG entry now, since we already 156 * unmapped it and obtained the virtual address 157 */ 158 skb_free_frag(sg_vaddr); 159 160 /* We still need to subtract the buffers used 161 * by this FD from our software counter 162 */ 163 while (!dpaa2_sg_is_final(&sgt[i]) && 164 i < DPAA2_ETH_MAX_SG_ENTRIES) 165 i++; 166 break; 167 } 168 169 sg_offset = dpaa2_sg_get_offset(sge); 170 skb_reserve(skb, sg_offset); 171 skb_put(skb, sg_length); 172 } else { 173 /* Rest of the data buffers are stored as skb frags */ 174 page = virt_to_page(sg_vaddr); 175 head_page = virt_to_head_page(sg_vaddr); 176 177 /* Offset in page (which may be compound). 178 * Data in subsequent SG entries is stored from the 179 * beginning of the buffer, so we don't need to add the 180 * sg_offset. 181 */ 182 page_offset = ((unsigned long)sg_vaddr & 183 (PAGE_SIZE - 1)) + 184 (page_address(page) - page_address(head_page)); 185 186 skb_add_rx_frag(skb, i - 1, head_page, page_offset, 187 sg_length, DPAA2_ETH_RX_BUF_SIZE); 188 } 189 190 if (dpaa2_sg_is_final(sge)) 191 break; 192 } 193 194 WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT"); 195 196 /* Count all data buffers + SG table buffer */ 197 ch->buf_count -= i + 2; 198 199 return skb; 200 } 201 202 /* Main Rx frame processing routine */ 203 static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, 204 struct dpaa2_eth_channel *ch, 205 const struct dpaa2_fd *fd, 206 struct napi_struct *napi, 207 u16 queue_id) 208 { 209 dma_addr_t addr = dpaa2_fd_get_addr(fd); 210 u8 fd_format = dpaa2_fd_get_format(fd); 211 void *vaddr; 212 struct sk_buff *skb; 213 struct rtnl_link_stats64 *percpu_stats; 214 struct dpaa2_eth_drv_stats *percpu_extras; 215 struct device *dev = priv->net_dev->dev.parent; 216 struct dpaa2_fas *fas; 217 void *buf_data; 218 u32 status = 0; 219 220 /* Tracing point */ 221 trace_dpaa2_rx_fd(priv->net_dev, fd); 222 223 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); 224 dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); 225 226 fas = dpaa2_get_fas(vaddr, false); 227 prefetch(fas); 228 buf_data = vaddr + dpaa2_fd_get_offset(fd); 229 prefetch(buf_data); 230 231 percpu_stats = this_cpu_ptr(priv->percpu_stats); 232 percpu_extras = this_cpu_ptr(priv->percpu_extras); 233 234 if (fd_format == dpaa2_fd_single) { 235 skb = build_linear_skb(ch, fd, vaddr); 236 } else if (fd_format == dpaa2_fd_sg) { 237 skb = build_frag_skb(priv, ch, buf_data); 238 skb_free_frag(vaddr); 239 percpu_extras->rx_sg_frames++; 240 percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); 241 } else { 242 /* We don't support any other format */ 243 goto 
err_frame_format; 244 } 245 246 if (unlikely(!skb)) 247 goto err_build_skb; 248 249 prefetch(skb->data); 250 251 /* Get the timestamp value */ 252 if (priv->rx_tstamp) { 253 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); 254 __le64 *ts = dpaa2_get_ts(vaddr, false); 255 u64 ns; 256 257 memset(shhwtstamps, 0, sizeof(*shhwtstamps)); 258 259 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 260 shhwtstamps->hwtstamp = ns_to_ktime(ns); 261 } 262 263 /* Check if we need to validate the L4 csum */ 264 if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) { 265 status = le32_to_cpu(fas->status); 266 validate_rx_csum(priv, status, skb); 267 } 268 269 skb->protocol = eth_type_trans(skb, priv->net_dev); 270 skb_record_rx_queue(skb, queue_id); 271 272 percpu_stats->rx_packets++; 273 percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); 274 275 napi_gro_receive(napi, skb); 276 277 return; 278 279 err_build_skb: 280 free_rx_fd(priv, fd, vaddr); 281 err_frame_format: 282 percpu_stats->rx_dropped++; 283 } 284 285 /* Consume all frames pull-dequeued into the store. This is the simplest way to 286 * make sure we don't accidentally issue another volatile dequeue which would 287 * overwrite (leak) frames already in the store. 288 * 289 * Observance of NAPI budget is not our concern, leaving that to the caller. 290 */ 291 static int consume_frames(struct dpaa2_eth_channel *ch, 292 enum dpaa2_eth_fq_type *type) 293 { 294 struct dpaa2_eth_priv *priv = ch->priv; 295 struct dpaa2_eth_fq *fq = NULL; 296 struct dpaa2_dq *dq; 297 const struct dpaa2_fd *fd; 298 int cleaned = 0; 299 int is_last; 300 301 do { 302 dq = dpaa2_io_store_next(ch->store, &is_last); 303 if (unlikely(!dq)) { 304 /* If we're here, we *must* have placed a 305 * volatile dequeue comnmand, so keep reading through 306 * the store until we get some sort of valid response 307 * token (either a valid frame or an "empty dequeue") 308 */ 309 continue; 310 } 311 312 fd = dpaa2_dq_fd(dq); 313 fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); 314 315 fq->consume(priv, ch, fd, &ch->napi, fq->flowid); 316 cleaned++; 317 } while (!is_last); 318 319 if (!cleaned) 320 return 0; 321 322 fq->stats.frames += cleaned; 323 ch->stats.frames += cleaned; 324 325 /* A dequeue operation only pulls frames from a single queue 326 * into the store. Return the frame queue type as an out param. 
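 *
 * A minimal usage sketch (it simply mirrors what dpaa2_eth_poll() below
 * already does, shown here for illustration only):
 *
 *	enum dpaa2_eth_fq_type type;
 *	int cleaned;
 *
 *	cleaned = consume_frames(ch, &type);
 *	if (type == DPAA2_RX_FQ)
 *		rx_cleaned += cleaned;
 *	else
 *		txconf_cleaned += cleaned;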
327 */ 328 if (type) 329 *type = fq->type; 330 331 return cleaned; 332 } 333 334 /* Configure the egress frame annotation for timestamp update */ 335 static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start) 336 { 337 struct dpaa2_faead *faead; 338 u32 ctrl, frc; 339 340 /* Mark the egress frame annotation area as valid */ 341 frc = dpaa2_fd_get_frc(fd); 342 dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); 343 344 /* Set hardware annotation size */ 345 ctrl = dpaa2_fd_get_ctrl(fd); 346 dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL); 347 348 /* enable UPD (update prepanded data) bit in FAEAD field of 349 * hardware frame annotation area 350 */ 351 ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; 352 faead = dpaa2_get_faead(buf_start, true); 353 faead->ctrl = cpu_to_le32(ctrl); 354 } 355 356 /* Create a frame descriptor based on a fragmented skb */ 357 static int build_sg_fd(struct dpaa2_eth_priv *priv, 358 struct sk_buff *skb, 359 struct dpaa2_fd *fd) 360 { 361 struct device *dev = priv->net_dev->dev.parent; 362 void *sgt_buf = NULL; 363 dma_addr_t addr; 364 int nr_frags = skb_shinfo(skb)->nr_frags; 365 struct dpaa2_sg_entry *sgt; 366 int i, err; 367 int sgt_buf_size; 368 struct scatterlist *scl, *crt_scl; 369 int num_sg; 370 int num_dma_bufs; 371 struct dpaa2_eth_swa *swa; 372 373 /* Create and map scatterlist. 374 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have 375 * to go beyond nr_frags+1. 376 * Note: We don't support chained scatterlists 377 */ 378 if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) 379 return -EINVAL; 380 381 scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); 382 if (unlikely(!scl)) 383 return -ENOMEM; 384 385 sg_init_table(scl, nr_frags + 1); 386 num_sg = skb_to_sgvec(skb, scl, 0, skb->len); 387 num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 388 if (unlikely(!num_dma_bufs)) { 389 err = -ENOMEM; 390 goto dma_map_sg_failed; 391 } 392 393 /* Prepare the HW SGT structure */ 394 sgt_buf_size = priv->tx_data_offset + 395 sizeof(struct dpaa2_sg_entry) * num_dma_bufs; 396 sgt_buf = netdev_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN); 397 if (unlikely(!sgt_buf)) { 398 err = -ENOMEM; 399 goto sgt_buf_alloc_failed; 400 } 401 sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); 402 memset(sgt_buf, 0, sgt_buf_size); 403 404 sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); 405 406 /* Fill in the HW SGT structure. 407 * 408 * sgt_buf is zeroed out, so the following fields are implicit 409 * in all sgt entries: 410 * - offset is 0 411 * - format is 'dpaa2_sg_single' 412 */ 413 for_each_sg(scl, crt_scl, num_dma_bufs, i) { 414 dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); 415 dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); 416 } 417 dpaa2_sg_set_final(&sgt[i - 1], true); 418 419 /* Store the skb backpointer in the SGT buffer. 420 * Fit the scatterlist and the number of buffers alongside the 421 * skb backpointer in the software annotation area. We'll need 422 * all of them on Tx Conf. 
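 *
 * On the confirmation path, free_tx_fd() reads the same fields back from
 * this annotation area to undo the mappings, roughly:
 *
 *	swa = (struct dpaa2_eth_swa *)skbh;
 *	skb = swa->skb;
 *	dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL);
 *	kfree(swa->scl);
 *	dma_unmap_single(dev, fd_addr, swa->sgt_size, DMA_BIDIRECTIONAL);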
423 */ 424 swa = (struct dpaa2_eth_swa *)sgt_buf; 425 swa->skb = skb; 426 swa->scl = scl; 427 swa->num_sg = num_sg; 428 swa->sgt_size = sgt_buf_size; 429 430 /* Separately map the SGT buffer */ 431 addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL); 432 if (unlikely(dma_mapping_error(dev, addr))) { 433 err = -ENOMEM; 434 goto dma_map_single_failed; 435 } 436 dpaa2_fd_set_offset(fd, priv->tx_data_offset); 437 dpaa2_fd_set_format(fd, dpaa2_fd_sg); 438 dpaa2_fd_set_addr(fd, addr); 439 dpaa2_fd_set_len(fd, skb->len); 440 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); 441 442 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 443 enable_tx_tstamp(fd, sgt_buf); 444 445 return 0; 446 447 dma_map_single_failed: 448 skb_free_frag(sgt_buf); 449 sgt_buf_alloc_failed: 450 dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL); 451 dma_map_sg_failed: 452 kfree(scl); 453 return err; 454 } 455 456 /* Create a frame descriptor based on a linear skb */ 457 static int build_single_fd(struct dpaa2_eth_priv *priv, 458 struct sk_buff *skb, 459 struct dpaa2_fd *fd) 460 { 461 struct device *dev = priv->net_dev->dev.parent; 462 u8 *buffer_start, *aligned_start; 463 struct sk_buff **skbh; 464 dma_addr_t addr; 465 466 buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb); 467 468 /* If there's enough room to align the FD address, do it. 469 * It will help hardware optimize accesses. 470 */ 471 aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN, 472 DPAA2_ETH_TX_BUF_ALIGN); 473 if (aligned_start >= skb->head) 474 buffer_start = aligned_start; 475 476 /* Store a backpointer to the skb at the beginning of the buffer 477 * (in the private data area) such that we can release it 478 * on Tx confirm 479 */ 480 skbh = (struct sk_buff **)buffer_start; 481 *skbh = skb; 482 483 addr = dma_map_single(dev, buffer_start, 484 skb_tail_pointer(skb) - buffer_start, 485 DMA_BIDIRECTIONAL); 486 if (unlikely(dma_mapping_error(dev, addr))) 487 return -ENOMEM; 488 489 dpaa2_fd_set_addr(fd, addr); 490 dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); 491 dpaa2_fd_set_len(fd, skb->len); 492 dpaa2_fd_set_format(fd, dpaa2_fd_single); 493 dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA | FD_CTRL_PTV1); 494 495 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 496 enable_tx_tstamp(fd, buffer_start); 497 498 return 0; 499 } 500 501 /* FD freeing routine on the Tx path 502 * 503 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb 504 * back-pointed to is also freed. 505 * This can be called either from dpaa2_eth_tx_conf() or on the error path of 506 * dpaa2_eth_tx(). 507 */ 508 static void free_tx_fd(const struct dpaa2_eth_priv *priv, 509 const struct dpaa2_fd *fd) 510 { 511 struct device *dev = priv->net_dev->dev.parent; 512 dma_addr_t fd_addr; 513 struct sk_buff **skbh, *skb; 514 unsigned char *buffer_start; 515 struct dpaa2_eth_swa *swa; 516 u8 fd_format = dpaa2_fd_get_format(fd); 517 518 fd_addr = dpaa2_fd_get_addr(fd); 519 skbh = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr); 520 521 if (fd_format == dpaa2_fd_single) { 522 skb = *skbh; 523 buffer_start = (unsigned char *)skbh; 524 /* Accessing the skb buffer is safe before dma unmap, because 525 * we didn't map the actual skb shell. 
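 * build_single_fd() only mapped the packet data area, starting at
 * buffer_start and ending at skb_tail_pointer(skb); the struct sk_buff
 * itself was never handed to the device, so reading its fields here to
 * compute the unmap length is fine.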
526 */ 527 dma_unmap_single(dev, fd_addr, 528 skb_tail_pointer(skb) - buffer_start, 529 DMA_BIDIRECTIONAL); 530 } else if (fd_format == dpaa2_fd_sg) { 531 swa = (struct dpaa2_eth_swa *)skbh; 532 skb = swa->skb; 533 534 /* Unmap the scatterlist */ 535 dma_unmap_sg(dev, swa->scl, swa->num_sg, DMA_BIDIRECTIONAL); 536 kfree(swa->scl); 537 538 /* Unmap the SGT buffer */ 539 dma_unmap_single(dev, fd_addr, swa->sgt_size, 540 DMA_BIDIRECTIONAL); 541 } else { 542 netdev_dbg(priv->net_dev, "Invalid FD format\n"); 543 return; 544 } 545 546 /* Get the timestamp value */ 547 if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { 548 struct skb_shared_hwtstamps shhwtstamps; 549 __le64 *ts = dpaa2_get_ts(skbh, true); 550 u64 ns; 551 552 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 553 554 ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts); 555 shhwtstamps.hwtstamp = ns_to_ktime(ns); 556 skb_tstamp_tx(skb, &shhwtstamps); 557 } 558 559 /* Free SGT buffer allocated on tx */ 560 if (fd_format != dpaa2_fd_single) 561 skb_free_frag(skbh); 562 563 /* Move on with skb release */ 564 dev_kfree_skb(skb); 565 } 566 567 static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) 568 { 569 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 570 struct dpaa2_fd fd; 571 struct rtnl_link_stats64 *percpu_stats; 572 struct dpaa2_eth_drv_stats *percpu_extras; 573 struct dpaa2_eth_fq *fq; 574 u16 queue_mapping; 575 unsigned int needed_headroom; 576 int err, i; 577 578 percpu_stats = this_cpu_ptr(priv->percpu_stats); 579 percpu_extras = this_cpu_ptr(priv->percpu_extras); 580 581 needed_headroom = dpaa2_eth_needed_headroom(priv, skb); 582 if (skb_headroom(skb) < needed_headroom) { 583 struct sk_buff *ns; 584 585 ns = skb_realloc_headroom(skb, needed_headroom); 586 if (unlikely(!ns)) { 587 percpu_stats->tx_dropped++; 588 goto err_alloc_headroom; 589 } 590 percpu_extras->tx_reallocs++; 591 592 if (skb->sk) 593 skb_set_owner_w(ns, skb->sk); 594 595 dev_kfree_skb(skb); 596 skb = ns; 597 } 598 599 /* We'll be holding a back-reference to the skb until Tx Confirmation; 600 * we don't want that overwritten by a concurrent Tx with a cloned skb. 601 */ 602 skb = skb_unshare(skb, GFP_ATOMIC); 603 if (unlikely(!skb)) { 604 /* skb_unshare() has already freed the skb */ 605 percpu_stats->tx_dropped++; 606 return NETDEV_TX_OK; 607 } 608 609 /* Setup the FD fields */ 610 memset(&fd, 0, sizeof(fd)); 611 612 if (skb_is_nonlinear(skb)) { 613 err = build_sg_fd(priv, skb, &fd); 614 percpu_extras->tx_sg_frames++; 615 percpu_extras->tx_sg_bytes += skb->len; 616 } else { 617 err = build_single_fd(priv, skb, &fd); 618 } 619 620 if (unlikely(err)) { 621 percpu_stats->tx_dropped++; 622 goto err_build_fd; 623 } 624 625 /* Tracing point */ 626 trace_dpaa2_tx_fd(net_dev, &fd); 627 628 /* TxConf FQ selection relies on queue id from the stack. 
629 * In case of a forwarded frame from another DPNI interface, we choose 630 * a queue affined to the same core that processed the Rx frame 631 */ 632 queue_mapping = skb_get_queue_mapping(skb); 633 fq = &priv->fq[queue_mapping]; 634 for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { 635 err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, 636 priv->tx_qdid, 0, 637 fq->tx_qdbin, &fd); 638 if (err != -EBUSY) 639 break; 640 } 641 percpu_extras->tx_portal_busy += i; 642 if (unlikely(err < 0)) { 643 percpu_stats->tx_errors++; 644 /* Clean up everything, including freeing the skb */ 645 free_tx_fd(priv, &fd); 646 } else { 647 percpu_stats->tx_packets++; 648 percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); 649 } 650 651 return NETDEV_TX_OK; 652 653 err_build_fd: 654 err_alloc_headroom: 655 dev_kfree_skb(skb); 656 657 return NETDEV_TX_OK; 658 } 659 660 /* Tx confirmation frame processing routine */ 661 static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, 662 struct dpaa2_eth_channel *ch __always_unused, 663 const struct dpaa2_fd *fd, 664 struct napi_struct *napi __always_unused, 665 u16 queue_id __always_unused) 666 { 667 struct rtnl_link_stats64 *percpu_stats; 668 struct dpaa2_eth_drv_stats *percpu_extras; 669 u32 fd_errors; 670 671 /* Tracing point */ 672 trace_dpaa2_tx_conf_fd(priv->net_dev, fd); 673 674 percpu_extras = this_cpu_ptr(priv->percpu_extras); 675 percpu_extras->tx_conf_frames++; 676 percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); 677 678 /* Check frame errors in the FD field */ 679 fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; 680 free_tx_fd(priv, fd); 681 682 if (likely(!fd_errors)) 683 return; 684 685 if (net_ratelimit()) 686 netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n", 687 fd_errors); 688 689 percpu_stats = this_cpu_ptr(priv->percpu_stats); 690 /* Tx-conf logically pertains to the egress path. 
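 * Frame descriptor errors reported on Tx confirmation are therefore
 * accounted as tx_errors; the FD and its skb were already released by
 * free_tx_fd() above.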
*/ 691 percpu_stats->tx_errors++; 692 } 693 694 static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) 695 { 696 int err; 697 698 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 699 DPNI_OFF_RX_L3_CSUM, enable); 700 if (err) { 701 netdev_err(priv->net_dev, 702 "dpni_set_offload(RX_L3_CSUM) failed\n"); 703 return err; 704 } 705 706 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 707 DPNI_OFF_RX_L4_CSUM, enable); 708 if (err) { 709 netdev_err(priv->net_dev, 710 "dpni_set_offload(RX_L4_CSUM) failed\n"); 711 return err; 712 } 713 714 return 0; 715 } 716 717 static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) 718 { 719 int err; 720 721 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 722 DPNI_OFF_TX_L3_CSUM, enable); 723 if (err) { 724 netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n"); 725 return err; 726 } 727 728 err = dpni_set_offload(priv->mc_io, 0, priv->mc_token, 729 DPNI_OFF_TX_L4_CSUM, enable); 730 if (err) { 731 netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n"); 732 return err; 733 } 734 735 return 0; 736 } 737 738 /* Free buffers acquired from the buffer pool or which were meant to 739 * be released in the pool 740 */ 741 static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) 742 { 743 struct device *dev = priv->net_dev->dev.parent; 744 void *vaddr; 745 int i; 746 747 for (i = 0; i < count; i++) { 748 vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); 749 dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, 750 DMA_FROM_DEVICE); 751 skb_free_frag(vaddr); 752 } 753 } 754 755 /* Perform a single release command to add buffers 756 * to the specified buffer pool 757 */ 758 static int add_bufs(struct dpaa2_eth_priv *priv, 759 struct dpaa2_eth_channel *ch, u16 bpid) 760 { 761 struct device *dev = priv->net_dev->dev.parent; 762 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 763 void *buf; 764 dma_addr_t addr; 765 int i, err; 766 767 for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { 768 /* Allocate buffer visible to WRIOP + skb shared info + 769 * alignment padding 770 */ 771 buf = napi_alloc_frag(dpaa2_eth_buf_raw_size(priv)); 772 if (unlikely(!buf)) 773 goto err_alloc; 774 775 buf = PTR_ALIGN(buf, priv->rx_buf_align); 776 777 addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, 778 DMA_FROM_DEVICE); 779 if (unlikely(dma_mapping_error(dev, addr))) 780 goto err_map; 781 782 buf_array[i] = addr; 783 784 /* tracing point */ 785 trace_dpaa2_eth_buf_seed(priv->net_dev, 786 buf, dpaa2_eth_buf_raw_size(priv), 787 addr, DPAA2_ETH_RX_BUF_SIZE, 788 bpid); 789 } 790 791 release_bufs: 792 /* In case the portal is busy, retry until successful */ 793 while ((err = dpaa2_io_service_release(ch->dpio, bpid, 794 buf_array, i)) == -EBUSY) 795 cpu_relax(); 796 797 /* If release command failed, clean up and bail out; 798 * not much else we can do about it 799 */ 800 if (err) { 801 free_bufs(priv, buf_array, i); 802 return 0; 803 } 804 805 return i; 806 807 err_map: 808 skb_free_frag(buf); 809 err_alloc: 810 /* If we managed to allocate at least some buffers, 811 * release them to hardware 812 */ 813 if (i) 814 goto release_bufs; 815 816 return 0; 817 } 818 819 static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) 820 { 821 int i, j; 822 int new_count; 823 824 /* This is the lazy seeding of Rx buffer pools. 825 * dpaa2_add_bufs() is also used on the Rx hotpath and calls 826 * napi_alloc_frag(). 
The trouble with that is that it in turn ends up 827 * calling this_cpu_ptr(), which mandates execution in atomic context. 828 * Rather than splitting up the code, do a one-off preempt disable. 829 */ 830 preempt_disable(); 831 for (j = 0; j < priv->num_channels; j++) { 832 for (i = 0; i < DPAA2_ETH_NUM_BUFS; 833 i += DPAA2_ETH_BUFS_PER_CMD) { 834 new_count = add_bufs(priv, priv->channel[j], bpid); 835 priv->channel[j]->buf_count += new_count; 836 837 if (new_count < DPAA2_ETH_BUFS_PER_CMD) { 838 preempt_enable(); 839 return -ENOMEM; 840 } 841 } 842 } 843 preempt_enable(); 844 845 return 0; 846 } 847 848 /** 849 * Drain the specified number of buffers from the DPNI's private buffer pool. 850 * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD 851 */ 852 static void drain_bufs(struct dpaa2_eth_priv *priv, int count) 853 { 854 u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; 855 int ret; 856 857 do { 858 ret = dpaa2_io_service_acquire(NULL, priv->bpid, 859 buf_array, count); 860 if (ret < 0) { 861 netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); 862 return; 863 } 864 free_bufs(priv, buf_array, ret); 865 } while (ret); 866 } 867 868 static void drain_pool(struct dpaa2_eth_priv *priv) 869 { 870 int i; 871 872 drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); 873 drain_bufs(priv, 1); 874 875 for (i = 0; i < priv->num_channels; i++) 876 priv->channel[i]->buf_count = 0; 877 } 878 879 /* Function is called from softirq context only, so we don't need to guard 880 * the access to percpu count 881 */ 882 static int refill_pool(struct dpaa2_eth_priv *priv, 883 struct dpaa2_eth_channel *ch, 884 u16 bpid) 885 { 886 int new_count; 887 888 if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) 889 return 0; 890 891 do { 892 new_count = add_bufs(priv, ch, bpid); 893 if (unlikely(!new_count)) { 894 /* Out of memory; abort for now, we'll try later on */ 895 break; 896 } 897 ch->buf_count += new_count; 898 } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); 899 900 if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) 901 return -ENOMEM; 902 903 return 0; 904 } 905 906 static int pull_channel(struct dpaa2_eth_channel *ch) 907 { 908 int err; 909 int dequeues = -1; 910 911 /* Retry while portal is busy */ 912 do { 913 err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id, 914 ch->store); 915 dequeues++; 916 cpu_relax(); 917 } while (err == -EBUSY); 918 919 ch->stats.dequeue_portal_busy += dequeues; 920 if (unlikely(err)) 921 ch->stats.pull_err++; 922 923 return err; 924 } 925 926 /* NAPI poll routine 927 * 928 * Frames are dequeued from the QMan channel associated with this NAPI context. 929 * Rx, Tx confirmation and (if configured) Rx error frames all count 930 * towards the NAPI budget. 931 */ 932 static int dpaa2_eth_poll(struct napi_struct *napi, int budget) 933 { 934 struct dpaa2_eth_channel *ch; 935 struct dpaa2_eth_priv *priv; 936 int rx_cleaned = 0, txconf_cleaned = 0; 937 enum dpaa2_eth_fq_type type = 0; 938 int store_cleaned; 939 int err; 940 941 ch = container_of(napi, struct dpaa2_eth_channel, napi); 942 priv = ch->priv; 943 944 do { 945 err = pull_channel(ch); 946 if (unlikely(err)) 947 break; 948 949 /* Refill pool if appropriate */ 950 refill_pool(priv, ch, priv->bpid); 951 952 store_cleaned = consume_frames(ch, &type); 953 if (type == DPAA2_RX_FQ) 954 rx_cleaned += store_cleaned; 955 else 956 txconf_cleaned += store_cleaned; 957 958 /* If we either consumed the whole NAPI budget with Rx frames 959 * or we reached the Tx confirmations threshold, we're done. 
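 * Returning the full budget tells the NAPI core that more work may be
 * pending, so napi_complete_done() is not called and the channel's CDAN
 * notification stays disarmed until a later poll run finishes early.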
960 */ 961 if (rx_cleaned >= budget || 962 txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) 963 return budget; 964 } while (store_cleaned); 965 966 /* We didn't consume the entire budget, so finish napi and 967 * re-enable data availability notifications 968 */ 969 napi_complete_done(napi, rx_cleaned); 970 do { 971 err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx); 972 cpu_relax(); 973 } while (err == -EBUSY); 974 WARN_ONCE(err, "CDAN notifications rearm failed on core %d", 975 ch->nctx.desired_cpu); 976 977 return max(rx_cleaned, 1); 978 } 979 980 static void enable_ch_napi(struct dpaa2_eth_priv *priv) 981 { 982 struct dpaa2_eth_channel *ch; 983 int i; 984 985 for (i = 0; i < priv->num_channels; i++) { 986 ch = priv->channel[i]; 987 napi_enable(&ch->napi); 988 } 989 } 990 991 static void disable_ch_napi(struct dpaa2_eth_priv *priv) 992 { 993 struct dpaa2_eth_channel *ch; 994 int i; 995 996 for (i = 0; i < priv->num_channels; i++) { 997 ch = priv->channel[i]; 998 napi_disable(&ch->napi); 999 } 1000 } 1001 1002 static int link_state_update(struct dpaa2_eth_priv *priv) 1003 { 1004 struct dpni_link_state state = {0}; 1005 int err; 1006 1007 err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); 1008 if (unlikely(err)) { 1009 netdev_err(priv->net_dev, 1010 "dpni_get_link_state() failed\n"); 1011 return err; 1012 } 1013 1014 /* Chech link state; speed / duplex changes are not treated yet */ 1015 if (priv->link_state.up == state.up) 1016 return 0; 1017 1018 priv->link_state = state; 1019 if (state.up) { 1020 netif_carrier_on(priv->net_dev); 1021 netif_tx_start_all_queues(priv->net_dev); 1022 } else { 1023 netif_tx_stop_all_queues(priv->net_dev); 1024 netif_carrier_off(priv->net_dev); 1025 } 1026 1027 netdev_info(priv->net_dev, "Link Event: state %s\n", 1028 state.up ? "up" : "down"); 1029 1030 return 0; 1031 } 1032 1033 static int dpaa2_eth_open(struct net_device *net_dev) 1034 { 1035 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1036 int err; 1037 1038 err = seed_pool(priv, priv->bpid); 1039 if (err) { 1040 /* Not much to do; the buffer pool, though not filled up, 1041 * may still contain some buffers which would enable us 1042 * to limp on. 1043 */ 1044 netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", 1045 priv->dpbp_dev->obj_desc.id, priv->bpid); 1046 } 1047 1048 /* We'll only start the txqs when the link is actually ready; make sure 1049 * we don't race against the link up notification, which may come 1050 * immediately after dpni_enable(); 1051 */ 1052 netif_tx_stop_all_queues(net_dev); 1053 enable_ch_napi(priv); 1054 /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will 1055 * return true and cause 'ip link show' to report the LOWER_UP flag, 1056 * even though the link notification wasn't even received. 1057 */ 1058 netif_carrier_off(net_dev); 1059 1060 err = dpni_enable(priv->mc_io, 0, priv->mc_token); 1061 if (err < 0) { 1062 netdev_err(net_dev, "dpni_enable() failed\n"); 1063 goto enable_err; 1064 } 1065 1066 /* If the DPMAC object has already processed the link up interrupt, 1067 * we have to learn the link state ourselves. 1068 */ 1069 err = link_state_update(priv); 1070 if (err < 0) { 1071 netdev_err(net_dev, "Can't update link state\n"); 1072 goto link_state_err; 1073 } 1074 1075 return 0; 1076 1077 link_state_err: 1078 enable_err: 1079 disable_ch_napi(priv); 1080 drain_pool(priv); 1081 return err; 1082 } 1083 1084 /* The DPIO store must be empty when we call this, 1085 * at the end of every NAPI cycle. 
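 * drain_channel() keeps issuing pull-dequeues and consuming frames until
 * the store comes back empty; drain_ingress_frames() runs it on every
 * channel when the interface is being stopped.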
1086 */ 1087 static u32 drain_channel(struct dpaa2_eth_channel *ch) 1088 { 1089 u32 drained = 0, total = 0; 1090 1091 do { 1092 pull_channel(ch); 1093 drained = consume_frames(ch, NULL); 1094 total += drained; 1095 } while (drained); 1096 1097 return total; 1098 } 1099 1100 static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) 1101 { 1102 struct dpaa2_eth_channel *ch; 1103 int i; 1104 u32 drained = 0; 1105 1106 for (i = 0; i < priv->num_channels; i++) { 1107 ch = priv->channel[i]; 1108 drained += drain_channel(ch); 1109 } 1110 1111 return drained; 1112 } 1113 1114 static int dpaa2_eth_stop(struct net_device *net_dev) 1115 { 1116 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1117 int dpni_enabled = 0; 1118 int retries = 10; 1119 u32 drained; 1120 1121 netif_tx_stop_all_queues(net_dev); 1122 netif_carrier_off(net_dev); 1123 1124 /* Loop while dpni_disable() attempts to drain the egress FQs 1125 * and confirm them back to us. 1126 */ 1127 do { 1128 dpni_disable(priv->mc_io, 0, priv->mc_token); 1129 dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); 1130 if (dpni_enabled) 1131 /* Allow the hardware some slack */ 1132 msleep(100); 1133 } while (dpni_enabled && --retries); 1134 if (!retries) { 1135 netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); 1136 /* Must go on and disable NAPI nonetheless, so we don't crash at 1137 * the next "ifconfig up" 1138 */ 1139 } 1140 1141 /* Wait for NAPI to complete on every core and disable it. 1142 * In particular, this will also prevent NAPI from being rescheduled if 1143 * a new CDAN is serviced, effectively discarding the CDAN. We therefore 1144 * don't even need to disarm the channels, except perhaps for the case 1145 * of a huge coalescing value. 1146 */ 1147 disable_ch_napi(priv); 1148 1149 /* Manually drain the Rx and TxConf queues */ 1150 drained = drain_ingress_frames(priv); 1151 if (drained) 1152 netdev_dbg(net_dev, "Drained %d frames.\n", drained); 1153 1154 /* Empty the buffer pool */ 1155 drain_pool(priv); 1156 1157 return 0; 1158 } 1159 1160 static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) 1161 { 1162 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1163 struct device *dev = net_dev->dev.parent; 1164 int err; 1165 1166 err = eth_mac_addr(net_dev, addr); 1167 if (err < 0) { 1168 dev_err(dev, "eth_mac_addr() failed (%d)\n", err); 1169 return err; 1170 } 1171 1172 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 1173 net_dev->dev_addr); 1174 if (err) { 1175 dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); 1176 return err; 1177 } 1178 1179 return 0; 1180 } 1181 1182 /** Fill in counters maintained by the GPP driver. These may be different from 1183 * the hardware counters obtained by ethtool. 1184 */ 1185 static void dpaa2_eth_get_stats(struct net_device *net_dev, 1186 struct rtnl_link_stats64 *stats) 1187 { 1188 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1189 struct rtnl_link_stats64 *percpu_stats; 1190 u64 *cpustats; 1191 u64 *netstats = (u64 *)stats; 1192 int i, j; 1193 int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); 1194 1195 for_each_possible_cpu(i) { 1196 percpu_stats = per_cpu_ptr(priv->percpu_stats, i); 1197 cpustats = (u64 *)percpu_stats; 1198 for (j = 0; j < num; j++) 1199 netstats[j] += cpustats[j]; 1200 } 1201 } 1202 1203 /* Copy mac unicast addresses from @net_dev to @priv. 1204 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
1205 */ 1206 static void add_uc_hw_addr(const struct net_device *net_dev, 1207 struct dpaa2_eth_priv *priv) 1208 { 1209 struct netdev_hw_addr *ha; 1210 int err; 1211 1212 netdev_for_each_uc_addr(ha, net_dev) { 1213 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 1214 ha->addr); 1215 if (err) 1216 netdev_warn(priv->net_dev, 1217 "Could not add ucast MAC %pM to the filtering table (err %d)\n", 1218 ha->addr, err); 1219 } 1220 } 1221 1222 /* Copy mac multicast addresses from @net_dev to @priv 1223 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 1224 */ 1225 static void add_mc_hw_addr(const struct net_device *net_dev, 1226 struct dpaa2_eth_priv *priv) 1227 { 1228 struct netdev_hw_addr *ha; 1229 int err; 1230 1231 netdev_for_each_mc_addr(ha, net_dev) { 1232 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, 1233 ha->addr); 1234 if (err) 1235 netdev_warn(priv->net_dev, 1236 "Could not add mcast MAC %pM to the filtering table (err %d)\n", 1237 ha->addr, err); 1238 } 1239 } 1240 1241 static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) 1242 { 1243 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1244 int uc_count = netdev_uc_count(net_dev); 1245 int mc_count = netdev_mc_count(net_dev); 1246 u8 max_mac = priv->dpni_attrs.mac_filter_entries; 1247 u32 options = priv->dpni_attrs.options; 1248 u16 mc_token = priv->mc_token; 1249 struct fsl_mc_io *mc_io = priv->mc_io; 1250 int err; 1251 1252 /* Basic sanity checks; these probably indicate a misconfiguration */ 1253 if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0) 1254 netdev_info(net_dev, 1255 "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n", 1256 max_mac); 1257 1258 /* Force promiscuous if the uc or mc counts exceed our capabilities. */ 1259 if (uc_count > max_mac) { 1260 netdev_info(net_dev, 1261 "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", 1262 uc_count, max_mac); 1263 goto force_promisc; 1264 } 1265 if (mc_count + uc_count > max_mac) { 1266 netdev_info(net_dev, 1267 "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n", 1268 uc_count + mc_count, max_mac); 1269 goto force_mc_promisc; 1270 } 1271 1272 /* Adjust promisc settings due to flag combinations */ 1273 if (net_dev->flags & IFF_PROMISC) 1274 goto force_promisc; 1275 if (net_dev->flags & IFF_ALLMULTI) { 1276 /* First, rebuild unicast filtering table. This should be done 1277 * in promisc mode, in order to avoid frame loss while we 1278 * progressively add entries to the table. 1279 * We don't know whether we had been in promisc already, and 1280 * making an MC call to find out is expensive; so set uc promisc 1281 * nonetheless. 1282 */ 1283 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1284 if (err) 1285 netdev_warn(net_dev, "Can't set uc promisc\n"); 1286 1287 /* Actual uc table reconstruction. */ 1288 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); 1289 if (err) 1290 netdev_warn(net_dev, "Can't clear uc filters\n"); 1291 add_uc_hw_addr(net_dev, priv); 1292 1293 /* Finally, clear uc promisc and set mc promisc as requested. */ 1294 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 1295 if (err) 1296 netdev_warn(net_dev, "Can't clear uc promisc\n"); 1297 goto force_mc_promisc; 1298 } 1299 1300 /* Neither unicast, nor multicast promisc will be on... eventually. 1301 * For now, rebuild mac filtering tables while forcing both of them on. 
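 * In short: force both promisc modes on, clear and repopulate the filter
 * tables via add_mc_hw_addr()/add_uc_hw_addr(), then clear promisc again,
 * so no legitimate frame is dropped while the tables are rebuilt; errors
 * from the individual MC commands are only logged.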
1302 */ 1303 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1304 if (err) 1305 netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); 1306 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 1307 if (err) 1308 netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); 1309 1310 /* Actual mac filtering tables reconstruction */ 1311 err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); 1312 if (err) 1313 netdev_warn(net_dev, "Can't clear mac filters\n"); 1314 add_mc_hw_addr(net_dev, priv); 1315 add_uc_hw_addr(net_dev, priv); 1316 1317 /* Now we can clear both ucast and mcast promisc, without risking 1318 * to drop legitimate frames anymore. 1319 */ 1320 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); 1321 if (err) 1322 netdev_warn(net_dev, "Can't clear ucast promisc\n"); 1323 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); 1324 if (err) 1325 netdev_warn(net_dev, "Can't clear mcast promisc\n"); 1326 1327 return; 1328 1329 force_promisc: 1330 err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); 1331 if (err) 1332 netdev_warn(net_dev, "Can't set ucast promisc\n"); 1333 force_mc_promisc: 1334 err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); 1335 if (err) 1336 netdev_warn(net_dev, "Can't set mcast promisc\n"); 1337 } 1338 1339 static int dpaa2_eth_set_features(struct net_device *net_dev, 1340 netdev_features_t features) 1341 { 1342 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 1343 netdev_features_t changed = features ^ net_dev->features; 1344 bool enable; 1345 int err; 1346 1347 if (changed & NETIF_F_RXCSUM) { 1348 enable = !!(features & NETIF_F_RXCSUM); 1349 err = set_rx_csum(priv, enable); 1350 if (err) 1351 return err; 1352 } 1353 1354 if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { 1355 enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); 1356 err = set_tx_csum(priv, enable); 1357 if (err) 1358 return err; 1359 } 1360 1361 return 0; 1362 } 1363 1364 static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1365 { 1366 struct dpaa2_eth_priv *priv = netdev_priv(dev); 1367 struct hwtstamp_config config; 1368 1369 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 1370 return -EFAULT; 1371 1372 switch (config.tx_type) { 1373 case HWTSTAMP_TX_OFF: 1374 priv->tx_tstamp = false; 1375 break; 1376 case HWTSTAMP_TX_ON: 1377 priv->tx_tstamp = true; 1378 break; 1379 default: 1380 return -ERANGE; 1381 } 1382 1383 if (config.rx_filter == HWTSTAMP_FILTER_NONE) { 1384 priv->rx_tstamp = false; 1385 } else { 1386 priv->rx_tstamp = true; 1387 /* TS is set for all frame types, not only those requested */ 1388 config.rx_filter = HWTSTAMP_FILTER_ALL; 1389 } 1390 1391 return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
1392 -EFAULT : 0; 1393 } 1394 1395 static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 1396 { 1397 if (cmd == SIOCSHWTSTAMP) 1398 return dpaa2_eth_ts_ioctl(dev, rq, cmd); 1399 1400 return -EINVAL; 1401 } 1402 1403 static const struct net_device_ops dpaa2_eth_ops = { 1404 .ndo_open = dpaa2_eth_open, 1405 .ndo_start_xmit = dpaa2_eth_tx, 1406 .ndo_stop = dpaa2_eth_stop, 1407 .ndo_set_mac_address = dpaa2_eth_set_addr, 1408 .ndo_get_stats64 = dpaa2_eth_get_stats, 1409 .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, 1410 .ndo_set_features = dpaa2_eth_set_features, 1411 .ndo_do_ioctl = dpaa2_eth_ioctl, 1412 }; 1413 1414 static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) 1415 { 1416 struct dpaa2_eth_channel *ch; 1417 1418 ch = container_of(ctx, struct dpaa2_eth_channel, nctx); 1419 1420 /* Update NAPI statistics */ 1421 ch->stats.cdan++; 1422 1423 napi_schedule_irqoff(&ch->napi); 1424 } 1425 1426 /* Allocate and configure a DPCON object */ 1427 static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) 1428 { 1429 struct fsl_mc_device *dpcon; 1430 struct device *dev = priv->net_dev->dev.parent; 1431 struct dpcon_attr attrs; 1432 int err; 1433 1434 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), 1435 FSL_MC_POOL_DPCON, &dpcon); 1436 if (err) { 1437 dev_info(dev, "Not enough DPCONs, will go on as-is\n"); 1438 return NULL; 1439 } 1440 1441 err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); 1442 if (err) { 1443 dev_err(dev, "dpcon_open() failed\n"); 1444 goto free; 1445 } 1446 1447 err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); 1448 if (err) { 1449 dev_err(dev, "dpcon_reset() failed\n"); 1450 goto close; 1451 } 1452 1453 err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); 1454 if (err) { 1455 dev_err(dev, "dpcon_get_attributes() failed\n"); 1456 goto close; 1457 } 1458 1459 err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); 1460 if (err) { 1461 dev_err(dev, "dpcon_enable() failed\n"); 1462 goto close; 1463 } 1464 1465 return dpcon; 1466 1467 close: 1468 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 1469 free: 1470 fsl_mc_object_free(dpcon); 1471 1472 return NULL; 1473 } 1474 1475 static void free_dpcon(struct dpaa2_eth_priv *priv, 1476 struct fsl_mc_device *dpcon) 1477 { 1478 dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); 1479 dpcon_close(priv->mc_io, 0, dpcon->mc_handle); 1480 fsl_mc_object_free(dpcon); 1481 } 1482 1483 static struct dpaa2_eth_channel * 1484 alloc_channel(struct dpaa2_eth_priv *priv) 1485 { 1486 struct dpaa2_eth_channel *channel; 1487 struct dpcon_attr attr; 1488 struct device *dev = priv->net_dev->dev.parent; 1489 int err; 1490 1491 channel = kzalloc(sizeof(*channel), GFP_KERNEL); 1492 if (!channel) 1493 return NULL; 1494 1495 channel->dpcon = setup_dpcon(priv); 1496 if (!channel->dpcon) 1497 goto err_setup; 1498 1499 err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, 1500 &attr); 1501 if (err) { 1502 dev_err(dev, "dpcon_get_attributes() failed\n"); 1503 goto err_get_attr; 1504 } 1505 1506 channel->dpcon_id = attr.id; 1507 channel->ch_id = attr.qbman_ch_id; 1508 channel->priv = priv; 1509 1510 return channel; 1511 1512 err_get_attr: 1513 free_dpcon(priv, channel->dpcon); 1514 err_setup: 1515 kfree(channel); 1516 return NULL; 1517 } 1518 1519 static void free_channel(struct dpaa2_eth_priv *priv, 1520 struct dpaa2_eth_channel *channel) 1521 { 1522 free_dpcon(priv, channel->dpcon); 1523 kfree(channel); 1524 } 1525 1526 /* DPIO setup: allocate and configure QBMan channels, 
setup core affinity 1527 * and register data availability notifications 1528 */ 1529 static int setup_dpio(struct dpaa2_eth_priv *priv) 1530 { 1531 struct dpaa2_io_notification_ctx *nctx; 1532 struct dpaa2_eth_channel *channel; 1533 struct dpcon_notification_cfg dpcon_notif_cfg; 1534 struct device *dev = priv->net_dev->dev.parent; 1535 int i, err; 1536 1537 /* We want the ability to spread ingress traffic (RX, TX conf) to as 1538 * many cores as possible, so we need one channel for each core 1539 * (unless there's fewer queues than cores, in which case the extra 1540 * channels would be wasted). 1541 * Allocate one channel per core and register it to the core's 1542 * affine DPIO. If not enough channels are available for all cores 1543 * or if some cores don't have an affine DPIO, there will be no 1544 * ingress frame processing on those cores. 1545 */ 1546 cpumask_clear(&priv->dpio_cpumask); 1547 for_each_online_cpu(i) { 1548 /* Try to allocate a channel */ 1549 channel = alloc_channel(priv); 1550 if (!channel) { 1551 dev_info(dev, 1552 "No affine channel for cpu %d and above\n", i); 1553 err = -ENODEV; 1554 goto err_alloc_ch; 1555 } 1556 1557 priv->channel[priv->num_channels] = channel; 1558 1559 nctx = &channel->nctx; 1560 nctx->is_cdan = 1; 1561 nctx->cb = cdan_cb; 1562 nctx->id = channel->ch_id; 1563 nctx->desired_cpu = i; 1564 1565 /* Register the new context */ 1566 channel->dpio = dpaa2_io_service_select(i); 1567 err = dpaa2_io_service_register(channel->dpio, nctx); 1568 if (err) { 1569 dev_dbg(dev, "No affine DPIO for cpu %d\n", i); 1570 /* If no affine DPIO for this core, there's probably 1571 * none available for next cores either. Signal we want 1572 * to retry later, in case the DPIO devices weren't 1573 * probed yet. 1574 */ 1575 err = -EPROBE_DEFER; 1576 goto err_service_reg; 1577 } 1578 1579 /* Register DPCON notification with MC */ 1580 dpcon_notif_cfg.dpio_id = nctx->dpio_id; 1581 dpcon_notif_cfg.priority = 0; 1582 dpcon_notif_cfg.user_ctx = nctx->qman64; 1583 err = dpcon_set_notification(priv->mc_io, 0, 1584 channel->dpcon->mc_handle, 1585 &dpcon_notif_cfg); 1586 if (err) { 1587 dev_err(dev, "dpcon_set_notification failed()\n"); 1588 goto err_set_cdan; 1589 } 1590 1591 /* If we managed to allocate a channel and also found an affine 1592 * DPIO for this core, add it to the final mask 1593 */ 1594 cpumask_set_cpu(i, &priv->dpio_cpumask); 1595 priv->num_channels++; 1596 1597 /* Stop if we already have enough channels to accommodate all 1598 * RX and TX conf queues 1599 */ 1600 if (priv->num_channels == dpaa2_eth_queue_count(priv)) 1601 break; 1602 } 1603 1604 return 0; 1605 1606 err_set_cdan: 1607 dpaa2_io_service_deregister(channel->dpio, nctx); 1608 err_service_reg: 1609 free_channel(priv, channel); 1610 err_alloc_ch: 1611 if (cpumask_empty(&priv->dpio_cpumask)) { 1612 dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); 1613 return err; 1614 } 1615 1616 dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", 1617 cpumask_pr_args(&priv->dpio_cpumask)); 1618 1619 return 0; 1620 } 1621 1622 static void free_dpio(struct dpaa2_eth_priv *priv) 1623 { 1624 int i; 1625 struct dpaa2_eth_channel *ch; 1626 1627 /* deregister CDAN notifications and free channels */ 1628 for (i = 0; i < priv->num_channels; i++) { 1629 ch = priv->channel[i]; 1630 dpaa2_io_service_deregister(ch->dpio, &ch->nctx); 1631 free_channel(priv, ch); 1632 } 1633 } 1634 1635 static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, 1636 int cpu) 1637 { 1638 struct device 
*dev = priv->net_dev->dev.parent; 1639 int i; 1640 1641 for (i = 0; i < priv->num_channels; i++) 1642 if (priv->channel[i]->nctx.desired_cpu == cpu) 1643 return priv->channel[i]; 1644 1645 /* We should never get here. Issue a warning and return 1646 * the first channel, because it's still better than nothing 1647 */ 1648 dev_warn(dev, "No affine channel found for cpu %d\n", cpu); 1649 1650 return priv->channel[0]; 1651 } 1652 1653 static void set_fq_affinity(struct dpaa2_eth_priv *priv) 1654 { 1655 struct device *dev = priv->net_dev->dev.parent; 1656 struct cpumask xps_mask; 1657 struct dpaa2_eth_fq *fq; 1658 int rx_cpu, txc_cpu; 1659 int i, err; 1660 1661 /* For each FQ, pick one channel/CPU to deliver frames to. 1662 * This may well change at runtime, either through irqbalance or 1663 * through direct user intervention. 1664 */ 1665 rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask); 1666 1667 for (i = 0; i < priv->num_fqs; i++) { 1668 fq = &priv->fq[i]; 1669 switch (fq->type) { 1670 case DPAA2_RX_FQ: 1671 fq->target_cpu = rx_cpu; 1672 rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); 1673 if (rx_cpu >= nr_cpu_ids) 1674 rx_cpu = cpumask_first(&priv->dpio_cpumask); 1675 break; 1676 case DPAA2_TX_CONF_FQ: 1677 fq->target_cpu = txc_cpu; 1678 1679 /* Tell the stack to affine to txc_cpu the Tx queue 1680 * associated with the confirmation one 1681 */ 1682 cpumask_clear(&xps_mask); 1683 cpumask_set_cpu(txc_cpu, &xps_mask); 1684 err = netif_set_xps_queue(priv->net_dev, &xps_mask, 1685 fq->flowid); 1686 if (err) 1687 dev_err(dev, "Error setting XPS queue\n"); 1688 1689 txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask); 1690 if (txc_cpu >= nr_cpu_ids) 1691 txc_cpu = cpumask_first(&priv->dpio_cpumask); 1692 break; 1693 default: 1694 dev_err(dev, "Unknown FQ type: %d\n", fq->type); 1695 } 1696 fq->channel = get_affine_channel(priv, fq->target_cpu); 1697 } 1698 } 1699 1700 static void setup_fqs(struct dpaa2_eth_priv *priv) 1701 { 1702 int i; 1703 1704 /* We have one TxConf FQ per Tx flow. 1705 * The number of Tx and Rx queues is the same. 1706 * Tx queues come first in the fq array. 
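 *
 * For example, with dpaa2_eth_queue_count() == N the array below ends up
 * laid out as (illustration only):
 *
 *	fq[0] .. fq[N-1]   : DPAA2_TX_CONF_FQ, flowid 0 .. N-1
 *	fq[N] .. fq[2N-1]  : DPAA2_RX_FQ,      flowid 0 .. N-1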
1707 */ 1708 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 1709 priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; 1710 priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; 1711 priv->fq[priv->num_fqs++].flowid = (u16)i; 1712 } 1713 1714 for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { 1715 priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; 1716 priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; 1717 priv->fq[priv->num_fqs++].flowid = (u16)i; 1718 } 1719 1720 /* For each FQ, decide on which core to process incoming frames */ 1721 set_fq_affinity(priv); 1722 } 1723 1724 /* Allocate and configure one buffer pool for each interface */ 1725 static int setup_dpbp(struct dpaa2_eth_priv *priv) 1726 { 1727 int err; 1728 struct fsl_mc_device *dpbp_dev; 1729 struct device *dev = priv->net_dev->dev.parent; 1730 struct dpbp_attr dpbp_attrs; 1731 1732 err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, 1733 &dpbp_dev); 1734 if (err) { 1735 dev_err(dev, "DPBP device allocation failed\n"); 1736 return err; 1737 } 1738 1739 priv->dpbp_dev = dpbp_dev; 1740 1741 err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, 1742 &dpbp_dev->mc_handle); 1743 if (err) { 1744 dev_err(dev, "dpbp_open() failed\n"); 1745 goto err_open; 1746 } 1747 1748 err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); 1749 if (err) { 1750 dev_err(dev, "dpbp_reset() failed\n"); 1751 goto err_reset; 1752 } 1753 1754 err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); 1755 if (err) { 1756 dev_err(dev, "dpbp_enable() failed\n"); 1757 goto err_enable; 1758 } 1759 1760 err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, 1761 &dpbp_attrs); 1762 if (err) { 1763 dev_err(dev, "dpbp_get_attributes() failed\n"); 1764 goto err_get_attr; 1765 } 1766 priv->bpid = dpbp_attrs.bpid; 1767 1768 return 0; 1769 1770 err_get_attr: 1771 dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); 1772 err_enable: 1773 err_reset: 1774 dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); 1775 err_open: 1776 fsl_mc_object_free(dpbp_dev); 1777 1778 return err; 1779 } 1780 1781 static void free_dpbp(struct dpaa2_eth_priv *priv) 1782 { 1783 drain_pool(priv); 1784 dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 1785 dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); 1786 fsl_mc_object_free(priv->dpbp_dev); 1787 } 1788 1789 static int set_buffer_layout(struct dpaa2_eth_priv *priv) 1790 { 1791 struct device *dev = priv->net_dev->dev.parent; 1792 struct dpni_buffer_layout buf_layout = {0}; 1793 int err; 1794 1795 /* We need to check for WRIOP version 1.0.0, but depending on the MC 1796 * version, this number is not always provided correctly on rev1. 1797 * We need to check for both alternatives in this situation. 
1798 */ 1799 if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) || 1800 priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0)) 1801 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1; 1802 else 1803 priv->rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN; 1804 1805 /* tx buffer */ 1806 buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; 1807 buf_layout.pass_timestamp = true; 1808 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | 1809 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1810 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1811 DPNI_QUEUE_TX, &buf_layout); 1812 if (err) { 1813 dev_err(dev, "dpni_set_buffer_layout(TX) failed\n"); 1814 return err; 1815 } 1816 1817 /* tx-confirm buffer */ 1818 buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1819 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1820 DPNI_QUEUE_TX_CONFIRM, &buf_layout); 1821 if (err) { 1822 dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n"); 1823 return err; 1824 } 1825 1826 /* Now that we've set our tx buffer layout, retrieve the minimum 1827 * required tx data offset. 1828 */ 1829 err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, 1830 &priv->tx_data_offset); 1831 if (err) { 1832 dev_err(dev, "dpni_get_tx_data_offset() failed\n"); 1833 return err; 1834 } 1835 1836 if ((priv->tx_data_offset % 64) != 0) 1837 dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n", 1838 priv->tx_data_offset); 1839 1840 /* rx buffer */ 1841 buf_layout.pass_frame_status = true; 1842 buf_layout.pass_parser_result = true; 1843 buf_layout.data_align = priv->rx_buf_align; 1844 buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv); 1845 buf_layout.private_data_size = 0; 1846 buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | 1847 DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | 1848 DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | 1849 DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM | 1850 DPNI_BUF_LAYOUT_OPT_TIMESTAMP; 1851 err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, 1852 DPNI_QUEUE_RX, &buf_layout); 1853 if (err) { 1854 dev_err(dev, "dpni_set_buffer_layout(RX) failed\n"); 1855 return err; 1856 } 1857 1858 return 0; 1859 } 1860 1861 /* Configure the DPNI object this interface is associated with */ 1862 static int setup_dpni(struct fsl_mc_device *ls_dev) 1863 { 1864 struct device *dev = &ls_dev->dev; 1865 struct dpaa2_eth_priv *priv; 1866 struct net_device *net_dev; 1867 int err; 1868 1869 net_dev = dev_get_drvdata(dev); 1870 priv = netdev_priv(net_dev); 1871 1872 /* get a handle for the DPNI object */ 1873 err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token); 1874 if (err) { 1875 dev_err(dev, "dpni_open() failed\n"); 1876 return err; 1877 } 1878 1879 /* Check if we can work with this DPNI object */ 1880 err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major, 1881 &priv->dpni_ver_minor); 1882 if (err) { 1883 dev_err(dev, "dpni_get_api_version() failed\n"); 1884 goto close; 1885 } 1886 if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) { 1887 dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n", 1888 priv->dpni_ver_major, priv->dpni_ver_minor, 1889 DPNI_VER_MAJOR, DPNI_VER_MINOR); 1890 err = -ENOTSUPP; 1891 goto close; 1892 } 1893 1894 ls_dev->mc_io = priv->mc_io; 1895 ls_dev->mc_handle = priv->mc_token; 1896 1897 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 1898 if (err) { 1899 dev_err(dev, "dpni_reset() failed\n"); 1900 goto close; 1901 } 1902 1903 err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, 1904 &priv->dpni_attrs); 1905 if (err) 
{ 1906 dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); 1907 goto close; 1908 } 1909 1910 err = set_buffer_layout(priv); 1911 if (err) 1912 goto close; 1913 1914 priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) * 1915 dpaa2_eth_fs_count(priv), GFP_KERNEL); 1916 if (!priv->cls_rules) 1917 goto close; 1918 1919 return 0; 1920 1921 close: 1922 dpni_close(priv->mc_io, 0, priv->mc_token); 1923 1924 return err; 1925 } 1926 1927 static void free_dpni(struct dpaa2_eth_priv *priv) 1928 { 1929 int err; 1930 1931 err = dpni_reset(priv->mc_io, 0, priv->mc_token); 1932 if (err) 1933 netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", 1934 err); 1935 1936 dpni_close(priv->mc_io, 0, priv->mc_token); 1937 } 1938 1939 static int setup_rx_flow(struct dpaa2_eth_priv *priv, 1940 struct dpaa2_eth_fq *fq) 1941 { 1942 struct device *dev = priv->net_dev->dev.parent; 1943 struct dpni_queue queue; 1944 struct dpni_queue_id qid; 1945 struct dpni_taildrop td; 1946 int err; 1947 1948 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 1949 DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid); 1950 if (err) { 1951 dev_err(dev, "dpni_get_queue(RX) failed\n"); 1952 return err; 1953 } 1954 1955 fq->fqid = qid.fqid; 1956 1957 queue.destination.id = fq->channel->dpcon_id; 1958 queue.destination.type = DPNI_DEST_DPCON; 1959 queue.destination.priority = 1; 1960 queue.user_context = (u64)(uintptr_t)fq; 1961 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 1962 DPNI_QUEUE_RX, 0, fq->flowid, 1963 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 1964 &queue); 1965 if (err) { 1966 dev_err(dev, "dpni_set_queue(RX) failed\n"); 1967 return err; 1968 } 1969 1970 td.enable = 1; 1971 td.threshold = DPAA2_ETH_TAILDROP_THRESH; 1972 err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token, DPNI_CP_QUEUE, 1973 DPNI_QUEUE_RX, 0, fq->flowid, &td); 1974 if (err) { 1975 dev_err(dev, "dpni_set_threshold() failed\n"); 1976 return err; 1977 } 1978 1979 return 0; 1980 } 1981 1982 static int setup_tx_flow(struct dpaa2_eth_priv *priv, 1983 struct dpaa2_eth_fq *fq) 1984 { 1985 struct device *dev = priv->net_dev->dev.parent; 1986 struct dpni_queue queue; 1987 struct dpni_queue_id qid; 1988 int err; 1989 1990 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 1991 DPNI_QUEUE_TX, 0, fq->flowid, &queue, &qid); 1992 if (err) { 1993 dev_err(dev, "dpni_get_queue(TX) failed\n"); 1994 return err; 1995 } 1996 1997 fq->tx_qdbin = qid.qdbin; 1998 1999 err = dpni_get_queue(priv->mc_io, 0, priv->mc_token, 2000 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 2001 &queue, &qid); 2002 if (err) { 2003 dev_err(dev, "dpni_get_queue(TX_CONF) failed\n"); 2004 return err; 2005 } 2006 2007 fq->fqid = qid.fqid; 2008 2009 queue.destination.id = fq->channel->dpcon_id; 2010 queue.destination.type = DPNI_DEST_DPCON; 2011 queue.destination.priority = 0; 2012 queue.user_context = (u64)(uintptr_t)fq; 2013 err = dpni_set_queue(priv->mc_io, 0, priv->mc_token, 2014 DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid, 2015 DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST, 2016 &queue); 2017 if (err) { 2018 dev_err(dev, "dpni_set_queue(TX_CONF) failed\n"); 2019 return err; 2020 } 2021 2022 return 0; 2023 } 2024 2025 /* Supported header fields for Rx hash distribution key */ 2026 static const struct dpaa2_eth_dist_fields dist_fields[] = { 2027 { 2028 /* L2 header */ 2029 .rxnfc_field = RXH_L2DA, 2030 .cls_prot = NET_PROT_ETH, 2031 .cls_field = NH_FLD_ETH_DA, 2032 .size = 6, 2033 }, { 2034 .cls_prot = NET_PROT_ETH, 2035 .cls_field = NH_FLD_ETH_SA, 2036 .size = 6, 2037 }, { 2038 /* 
This is the last ethertype field parsed: 2039 * depending on frame format, it can be the MAC ethertype 2040 * or the VLAN etype. 2041 */ 2042 .cls_prot = NET_PROT_ETH, 2043 .cls_field = NH_FLD_ETH_TYPE, 2044 .size = 2, 2045 }, { 2046 /* VLAN header */ 2047 .rxnfc_field = RXH_VLAN, 2048 .cls_prot = NET_PROT_VLAN, 2049 .cls_field = NH_FLD_VLAN_TCI, 2050 .size = 2, 2051 }, { 2052 /* IP header */ 2053 .rxnfc_field = RXH_IP_SRC, 2054 .cls_prot = NET_PROT_IP, 2055 .cls_field = NH_FLD_IP_SRC, 2056 .size = 4, 2057 }, { 2058 .rxnfc_field = RXH_IP_DST, 2059 .cls_prot = NET_PROT_IP, 2060 .cls_field = NH_FLD_IP_DST, 2061 .size = 4, 2062 }, { 2063 .rxnfc_field = RXH_L3_PROTO, 2064 .cls_prot = NET_PROT_IP, 2065 .cls_field = NH_FLD_IP_PROTO, 2066 .size = 1, 2067 }, { 2068 /* Using UDP ports, this is functionally equivalent to raw 2069 * byte pairs from L4 header. 2070 */ 2071 .rxnfc_field = RXH_L4_B_0_1, 2072 .cls_prot = NET_PROT_UDP, 2073 .cls_field = NH_FLD_UDP_PORT_SRC, 2074 .size = 2, 2075 }, { 2076 .rxnfc_field = RXH_L4_B_2_3, 2077 .cls_prot = NET_PROT_UDP, 2078 .cls_field = NH_FLD_UDP_PORT_DST, 2079 .size = 2, 2080 }, 2081 }; 2082 2083 /* Configure the Rx hash key using the legacy API */ 2084 static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2085 { 2086 struct device *dev = priv->net_dev->dev.parent; 2087 struct dpni_rx_tc_dist_cfg dist_cfg; 2088 int err; 2089 2090 memset(&dist_cfg, 0, sizeof(dist_cfg)); 2091 2092 dist_cfg.key_cfg_iova = key; 2093 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2094 dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; 2095 2096 err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); 2097 if (err) 2098 dev_err(dev, "dpni_set_rx_tc_dist failed\n"); 2099 2100 return err; 2101 } 2102 2103 /* Configure the Rx hash key using the new API */ 2104 static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2105 { 2106 struct device *dev = priv->net_dev->dev.parent; 2107 struct dpni_rx_dist_cfg dist_cfg; 2108 int err; 2109 2110 memset(&dist_cfg, 0, sizeof(dist_cfg)); 2111 2112 dist_cfg.key_cfg_iova = key; 2113 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2114 dist_cfg.enable = 1; 2115 2116 err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 2117 if (err) 2118 dev_err(dev, "dpni_set_rx_hash_dist failed\n"); 2119 2120 return err; 2121 } 2122 2123 /* Configure the Rx flow classification key */ 2124 static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key) 2125 { 2126 struct device *dev = priv->net_dev->dev.parent; 2127 struct dpni_rx_dist_cfg dist_cfg; 2128 int err; 2129 2130 memset(&dist_cfg, 0, sizeof(dist_cfg)); 2131 2132 dist_cfg.key_cfg_iova = key; 2133 dist_cfg.dist_size = dpaa2_eth_queue_count(priv); 2134 dist_cfg.enable = 1; 2135 2136 err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg); 2137 if (err) 2138 dev_err(dev, "dpni_set_rx_fs_dist failed\n"); 2139 2140 return err; 2141 } 2142 2143 /* Size of the Rx flow classification key */ 2144 int dpaa2_eth_cls_key_size(void) 2145 { 2146 int i, size = 0; 2147 2148 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) 2149 size += dist_fields[i].size; 2150 2151 return size; 2152 } 2153 2154 /* Offset of header field in Rx classification key */ 2155 int dpaa2_eth_cls_fld_off(int prot, int field) 2156 { 2157 int i, off = 0; 2158 2159 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 2160 if (dist_fields[i].cls_prot == prot && 2161 dist_fields[i].cls_field == field) 2162 return off; 2163 off += dist_fields[i].size; 2164 } 2165 2166 
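	/* Reaching this point means the (prot, field) pair was not found in
	 * dist_fields[], i.e. the caller asked for a header field that is
	 * not part of the classification key layout built from that table.
	 */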
WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n"); 2167 return 0; 2168 } 2169 2170 /* Set Rx distribution (hash or flow classification) key 2171 * flags is a combination of RXH_ bits 2172 */ 2173 static int dpaa2_eth_set_dist_key(struct net_device *net_dev, 2174 enum dpaa2_eth_rx_dist type, u64 flags) 2175 { 2176 struct device *dev = net_dev->dev.parent; 2177 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2178 struct dpkg_profile_cfg cls_cfg; 2179 u32 rx_hash_fields = 0; 2180 dma_addr_t key_iova; 2181 u8 *dma_mem; 2182 int i; 2183 int err = 0; 2184 2185 memset(&cls_cfg, 0, sizeof(cls_cfg)); 2186 2187 for (i = 0; i < ARRAY_SIZE(dist_fields); i++) { 2188 struct dpkg_extract *key = 2189 &cls_cfg.extracts[cls_cfg.num_extracts]; 2190 2191 /* For Rx hashing key we set only the selected fields. 2192 * For Rx flow classification key we set all supported fields 2193 */ 2194 if (type == DPAA2_ETH_RX_DIST_HASH) { 2195 if (!(flags & dist_fields[i].rxnfc_field)) 2196 continue; 2197 rx_hash_fields |= dist_fields[i].rxnfc_field; 2198 } 2199 2200 if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { 2201 dev_err(dev, "error adding key extraction rule, too many rules?\n"); 2202 return -E2BIG; 2203 } 2204 2205 key->type = DPKG_EXTRACT_FROM_HDR; 2206 key->extract.from_hdr.prot = dist_fields[i].cls_prot; 2207 key->extract.from_hdr.type = DPKG_FULL_FIELD; 2208 key->extract.from_hdr.field = dist_fields[i].cls_field; 2209 cls_cfg.num_extracts++; 2210 } 2211 2212 dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); 2213 if (!dma_mem) 2214 return -ENOMEM; 2215 2216 err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); 2217 if (err) { 2218 dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); 2219 goto free_key; 2220 } 2221 2222 /* Prepare for setting the rx dist */ 2223 key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, 2224 DMA_TO_DEVICE); 2225 if (dma_mapping_error(dev, key_iova)) { 2226 dev_err(dev, "DMA mapping failed\n"); 2227 err = -ENOMEM; 2228 goto free_key; 2229 } 2230 2231 if (type == DPAA2_ETH_RX_DIST_HASH) { 2232 if (dpaa2_eth_has_legacy_dist(priv)) 2233 err = config_legacy_hash_key(priv, key_iova); 2234 else 2235 err = config_hash_key(priv, key_iova); 2236 } else { 2237 err = config_cls_key(priv, key_iova); 2238 } 2239 2240 dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, 2241 DMA_TO_DEVICE); 2242 if (!err && type == DPAA2_ETH_RX_DIST_HASH) 2243 priv->rx_hash_fields = rx_hash_fields; 2244 2245 free_key: 2246 kfree(dma_mem); 2247 return err; 2248 } 2249 2250 int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) 2251 { 2252 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2253 2254 if (!dpaa2_eth_hash_enabled(priv)) 2255 return -EOPNOTSUPP; 2256 2257 return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, flags); 2258 } 2259 2260 static int dpaa2_eth_set_cls(struct dpaa2_eth_priv *priv) 2261 { 2262 struct device *dev = priv->net_dev->dev.parent; 2263 2264 /* Check if we actually support Rx flow classification */ 2265 if (dpaa2_eth_has_legacy_dist(priv)) { 2266 dev_dbg(dev, "Rx cls not supported by current MC version\n"); 2267 return -EOPNOTSUPP; 2268 } 2269 2270 if (priv->dpni_attrs.options & DPNI_OPT_NO_FS || 2271 !(priv->dpni_attrs.options & DPNI_OPT_HAS_KEY_MASKING)) { 2272 dev_dbg(dev, "Rx cls disabled in DPNI options\n"); 2273 return -EOPNOTSUPP; 2274 } 2275 2276 if (!dpaa2_eth_hash_enabled(priv)) { 2277 dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); 2278 return -EOPNOTSUPP; 2279 } 2280 2281 priv->rx_cls_enabled = 1; 2282 
2283 return dpaa2_eth_set_dist_key(priv->net_dev, DPAA2_ETH_RX_DIST_CLS, 0); 2284 } 2285 2286 /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, 2287 * frame queues and channels 2288 */ 2289 static int bind_dpni(struct dpaa2_eth_priv *priv) 2290 { 2291 struct net_device *net_dev = priv->net_dev; 2292 struct device *dev = net_dev->dev.parent; 2293 struct dpni_pools_cfg pools_params; 2294 struct dpni_error_cfg err_cfg; 2295 int err = 0; 2296 int i; 2297 2298 pools_params.num_dpbp = 1; 2299 pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; 2300 pools_params.pools[0].backup_pool = 0; 2301 pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; 2302 err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); 2303 if (err) { 2304 dev_err(dev, "dpni_set_pools() failed\n"); 2305 return err; 2306 } 2307 2308 /* have the interface implicitly distribute traffic based on 2309 * the default hash key 2310 */ 2311 err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT); 2312 if (err && err != -EOPNOTSUPP) 2313 dev_err(dev, "Failed to configure hashing\n"); 2314 2315 /* Configure the flow classification key; it includes all 2316 * supported header fields and cannot be modified at runtime 2317 */ 2318 err = dpaa2_eth_set_cls(priv); 2319 if (err && err != -EOPNOTSUPP) 2320 dev_err(dev, "Failed to configure Rx classification key\n"); 2321 2322 /* Configure handling of error frames */ 2323 err_cfg.errors = DPAA2_FAS_RX_ERR_MASK; 2324 err_cfg.set_frame_annotation = 1; 2325 err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; 2326 err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, 2327 &err_cfg); 2328 if (err) { 2329 dev_err(dev, "dpni_set_errors_behavior failed\n"); 2330 return err; 2331 } 2332 2333 /* Configure Rx and Tx conf queues to generate CDANs */ 2334 for (i = 0; i < priv->num_fqs; i++) { 2335 switch (priv->fq[i].type) { 2336 case DPAA2_RX_FQ: 2337 err = setup_rx_flow(priv, &priv->fq[i]); 2338 break; 2339 case DPAA2_TX_CONF_FQ: 2340 err = setup_tx_flow(priv, &priv->fq[i]); 2341 break; 2342 default: 2343 dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); 2344 return -EINVAL; 2345 } 2346 if (err) 2347 return err; 2348 } 2349 2350 err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, 2351 DPNI_QUEUE_TX, &priv->tx_qdid); 2352 if (err) { 2353 dev_err(dev, "dpni_get_qdid() failed\n"); 2354 return err; 2355 } 2356 2357 return 0; 2358 } 2359 2360 /* Allocate rings for storing incoming frame descriptors */ 2361 static int alloc_rings(struct dpaa2_eth_priv *priv) 2362 { 2363 struct net_device *net_dev = priv->net_dev; 2364 struct device *dev = net_dev->dev.parent; 2365 int i; 2366 2367 for (i = 0; i < priv->num_channels; i++) { 2368 priv->channel[i]->store = 2369 dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); 2370 if (!priv->channel[i]->store) { 2371 netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); 2372 goto err_ring; 2373 } 2374 } 2375 2376 return 0; 2377 2378 err_ring: 2379 for (i = 0; i < priv->num_channels; i++) { 2380 if (!priv->channel[i]->store) 2381 break; 2382 dpaa2_io_store_destroy(priv->channel[i]->store); 2383 } 2384 2385 return -ENOMEM; 2386 } 2387 2388 static void free_rings(struct dpaa2_eth_priv *priv) 2389 { 2390 int i; 2391 2392 for (i = 0; i < priv->num_channels; i++) 2393 dpaa2_io_store_destroy(priv->channel[i]->store); 2394 } 2395 2396 static int set_mac_addr(struct dpaa2_eth_priv *priv) 2397 { 2398 struct net_device *net_dev = priv->net_dev; 2399 struct device *dev = net_dev->dev.parent; 2400 u8 mac_addr[ETH_ALEN], 
dpni_mac_addr[ETH_ALEN]; 2401 int err; 2402 2403 /* Get firmware address, if any */ 2404 err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr); 2405 if (err) { 2406 dev_err(dev, "dpni_get_port_mac_addr() failed\n"); 2407 return err; 2408 } 2409 2410 /* Get DPNI attributes address, if any */ 2411 err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 2412 dpni_mac_addr); 2413 if (err) { 2414 dev_err(dev, "dpni_get_primary_mac_addr() failed\n"); 2415 return err; 2416 } 2417 2418 /* First check if firmware has any address configured by bootloader */ 2419 if (!is_zero_ether_addr(mac_addr)) { 2420 /* If the DPMAC addr != DPNI addr, update it */ 2421 if (!ether_addr_equal(mac_addr, dpni_mac_addr)) { 2422 err = dpni_set_primary_mac_addr(priv->mc_io, 0, 2423 priv->mc_token, 2424 mac_addr); 2425 if (err) { 2426 dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 2427 return err; 2428 } 2429 } 2430 memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); 2431 } else if (is_zero_ether_addr(dpni_mac_addr)) { 2432 /* No MAC address configured, fill in net_dev->dev_addr 2433 * with a random one 2434 */ 2435 eth_hw_addr_random(net_dev); 2436 dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n"); 2437 2438 err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, 2439 net_dev->dev_addr); 2440 if (err) { 2441 dev_err(dev, "dpni_set_primary_mac_addr() failed\n"); 2442 return err; 2443 } 2444 2445 /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all 2446 * practical purposes, this will be our "permanent" mac address, 2447 * at least until the next reboot. This move will also permit 2448 * register_netdevice() to properly fill up net_dev->perm_addr. 2449 */ 2450 net_dev->addr_assign_type = NET_ADDR_PERM; 2451 } else { 2452 /* NET_ADDR_PERM is default, all we have to do is 2453 * fill in the device addr. 
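		 * The DPNI already holds a valid, non-zero primary MAC
		 * address (e.g. one assigned when the object was created),
		 * so reuse that as the interface address.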
2454 */ 2455 memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len); 2456 } 2457 2458 return 0; 2459 } 2460 2461 static int netdev_init(struct net_device *net_dev) 2462 { 2463 struct device *dev = net_dev->dev.parent; 2464 struct dpaa2_eth_priv *priv = netdev_priv(net_dev); 2465 u32 options = priv->dpni_attrs.options; 2466 u64 supported = 0, not_supported = 0; 2467 u8 bcast_addr[ETH_ALEN]; 2468 u8 num_queues; 2469 int err; 2470 2471 net_dev->netdev_ops = &dpaa2_eth_ops; 2472 net_dev->ethtool_ops = &dpaa2_ethtool_ops; 2473 2474 err = set_mac_addr(priv); 2475 if (err) 2476 return err; 2477 2478 /* Explicitly add the broadcast address to the MAC filtering table */ 2479 eth_broadcast_addr(bcast_addr); 2480 err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); 2481 if (err) { 2482 dev_err(dev, "dpni_add_mac_addr() failed\n"); 2483 return err; 2484 } 2485 2486 /* Set MTU upper limit; lower limit is 68B (default value) */ 2487 net_dev->max_mtu = DPAA2_ETH_MAX_MTU; 2488 err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, 2489 DPAA2_ETH_MFL); 2490 if (err) { 2491 dev_err(dev, "dpni_set_max_frame_length() failed\n"); 2492 return err; 2493 } 2494 2495 /* Set actual number of queues in the net device */ 2496 num_queues = dpaa2_eth_queue_count(priv); 2497 err = netif_set_real_num_tx_queues(net_dev, num_queues); 2498 if (err) { 2499 dev_err(dev, "netif_set_real_num_tx_queues() failed\n"); 2500 return err; 2501 } 2502 err = netif_set_real_num_rx_queues(net_dev, num_queues); 2503 if (err) { 2504 dev_err(dev, "netif_set_real_num_rx_queues() failed\n"); 2505 return err; 2506 } 2507 2508 /* Capabilities listing */ 2509 supported |= IFF_LIVE_ADDR_CHANGE; 2510 2511 if (options & DPNI_OPT_NO_MAC_FILTER) 2512 not_supported |= IFF_UNICAST_FLT; 2513 else 2514 supported |= IFF_UNICAST_FLT; 2515 2516 net_dev->priv_flags |= supported; 2517 net_dev->priv_flags &= ~not_supported; 2518 2519 /* Features */ 2520 net_dev->features = NETIF_F_RXCSUM | 2521 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2522 NETIF_F_SG | NETIF_F_HIGHDMA | 2523 NETIF_F_LLTX; 2524 net_dev->hw_features = net_dev->features; 2525 2526 return 0; 2527 } 2528 2529 static int poll_link_state(void *arg) 2530 { 2531 struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; 2532 int err; 2533 2534 while (!kthread_should_stop()) { 2535 err = link_state_update(priv); 2536 if (unlikely(err)) 2537 return err; 2538 2539 msleep(DPAA2_ETH_LINK_STATE_REFRESH); 2540 } 2541 2542 return 0; 2543 } 2544 2545 static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) 2546 { 2547 u32 status = ~0; 2548 struct device *dev = (struct device *)arg; 2549 struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); 2550 struct net_device *net_dev = dev_get_drvdata(dev); 2551 int err; 2552 2553 err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, 2554 DPNI_IRQ_INDEX, &status); 2555 if (unlikely(err)) { 2556 netdev_err(net_dev, "Can't get irq status (err %d)\n", err); 2557 return IRQ_HANDLED; 2558 } 2559 2560 if (status & DPNI_IRQ_EVENT_LINK_CHANGED) 2561 link_state_update(netdev_priv(net_dev)); 2562 2563 return IRQ_HANDLED; 2564 } 2565 2566 static int setup_irqs(struct fsl_mc_device *ls_dev) 2567 { 2568 int err = 0; 2569 struct fsl_mc_device_irq *irq; 2570 2571 err = fsl_mc_allocate_irqs(ls_dev); 2572 if (err) { 2573 dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); 2574 return err; 2575 } 2576 2577 irq = ls_dev->irqs[0]; 2578 err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq, 2579 NULL, dpni_irq0_handler_thread, 
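					/* No primary handler is provided, so
					 * IRQF_ONESHOT is required: the line
					 * stays masked until the threaded
					 * handler below has finished.
					 */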
2580 IRQF_NO_SUSPEND | IRQF_ONESHOT, 2581 dev_name(&ls_dev->dev), &ls_dev->dev); 2582 if (err < 0) { 2583 dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err); 2584 goto free_mc_irq; 2585 } 2586 2587 err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, 2588 DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED); 2589 if (err < 0) { 2590 dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err); 2591 goto free_irq; 2592 } 2593 2594 err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, 2595 DPNI_IRQ_INDEX, 1); 2596 if (err < 0) { 2597 dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err); 2598 goto free_irq; 2599 } 2600 2601 return 0; 2602 2603 free_irq: 2604 devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev); 2605 free_mc_irq: 2606 fsl_mc_free_irqs(ls_dev); 2607 2608 return err; 2609 } 2610 2611 static void add_ch_napi(struct dpaa2_eth_priv *priv) 2612 { 2613 int i; 2614 struct dpaa2_eth_channel *ch; 2615 2616 for (i = 0; i < priv->num_channels; i++) { 2617 ch = priv->channel[i]; 2618 /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ 2619 netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, 2620 NAPI_POLL_WEIGHT); 2621 } 2622 } 2623 2624 static void del_ch_napi(struct dpaa2_eth_priv *priv) 2625 { 2626 int i; 2627 struct dpaa2_eth_channel *ch; 2628 2629 for (i = 0; i < priv->num_channels; i++) { 2630 ch = priv->channel[i]; 2631 netif_napi_del(&ch->napi); 2632 } 2633 } 2634 2635 static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) 2636 { 2637 struct device *dev; 2638 struct net_device *net_dev = NULL; 2639 struct dpaa2_eth_priv *priv = NULL; 2640 int err = 0; 2641 2642 dev = &dpni_dev->dev; 2643 2644 /* Net device */ 2645 net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); 2646 if (!net_dev) { 2647 dev_err(dev, "alloc_etherdev_mq() failed\n"); 2648 return -ENOMEM; 2649 } 2650 2651 SET_NETDEV_DEV(net_dev, dev); 2652 dev_set_drvdata(dev, net_dev); 2653 2654 priv = netdev_priv(net_dev); 2655 priv->net_dev = net_dev; 2656 2657 priv->iommu_domain = iommu_get_domain_for_dev(dev); 2658 2659 /* Obtain a MC portal */ 2660 err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, 2661 &priv->mc_io); 2662 if (err) { 2663 if (err == -ENXIO) 2664 err = -EPROBE_DEFER; 2665 else 2666 dev_err(dev, "MC portal allocation failed\n"); 2667 goto err_portal_alloc; 2668 } 2669 2670 /* MC objects initialization and configuration */ 2671 err = setup_dpni(dpni_dev); 2672 if (err) 2673 goto err_dpni_setup; 2674 2675 err = setup_dpio(priv); 2676 if (err) 2677 goto err_dpio_setup; 2678 2679 setup_fqs(priv); 2680 2681 err = setup_dpbp(priv); 2682 if (err) 2683 goto err_dpbp_setup; 2684 2685 err = bind_dpni(priv); 2686 if (err) 2687 goto err_bind; 2688 2689 /* Add a NAPI context for each channel */ 2690 add_ch_napi(priv); 2691 2692 /* Percpu statistics */ 2693 priv->percpu_stats = alloc_percpu(*priv->percpu_stats); 2694 if (!priv->percpu_stats) { 2695 dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); 2696 err = -ENOMEM; 2697 goto err_alloc_percpu_stats; 2698 } 2699 priv->percpu_extras = alloc_percpu(*priv->percpu_extras); 2700 if (!priv->percpu_extras) { 2701 dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); 2702 err = -ENOMEM; 2703 goto err_alloc_percpu_extras; 2704 } 2705 2706 err = netdev_init(net_dev); 2707 if (err) 2708 goto err_netdev_init; 2709 2710 /* Configure checksum offload based on current interface flags */ 2711 err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); 2712 if (err) 2713 goto err_csum; 2714 2715 
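	/* Tx checksum generation follows the IP/IPv6 csum feature flags,
	 * mirroring the Rx checksum configuration above.
	 */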
err = set_tx_csum(priv, !!(net_dev->features &
2716				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
2717	if (err)
2718		goto err_csum;
2719
2720	err = alloc_rings(priv);
2721	if (err)
2722		goto err_alloc_rings;
2723
2724	err = setup_irqs(dpni_dev);
2725	if (err) {
2726		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
2727		priv->poll_thread = kthread_run(poll_link_state, priv,
2728						"%s_poll_link", net_dev->name);
2729		if (IS_ERR(priv->poll_thread)) {
2730			dev_err(dev, "Error starting polling thread\n");
2731			goto err_poll_thread;
2732		}
2733		priv->do_link_poll = true;
2734	}
2735
2736	err = register_netdev(net_dev);
2737	if (err < 0) {
2738		dev_err(dev, "register_netdev() failed\n");
2739		goto err_netdev_reg;
2740	}
2741
2742	dev_info(dev, "Probed interface %s\n", net_dev->name);
2743	return 0;
2744
2745 err_netdev_reg:
2746	if (priv->do_link_poll)
2747		kthread_stop(priv->poll_thread);
2748	else
2749		fsl_mc_free_irqs(dpni_dev);
2750 err_poll_thread:
2751	free_rings(priv);
2752 err_alloc_rings:
2753 err_csum:
2754 err_netdev_init:
2755	free_percpu(priv->percpu_extras);
2756 err_alloc_percpu_extras:
2757	free_percpu(priv->percpu_stats);
2758 err_alloc_percpu_stats:
2759	del_ch_napi(priv);
2760 err_bind:
2761	free_dpbp(priv);
2762 err_dpbp_setup:
2763	free_dpio(priv);
2764 err_dpio_setup:
2765	free_dpni(priv);
2766 err_dpni_setup:
2767	fsl_mc_portal_free(priv->mc_io);
2768 err_portal_alloc:
2769	dev_set_drvdata(dev, NULL);
2770	free_netdev(net_dev);
2771
2772	return err;
2773 }
2774
2775 static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
2776 {
2777	struct device *dev;
2778	struct net_device *net_dev;
2779	struct dpaa2_eth_priv *priv;
2780
2781	dev = &ls_dev->dev;
2782	net_dev = dev_get_drvdata(dev);
2783	priv = netdev_priv(net_dev);
2784
2785	unregister_netdev(net_dev);
2786
2787	if (priv->do_link_poll)
2788		kthread_stop(priv->poll_thread);
2789	else
2790		fsl_mc_free_irqs(ls_dev);
2791
2792	free_rings(priv);
2793	free_percpu(priv->percpu_stats);
2794	free_percpu(priv->percpu_extras);
2795
2796	del_ch_napi(priv);
2797	free_dpbp(priv);
2798	free_dpio(priv);
2799	free_dpni(priv);
2800
2801	fsl_mc_portal_free(priv->mc_io);
2802
2803	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);
2804
2805	free_netdev(net_dev);
2806
2807	return 0;
2808 }
2809
2810 static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
2811	{
2812		.vendor = FSL_MC_VENDOR_FREESCALE,
2813		.obj_type = "dpni",
2814	},
2815	{ .vendor = 0x0 }
2816 };
2817 MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);
2818
2819 static struct fsl_mc_driver dpaa2_eth_driver = {
2820	.driver = {
2821		.name = KBUILD_MODNAME,
2822		.owner = THIS_MODULE,
2823	},
2824	.probe = dpaa2_eth_probe,
2825	.remove = dpaa2_eth_remove,
2826	.match_id_table = dpaa2_eth_match_id_table
2827 };
2828
2829 module_fsl_mc_driver(dpaa2_eth_driver);
2830