// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled, it
	 * would delay a working loopback anyway, let's ensure that loopback
	 * is working immediately by setting link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE,
					   &tx->page_dma[i], GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);
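
		/* the skb has already been freed and counted as dropped above,
		 * so NETDEV_TX_OK is returned below to prevent the stack from
		 * requeueing a freed skb
		 */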
		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}

static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;
	int length;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}

static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	unsigned long flags;
	struct tsnep_tx_entry *entry;
	bool pending = false;

	spin_lock_irqsave(&tx->lock, flags);

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return pending;
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
				 struct tsnep_rx_entry *entry)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);

	return 0;
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_SKB_PAD;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
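	/* prepare the descriptor at 'index' for hardware: set the buffer
	 * length and interrupt flag and apply the owner counter, which
	 * signals to hardware that the descriptor is ready
	 */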
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_SKB_PAD);

		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct page *page;
	struct sk_buff *skb;
	int length;
	bool enable = false;
	int retval;

	dma_dir = page_pool_get_dma_dir(rx->page_pool);

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
					      length, dma_dir);
		page = entry->page;

		/* forward skb only if allocation is successful, otherwise
		 * page is reused and frame dropped
		 */
		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (!retval) {
			skb = tsnep_build_skb(rx, page, length);
			if (skb) {
				page_pool_release_page(rx->page_pool, page);

				rx->packets++;
				rx->bytes += length -
					     TSNEP_RX_INLINE_METADATA_SIZE;
				if (skb->pkt_type == PACKET_MULTICAST)
					rx->multicast++;

				napi_gro_receive(napi, skb);
			} else {
				page_pool_recycle_direct(rx->page_pool, page);

				rx->dropped++;
			}
			done++;
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}

static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	entry = &rx->entry[rx->read];
	if ((__le32_to_cpu(entry->desc_wb->properties) &
	     TSNEP_DESC_OWNER_COUNTER_MASK) ==
	    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
		return true;

	return false;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int i;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

	return min(done, budget - 1);
}

static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr, rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}

		retval = tsnep_request_irq(&adapter->queue[i], i == 0);
		if (retval) {
			netif_err(adapter, drv, adapter->netdev,
				  "can't get assigned irq %d.\n",
				  adapter->queue[i].irq);
			goto failed;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

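	/* the new address is validated first; netdev and hardware state are
	 * only updated if it is accepted
	 */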
	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
"%s", 1318 adapter->pdev->name); 1319 1320 /* do not scan broadcast address */ 1321 adapter->mdiobus->phy_mask = 0x0000001; 1322 1323 retval = of_mdiobus_register(adapter->mdiobus, np); 1324 1325 out: 1326 of_node_put(np); 1327 1328 return retval; 1329 } 1330 1331 static int tsnep_phy_init(struct tsnep_adapter *adapter) 1332 { 1333 struct device_node *phy_node; 1334 int retval; 1335 1336 retval = of_get_phy_mode(adapter->pdev->dev.of_node, 1337 &adapter->phy_mode); 1338 if (retval) 1339 adapter->phy_mode = PHY_INTERFACE_MODE_GMII; 1340 1341 phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", 1342 0); 1343 adapter->phydev = of_phy_find_device(phy_node); 1344 of_node_put(phy_node); 1345 if (!adapter->phydev && adapter->mdiobus) 1346 adapter->phydev = phy_find_first(adapter->mdiobus); 1347 if (!adapter->phydev) 1348 return -EIO; 1349 1350 return 0; 1351 } 1352 1353 static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count) 1354 { 1355 u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0; 1356 char name[8]; 1357 int i; 1358 int retval; 1359 1360 /* one TX/RX queue pair for netdev is mandatory */ 1361 if (platform_irq_count(adapter->pdev) == 1) 1362 retval = platform_get_irq(adapter->pdev, 0); 1363 else 1364 retval = platform_get_irq_byname(adapter->pdev, "mac"); 1365 if (retval < 0) 1366 return retval; 1367 adapter->num_tx_queues = 1; 1368 adapter->num_rx_queues = 1; 1369 adapter->num_queues = 1; 1370 adapter->queue[0].irq = retval; 1371 adapter->queue[0].tx = &adapter->tx[0]; 1372 adapter->queue[0].rx = &adapter->rx[0]; 1373 adapter->queue[0].irq_mask = irq_mask; 1374 1375 adapter->netdev->irq = adapter->queue[0].irq; 1376 1377 /* add additional TX/RX queue pairs only if dedicated interrupt is 1378 * available 1379 */ 1380 for (i = 1; i < queue_count; i++) { 1381 sprintf(name, "txrx-%d", i); 1382 retval = platform_get_irq_byname_optional(adapter->pdev, name); 1383 if (retval < 0) 1384 break; 1385 1386 adapter->num_tx_queues++; 1387 adapter->num_rx_queues++; 1388 adapter->num_queues++; 1389 adapter->queue[i].irq = retval; 1390 adapter->queue[i].tx = &adapter->tx[i]; 1391 adapter->queue[i].rx = &adapter->rx[i]; 1392 adapter->queue[i].irq_mask = 1393 irq_mask << (ECM_INT_TXRX_SHIFT * i); 1394 } 1395 1396 return 0; 1397 } 1398 1399 static int tsnep_probe(struct platform_device *pdev) 1400 { 1401 struct tsnep_adapter *adapter; 1402 struct net_device *netdev; 1403 struct resource *io; 1404 u32 type; 1405 int revision; 1406 int version; 1407 int queue_count; 1408 int retval; 1409 1410 netdev = devm_alloc_etherdev_mqs(&pdev->dev, 1411 sizeof(struct tsnep_adapter), 1412 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES); 1413 if (!netdev) 1414 return -ENODEV; 1415 SET_NETDEV_DEV(netdev, &pdev->dev); 1416 adapter = netdev_priv(netdev); 1417 platform_set_drvdata(pdev, adapter); 1418 adapter->pdev = pdev; 1419 adapter->dmadev = &pdev->dev; 1420 adapter->netdev = netdev; 1421 adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | 1422 NETIF_MSG_LINK | NETIF_MSG_IFUP | 1423 NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; 1424 1425 netdev->min_mtu = ETH_MIN_MTU; 1426 netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; 1427 1428 mutex_init(&adapter->gate_control_lock); 1429 mutex_init(&adapter->rxnfc_lock); 1430 INIT_LIST_HEAD(&adapter->rxnfc_rules); 1431 1432 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1433 adapter->addr = devm_ioremap_resource(&pdev->dev, io); 1434 if (IS_ERR(adapter->addr)) 1435 return PTR_ERR(adapter->addr); 1436 netdev->mem_start = io->start; 1437 netdev->mem_end = io->end; 1438 1439 
	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");