// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>

#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

#define TSNEP_COALESCE_USECS_DEFAULT 64
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
				  ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
{
	if (usecs > TSNEP_COALESCE_USECS_MAX)
		return -ERANGE;

	usecs /= ECM_INT_DELAY_BASE_US;
	usecs <<= ECM_INT_DELAY_SHIFT;
	usecs &= ECM_INT_DELAY_MASK;

	queue->irq_delay &= ~ECM_INT_DELAY_MASK;
	queue->irq_delay |= usecs;
	iowrite8(queue->irq_delay, queue->irq_delay_addr);

	return 0;
}

u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
{
	u32 usecs;

	usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
	usecs >>= ECM_INT_DELAY_SHIFT;
	usecs *= ECM_INT_DELAY_BASE_US;

	return usecs;
}

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled, it
	 * would delay a working loopback anyway, let's ensure that loopback
	 * is working immediately by setting link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (i == 0) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}

static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	struct tsnep_tx_entry *entry;
	struct netdev_queue *nq;
	unsigned long flags;
	int budget = 128;
	int length;
	int count;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_tx_queue_stopped(nq)) {
		netif_tx_wake_queue(nq);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}

static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	unsigned long flags;
	struct tsnep_tx_entry *entry;
	bool pending = false;

	spin_lock_irqsave(&tx->lock, flags);

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return pending;
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_SKB_PAD;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static int tsnep_rx_desc_available(struct tsnep_rx *rx)
{
	if (rx->read <= rx->write)
		return TSNEP_RING_SIZE - rx->write + rx->read - 1;
	else
		return rx->read - rx->write - 1;
}

static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
			      struct page *page)
{
	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;
	tsnep_rx_set_page(rx, entry, page);

	return 0;
}

static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct tsnep_rx_entry *read = &rx->entry[rx->read];

	tsnep_rx_set_page(rx, entry, read->page);
	read->page = NULL;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
	int index;
	bool alloc_failed = false;
	bool enable = false;
	int i;
	int retval;

	for (i = 0; i < count && !alloc_failed; i++) {
		index = (rx->write + i) % TSNEP_RING_SIZE;

		retval = tsnep_rx_alloc_buffer(rx, index);
		if (unlikely(retval)) {
			rx->alloc_failed++;
			alloc_failed = true;

			/* reuse only if no other allocation was successful */
			if (i == 0 && reuse)
				tsnep_rx_reuse_buffer(rx, index);
			else
				break;
		}

		tsnep_rx_activate(rx, index);

		enable = true;
	}

	if (enable) {
		rx->write = (rx->write + i) % TSNEP_RING_SIZE;

		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return i;
}

static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_SKB_PAD);

		skb_shinfo(skb)->tx_flags |=
			SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int desc_available;
	int done = 0;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct sk_buff *skb;
	int length;

	desc_available = tsnep_rx_desc_available(rx);
	dma_dir = page_pool_get_dma_dir(rx->page_pool);

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
		done++;

		if (desc_available >= TSNEP_RING_RX_REFILL) {
			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;

			desc_available -= tsnep_rx_refill(rx, desc_available,
							  reuse);
			if (!entry->page) {
				/* buffer has been reused for refill to prevent
				 * empty RX ring, thus buffer cannot be used for
				 * RX processing
				 */
				rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
				desc_available++;

				rx->dropped++;

				continue;
			}
		}

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
					      length, dma_dir);

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
		desc_available++;

		skb = tsnep_build_skb(rx, entry->page, length);
		if (skb) {
			page_pool_release_page(rx->page_pool, entry->page);

			rx->packets++;
			rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
		} else {
			page_pool_recycle_direct(rx->page_pool, entry->page);

			rx->dropped++;
		}
		entry->page = NULL;
	}

	if (desc_available)
		tsnep_rx_refill(rx, desc_available, false);

	return done;
}

static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	if (rx->read != rx->write) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) ==
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			return true;
	}

	return false;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

	return min(done, budget - 1);
}

static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr,
					       rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}

		retval = tsnep_request_irq(&adapter->queue[i], i == 0);
		if (retval) {
			netif_err(adapter, drv, adapter->netdev,
				  "can't get assigned irq %d.\n",
				  adapter->queue[i].irq);
			goto failed;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
	u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
	char name[8];
	int i;
	int retval;

	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = irq_mask;
	adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
	retval = tsnep_set_irq_coalesce(&adapter->queue[0],
					TSNEP_COALESCE_USECS_DEFAULT);
	if (retval < 0)
		return retval;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < queue_count; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].irq_mask =
			irq_mask << (ECM_INT_TXRX_SHIFT * i);
		adapter->queue[i].irq_delay_addr =
			adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
		retval = tsnep_set_irq_coalesce(&adapter->queue[i],
						TSNEP_COALESCE_USECS_DEFAULT);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int queue_count;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");