// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA-based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the driver.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */

#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

#define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))

#define TSNEP_COALESCE_USECS_DEFAULT 64
#define TSNEP_COALESCE_USECS_MAX ((ECM_INT_DELAY_MASK >> ECM_INT_DELAY_SHIFT) * \
				  ECM_INT_DELAY_BASE_US + ECM_INT_DELAY_BASE_US - 1)

#define TSNEP_TX_TYPE_SKB	BIT(0)
#define TSNEP_TX_TYPE_SKB_FRAG	BIT(1)
#define TSNEP_TX_TYPE_XDP_TX	BIT(2)
#define TSNEP_TX_TYPE_XDP_NDO	BIT(3)

#define TSNEP_XDP_TX		BIT(0)
#define TSNEP_XDP_REDIRECT	BIT(1)

static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs)
{
	if (usecs > TSNEP_COALESCE_USECS_MAX)
		return -ERANGE;

	usecs /= ECM_INT_DELAY_BASE_US;
	usecs <<= ECM_INT_DELAY_SHIFT;
	usecs &= ECM_INT_DELAY_MASK;

	queue->irq_delay &= ~ECM_INT_DELAY_MASK;
	queue->irq_delay |= usecs;
	iowrite8(queue->irq_delay, queue->irq_delay_addr);

	return 0;
}

u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue)
{
	u32 usecs;

	usecs = (queue->irq_delay & ECM_INT_DELAY_MASK);
	usecs >>= ECM_INT_DELAY_SHIFT;
	usecs *= ECM_INT_DELAY_BASE_US;

	return usecs;
}
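
/* Worked example for the two conversion helpers above (illustrative only;
 * the real field width and granularity are given by ECM_INT_DELAY_MASK,
 * ECM_INT_DELAY_SHIFT and ECM_INT_DELAY_BASE_US in tsnep_hw.h):
 *
 *	tsnep_set_irq_coalesce(queue, usecs) stores
 *		((usecs / ECM_INT_DELAY_BASE_US) << ECM_INT_DELAY_SHIFT)
 *	in the delay register, so tsnep_get_irq_coalesce(queue) returns the
 *	requested value rounded down to a multiple of ECM_INT_DELAY_BASE_US;
 *	any remainder of the request is silently truncated.
 */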

static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}

static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled, it
	 * would delay a working loopback anyway, let's ensure that loopback
	 * is working immediately by setting link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}

static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}

static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}
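
/* Sketch of the descriptor ring layout set up by tsnep_tx_ring_init() above
 * (tsnep_rx_ring_init() below uses the same scheme): TSNEP_RING_SIZE
 * descriptors are spread over TSNEP_RING_PAGE_COUNT coherent pages with
 * TSNEP_RING_ENTRIES_PER_PAGE descriptors each, and the ring is closed by
 * chaining every descriptor to its successor:
 *
 *	entry[i].desc_dma = page_dma[i / TSNEP_RING_ENTRIES_PER_PAGE] +
 *			    TSNEP_DESC_SIZE * (i % TSNEP_RING_ENTRIES_PER_PAGE);
 *	entry[i].desc->next =
 *		__cpu_to_le64(entry[(i + 1) % TSNEP_RING_SIZE].desc_dma);
 */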

static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	/* xdpf is union with skb */
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS))
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}

static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}

static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (!i) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);

			entry->type = TSNEP_TX_TYPE_SKB;
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);

			entry->type = TSNEP_TX_TYPE_SKB_FRAG;
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (entry->type & TSNEP_TX_TYPE_SKB)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else if (entry->type &
				 (TSNEP_TX_TYPE_SKB_FRAG | TSNEP_TX_TYPE_XDP_NDO))
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}
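
/* Worked example for tsnep_tx_desc_available() above (hypothetical values,
 * only to illustrate the ring arithmetic): with a ring of 256 entries,
 * read == 10 and write == 200 leave 256 - 200 + 10 - 1 = 65 free
 * descriptors; with read == 200 and write == 10 the second branch yields
 * 200 - 10 - 1 = 189. One entry is always kept unused so that read == write
 * unambiguously means an empty ring.
 */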

static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == count - 1);
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_subqueue(tx->adapter->netdev, tx->queue_index);
	}

	return NETDEV_TX_OK;
}

static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx,
			    struct skb_shared_info *shinfo, int count, u32 type)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct page *page;
	skb_frag_t *frag;
	unsigned int len;
	int map_len = 0;
	dma_addr_t dma;
	void *data;
	int i;

	frag = NULL;
	len = xdpf->len;
	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];
		if (type & TSNEP_TX_TYPE_XDP_NDO) {
			data = unlikely(frag) ? skb_frag_address(frag) :
						xdpf->data;
			dma = dma_map_single(dmadev, data, len, DMA_TO_DEVICE);
			if (dma_mapping_error(dmadev, dma))
				return -ENOMEM;

			entry->type = TSNEP_TX_TYPE_XDP_NDO;
		} else {
			page = unlikely(frag) ? skb_frag_page(frag) :
						virt_to_page(xdpf->data);
			dma = page_pool_get_dma_addr(page);
			if (unlikely(frag))
				dma += skb_frag_off(frag);
			else
				dma += sizeof(*xdpf) + xdpf->headroom;
			dma_sync_single_for_device(dmadev, dma, len,
						   DMA_BIDIRECTIONAL);

			entry->type = TSNEP_TX_TYPE_XDP_TX;
		}

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;

		if (i + 1 < count) {
			frag = &shinfo->frags[i];
			len = skb_frag_size(frag);
		}
	}

	return map_len;
}
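
/* Note on the two mapping paths in tsnep_xdp_tx_map() above:
 * TSNEP_TX_TYPE_XDP_NDO frames are handed in from outside via ndo_xdp_xmit,
 * so their data is mapped with dma_map_single() and unmapped again on TX
 * completion; TSNEP_TX_TYPE_XDP_TX frames stay on this device's page pool,
 * so the page pool's existing DMA mapping is reused and only a
 * dma_sync_single_for_device() is required before transmit.
 */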

/* This function requires __netif_tx_lock is held by the caller.
 */
static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf,
				      struct tsnep_tx *tx, u32 type)
{
	struct skb_shared_info *shinfo = xdp_get_shared_info_from_frame(xdpf);
	struct tsnep_tx_entry *entry;
	int count, length, retval, i;

	count = 1;
	if (unlikely(xdp_frame_has_frags(xdpf)))
		count += shinfo->nr_frags;

	/* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS
	 * will be available for normal TX path and queue is stopped there if
	 * necessary
	 */
	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count))
		return false;

	entry = &tx->entry[tx->write];
	entry->xdpf = xdpf;

	retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		entry->xdpf = NULL;

		tx->dropped++;

		return false;
	}
	length = retval;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == count - 1);
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	return true;
}

static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx)
{
	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);
}

static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter,
				struct xdp_buff *xdp,
				struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	bool xmit;

	if (unlikely(!xdpf))
		return false;

	__netif_tx_lock(tx_nq, smp_processor_id());

	xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX);

	/* Avoid transmit queue timeout since we share it with the slow path */
	if (xmit)
		txq_trans_cond_update(tx_nq);

	__netif_tx_unlock(tx_nq);

	return xmit;
}
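
/* XDP_TX transmit is split into two steps: frames are queued per packet with
 * tsnep_xdp_xmit_back() while the RX poll runs, and the TX enable doorbell is
 * written once per NAPI iteration by tsnep_finalize_xdp() below, roughly:
 *
 *	__netif_tx_lock(tx_nq, smp_processor_id());
 *	tsnep_xdp_xmit_flush(tx);
 *	__netif_tx_unlock(tx_nq);
 *
 * which avoids one MMIO write per forwarded frame.
 */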

static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	struct tsnep_tx_entry *entry;
	struct netdev_queue *nq;
	int budget = 128;
	int length;
	int count;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
	__netif_tx_lock(nq, smp_processor_id());

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;
		else if (!(entry->type & TSNEP_TX_TYPE_SKB) &&
			 xdp_frame_has_frags(entry->xdpf))
			count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((entry->type & TSNEP_TX_TYPE_SKB) &&
		    (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		if (entry->type & TSNEP_TX_TYPE_SKB)
			napi_consume_skb(entry->skb, napi_budget);
		else
			xdp_return_frame_rx_napi(entry->xdpf);
		/* xdpf is union with skb */
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_tx_queue_stopped(nq)) {
		netif_tx_wake_queue(nq);
	}

	__netif_tx_unlock(nq);

	return budget != 0;
}

static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	struct tsnep_tx_entry *entry;
	struct netdev_queue *nq;
	bool pending = false;

	nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index);
	__netif_tx_lock(nq, smp_processor_id());

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	__netif_tx_unlock(nq);

	return pending;
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}
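
/* Example of the descriptor base programming used by tsnep_tx_open() above
 * and tsnep_rx_open() below (hypothetical address, for illustration only):
 * the DMA address of the first descriptor, combined with
 * TSNEP_RESET_OWNER_COUNTER, is split by the macros from the top of this
 * file, e.g. for 0x0000000123456000:
 *
 *	DMA_ADDR_LOW()  -> 0x23456000
 *	DMA_ADDR_HIGH() -> 0x00000001 (0 without CONFIG_ARCH_DMA_ADDR_T_64BIT)
 *
 * so both halves of the 64 bit address reach the TSNEP_*_DESC_ADDR_LOW/HIGH
 * registers.
 */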

static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_RX_OFFSET;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}

static int tsnep_rx_desc_available(struct tsnep_rx *rx)
{
	if (rx->read <= rx->write)
		return TSNEP_RING_SIZE - rx->write + rx->read - 1;
	else
		return rx->read - rx->write - 1;
}

static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry,
			      struct page *page)
{
	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_RX_OFFSET);
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;
	tsnep_rx_set_page(rx, entry, page);

	return 0;
}

static void tsnep_rx_reuse_buffer(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];
	struct tsnep_rx_entry *read = &rx->entry[rx->read];

	tsnep_rx_set_page(rx, entry, read->page);
	read->page = NULL;
}

static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
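
/* Summary of the owner counter scheme used by tsnep_tx_activate() and
 * tsnep_rx_activate() above (no additional behaviour, values follow from the
 * code): the counter cycles through 1..3 and is incremented each time the
 * ring index reaches increment_owner_counter, which itself moves back by one
 * entry per wrap (e.g. from 0 to TSNEP_RING_SIZE - 1). A descriptor is
 * complete once the owner counter written back by hardware matches the one
 * stored in entry->properties, which is what tsnep_tx_poll() and
 * tsnep_rx_poll() test.
 */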

static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse)
{
	int index;
	bool alloc_failed = false;
	bool enable = false;
	int i;
	int retval;

	for (i = 0; i < count && !alloc_failed; i++) {
		index = (rx->write + i) % TSNEP_RING_SIZE;

		retval = tsnep_rx_alloc_buffer(rx, index);
		if (unlikely(retval)) {
			rx->alloc_failed++;
			alloc_failed = true;

			/* reuse only if no other allocation was successful */
			if (i == 0 && reuse)
				tsnep_rx_reuse_buffer(rx, index);
			else
				break;
		}

		tsnep_rx_activate(rx, index);

		enable = true;
	}

	if (enable) {
		rx->write = (rx->write + i) % TSNEP_RING_SIZE;

		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return i;
}

static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog,
			       struct xdp_buff *xdp, int *status,
			       struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
	unsigned int length;
	unsigned int sync;
	u32 act;

	length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail: DMA sync for_device must cover the max
	 * length touched by the CPU
	 */
	sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM;
	sync = max(sync, length);

	switch (act) {
	case XDP_PASS:
		return false;
	case XDP_TX:
		if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx))
			goto out_failure;
		*status |= TSNEP_XDP_TX;
		return true;
	case XDP_REDIRECT:
		if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0)
			goto out_failure;
		*status |= TSNEP_XDP_REDIRECT;
		return true;
	default:
		bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx->adapter->netdev, prog, act);
		fallthrough;
	case XDP_DROP:
		page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data),
				   sync, true);
		return true;
	}
}

static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status,
			       struct netdev_queue *tx_nq, struct tsnep_tx *tx)
{
	if (status & TSNEP_XDP_TX) {
		__netif_tx_lock(tx_nq, smp_processor_id());
		tsnep_xdp_xmit_flush(tx);
		__netif_tx_unlock(tx_nq);
	}

	if (status & TSNEP_XDP_REDIRECT)
		xdp_do_flush();
}

static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_RX_OFFSET + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_RX_OFFSET);

		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}
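
/* RX buffer layout consumed by tsnep_build_skb() above (derived from the
 * offsets used in this file, shown only for orientation):
 *
 *	page_address(page)
 *	+ TSNEP_RX_OFFSET                 -> struct tsnep_rx_inline (inline
 *					     RX metadata with timestamps)
 *	+ TSNEP_RX_OFFSET
 *	  + TSNEP_RX_INLINE_METADATA_SIZE -> Ethernet frame; the passed length
 *					     still includes the FCS, which is
 *					     trimmed by putting only
 *					     length - ETH_FCS_LEN bytes
 */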

static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct netdev_queue *tx_nq;
	struct bpf_prog *prog;
	struct xdp_buff xdp;
	struct sk_buff *skb;
	struct tsnep_tx *tx;
	int desc_available;
	int xdp_status = 0;
	int done = 0;
	int length;

	desc_available = tsnep_rx_desc_available(rx);
	dma_dir = page_pool_get_dma_dir(rx->page_pool);
	prog = READ_ONCE(rx->adapter->xdp_prog);
	if (prog) {
		tx_nq = netdev_get_tx_queue(rx->adapter->netdev,
					    rx->tx_queue_index);
		tx = &rx->adapter->tx[rx->tx_queue_index];

		xdp_init_buff(&xdp, PAGE_SIZE, &rx->xdp_rxq);
	}

	while (likely(done < budget) && (rx->read != rx->write)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;
		done++;

		if (desc_available >= TSNEP_RING_RX_REFILL) {
			bool reuse = desc_available >= TSNEP_RING_RX_REUSE;

			desc_available -= tsnep_rx_refill(rx, desc_available,
							  reuse);
			if (!entry->page) {
				/* buffer has been reused for refill to prevent
				 * empty RX ring, thus buffer cannot be used for
				 * RX processing
				 */
				rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
				desc_available++;

				rx->dropped++;

				continue;
			}
		}

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_RX_OFFSET);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma,
					      TSNEP_RX_OFFSET, length, dma_dir);

		/* RX metadata with timestamps is in front of actual data,
		 * subtract metadata size to get length of actual data and
		 * consider metadata size as offset of actual data during RX
		 * processing
		 */
		length -= TSNEP_RX_INLINE_METADATA_SIZE;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
		desc_available++;

		if (prog) {
			bool consume;

			xdp_prepare_buff(&xdp, page_address(entry->page),
					 XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
					 length, false);

			consume = tsnep_xdp_run_prog(rx, prog, &xdp,
						     &xdp_status, tx_nq, tx);
			if (consume) {
				rx->packets++;
				rx->bytes += length;

				entry->page = NULL;

				continue;
			}
		}

		skb = tsnep_build_skb(rx, entry->page, length);
		if (skb) {
			page_pool_release_page(rx->page_pool, entry->page);

			rx->packets++;
			rx->bytes += length;
			if (skb->pkt_type == PACKET_MULTICAST)
				rx->multicast++;

			napi_gro_receive(napi, skb);
		} else {
			page_pool_recycle_direct(rx->page_pool, entry->page);

			rx->dropped++;
		}
		entry->page = NULL;
	}

	if (xdp_status)
		tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx);

	if (desc_available)
		tsnep_rx_refill(rx, desc_available, false);

	return done;
}

static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	if (rx->read != rx->write) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) ==
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			return true;
	}

	return false;
}
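
/* Refill strategy of tsnep_rx_poll() above (summary of the existing logic,
 * no new behaviour): descriptors are refilled in batches once at least
 * TSNEP_RING_RX_REFILL entries are free. If page allocation fails while
 * TSNEP_RING_RX_REUSE or more entries are free, the buffer of the descriptor
 * currently being processed is reused for the refill so the ring cannot run
 * empty; the frame it carried is then counted as dropped.
 */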

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int retval;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}

static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx) {
		done = tsnep_rx_poll(queue->rx, napi, budget);
		if (done >= budget)
			complete = false;
	}

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

	return min(done, budget - 1);
}
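
/* The interrupt re-arming in tsnep_poll() above follows the usual NAPI
 * pattern (description only): after napi_complete_done() the queue interrupt
 * is enabled again and tsnep_pending() re-checks both descriptor rings. If
 * work arrived between the last poll and the interrupt enable, the interrupt
 * is masked again and NAPI is rescheduled, so such frames are not stalled
 * until the next unrelated interrupt.
 */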

static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}

static void tsnep_queue_close(struct tsnep_queue *queue, bool first)
{
	struct tsnep_rx *rx = queue->rx;

	tsnep_free_irq(queue, first);

	if (rx && xdp_rxq_info_is_reg(&rx->xdp_rxq))
		xdp_rxq_info_unreg(&rx->xdp_rxq);

	netif_napi_del(&queue->napi);
}

static int tsnep_queue_open(struct tsnep_adapter *adapter,
			    struct tsnep_queue *queue, bool first)
{
	struct tsnep_rx *rx = queue->rx;
	struct tsnep_tx *tx = queue->tx;
	int retval;

	queue->adapter = adapter;

	netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll);

	if (rx) {
		/* choose TX queue for XDP_TX */
		if (tx)
			rx->tx_queue_index = tx->queue_index;
		else if (rx->queue_index < adapter->num_tx_queues)
			rx->tx_queue_index = rx->queue_index;
		else
			rx->tx_queue_index = 0;

		retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev,
					  rx->queue_index, queue->napi.napi_id);
		if (retval)
			goto failed;
		retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq,
						    MEM_TYPE_PAGE_POOL,
						    rx->page_pool);
		if (retval)
			goto failed;
	}

	retval = tsnep_request_irq(queue, first);
	if (retval) {
		netif_err(adapter, drv, adapter->netdev,
			  "can't get assigned irq %d.\n", queue->irq);
		goto failed;
	}

	return 0;

failed:
	tsnep_queue_close(queue, first);

	return retval;
}

static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	void __iomem *addr;
	int i, retval;

	for (i = 0; i < adapter->num_queues; i++) {
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr, rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}

		retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0);
		if (retval)
			goto failed;
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++) {
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_queue_close(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);

		tsnep_queue_close(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}

static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}

static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}

static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   addr);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}

static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack);
	default:
		return -EOPNOTSUPP;
	}
}

static struct tsnep_tx *tsnep_xdp_get_tx(struct tsnep_adapter *adapter, u32 cpu)
{
	if (cpu >= TSNEP_MAX_QUEUES)
		cpu &= TSNEP_MAX_QUEUES - 1;

	while (cpu >= adapter->num_tx_queues)
		cpu -= adapter->num_tx_queues;

	return &adapter->tx[cpu];
}
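
/* Worked example for tsnep_xdp_get_tx() above (hypothetical CPU numbers, for
 * illustration only): with two TX queues, CPUs 0 and 2 map to tx[0] and CPUs
 * 1 and 3 map to tx[1]; CPU numbers of TSNEP_MAX_QUEUES and above are first
 * masked down so the result always stays within the allocated queues. The
 * caller still serializes access with __netif_tx_lock(), so several CPUs may
 * safely share one XDP TX queue.
 */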

static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n,
				 struct xdp_frame **xdp, u32 flags)
{
	struct tsnep_adapter *adapter = netdev_priv(dev);
	u32 cpu = smp_processor_id();
	struct netdev_queue *nq;
	struct tsnep_tx *tx;
	int nxmit;
	bool xmit;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	tx = tsnep_xdp_get_tx(adapter, cpu);
	nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index);

	__netif_tx_lock(nq, cpu);

	for (nxmit = 0; nxmit < n; nxmit++) {
		xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx,
						 TSNEP_TX_TYPE_XDP_NDO);
		if (!xmit)
			break;

		/* avoid transmit queue timeout since we share it with the slow
		 * path
		 */
		txq_trans_cond_update(nq);
	}

	if (flags & XDP_XMIT_FLUSH)
		tsnep_xdp_xmit_flush(tx);

	__netif_tx_unlock(nq);

	return nxmit;
}

static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
	.ndo_bpf = tsnep_netdev_bpf,
	.ndo_xdp_xmit = tsnep_netdev_xdp_xmit,
};

static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}

static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}

static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
	u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
	char name[8];
	int i;
	int retval;

	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = irq_mask;
	adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY;
	retval = tsnep_set_irq_coalesce(&adapter->queue[0],
					TSNEP_COALESCE_USECS_DEFAULT);
	if (retval < 0)
		return retval;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < queue_count; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].irq_mask =
			irq_mask << (ECM_INT_TXRX_SHIFT * i);
		adapter->queue[i].irq_delay_addr =
			adapter->addr + ECM_INT_DELAY + ECM_INT_DELAY_OFFSET * i;
		retval = tsnep_set_irq_coalesce(&adapter->queue[i],
						TSNEP_COALESCE_USECS_DEFAULT);
		if (retval < 0)
			return retval;
	}

	return 0;
}

static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int queue_count;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			       NETDEV_XDP_ACT_NDO_XMIT |
			       NETDEV_XDP_ACT_NDO_XMIT_SG;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}

static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);

static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");