1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Faraday FTMAC100 10/100 Ethernet 4 * 5 * (C) Copyright 2009-2011 Faraday Technology 6 * Po-Yu Chuang <ratbert@faraday-tech.com> 7 */ 8 9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 10 11 #include <linux/dma-mapping.h> 12 #include <linux/etherdevice.h> 13 #include <linux/ethtool.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/mii.h> 18 #include <linux/module.h> 19 #include <linux/mod_devicetable.h> 20 #include <linux/netdevice.h> 21 #include <linux/platform_device.h> 22 23 #include "ftmac100.h" 24 25 #define DRV_NAME "ftmac100" 26 27 #define RX_QUEUE_ENTRIES 128 /* must be power of 2 */ 28 #define TX_QUEUE_ENTRIES 16 /* must be power of 2 */ 29 30 #define MAX_PKT_SIZE 1518 31 #define RX_BUF_SIZE 2044 /* must be smaller than 0x7ff */ 32 33 #if MAX_PKT_SIZE > 0x7ff 34 #error invalid MAX_PKT_SIZE 35 #endif 36 37 #if RX_BUF_SIZE > 0x7ff || RX_BUF_SIZE > PAGE_SIZE 38 #error invalid RX_BUF_SIZE 39 #endif 40 41 /****************************************************************************** 42 * private data 43 *****************************************************************************/ 44 struct ftmac100_descs { 45 struct ftmac100_rxdes rxdes[RX_QUEUE_ENTRIES]; 46 struct ftmac100_txdes txdes[TX_QUEUE_ENTRIES]; 47 }; 48 49 struct ftmac100 { 50 struct resource *res; 51 void __iomem *base; 52 int irq; 53 54 struct ftmac100_descs *descs; 55 dma_addr_t descs_dma_addr; 56 57 unsigned int rx_pointer; 58 unsigned int tx_clean_pointer; 59 unsigned int tx_pointer; 60 unsigned int tx_pending; 61 62 spinlock_t tx_lock; 63 64 struct net_device *netdev; 65 struct device *dev; 66 struct napi_struct napi; 67 68 struct mii_if_info mii; 69 }; 70 71 static int ftmac100_alloc_rx_page(struct ftmac100 *priv, 72 struct ftmac100_rxdes *rxdes, gfp_t gfp); 73 74 /****************************************************************************** 75 * internal functions (hardware register access) 76 *****************************************************************************/ 77 #define INT_MASK_ALL_ENABLED (FTMAC100_INT_RPKT_FINISH | \ 78 FTMAC100_INT_NORXBUF | \ 79 FTMAC100_INT_XPKT_OK | \ 80 FTMAC100_INT_XPKT_LOST | \ 81 FTMAC100_INT_RPKT_LOST | \ 82 FTMAC100_INT_AHB_ERR | \ 83 FTMAC100_INT_PHYSTS_CHG) 84 85 #define INT_MASK_ALL_DISABLED 0 86 87 static void ftmac100_enable_all_int(struct ftmac100 *priv) 88 { 89 iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTMAC100_OFFSET_IMR); 90 } 91 92 static void ftmac100_disable_all_int(struct ftmac100 *priv) 93 { 94 iowrite32(INT_MASK_ALL_DISABLED, priv->base + FTMAC100_OFFSET_IMR); 95 } 96 97 static void ftmac100_set_rx_ring_base(struct ftmac100 *priv, dma_addr_t addr) 98 { 99 iowrite32(addr, priv->base + FTMAC100_OFFSET_RXR_BADR); 100 } 101 102 static void ftmac100_set_tx_ring_base(struct ftmac100 *priv, dma_addr_t addr) 103 { 104 iowrite32(addr, priv->base + FTMAC100_OFFSET_TXR_BADR); 105 } 106 107 static void ftmac100_txdma_start_polling(struct ftmac100 *priv) 108 { 109 iowrite32(1, priv->base + FTMAC100_OFFSET_TXPD); 110 } 111 112 static int ftmac100_reset(struct ftmac100 *priv) 113 { 114 struct net_device *netdev = priv->netdev; 115 int i; 116 117 /* NOTE: reset clears all registers */ 118 iowrite32(FTMAC100_MACCR_SW_RST, priv->base + FTMAC100_OFFSET_MACCR); 119 120 for (i = 0; i < 5; i++) { 121 unsigned int maccr; 122 123 maccr = ioread32(priv->base + FTMAC100_OFFSET_MACCR); 124 if (!(maccr & FTMAC100_MACCR_SW_RST)) { 125 /* 126 * FTMAC100_MACCR_SW_RST cleared does not indicate 127 * that hardware reset completed (what the f*ck). 128 * We still need to wait for a while. 129 */ 130 udelay(500); 131 return 0; 132 } 133 134 udelay(1000); 135 } 136 137 netdev_err(netdev, "software reset failed\n"); 138 return -EIO; 139 } 140 141 static void ftmac100_set_mac(struct ftmac100 *priv, const unsigned char *mac) 142 { 143 unsigned int maddr = mac[0] << 8 | mac[1]; 144 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; 145 146 iowrite32(maddr, priv->base + FTMAC100_OFFSET_MAC_MADR); 147 iowrite32(laddr, priv->base + FTMAC100_OFFSET_MAC_LADR); 148 } 149 150 #define MACCR_ENABLE_ALL (FTMAC100_MACCR_XMT_EN | \ 151 FTMAC100_MACCR_RCV_EN | \ 152 FTMAC100_MACCR_XDMA_EN | \ 153 FTMAC100_MACCR_RDMA_EN | \ 154 FTMAC100_MACCR_CRC_APD | \ 155 FTMAC100_MACCR_FULLDUP | \ 156 FTMAC100_MACCR_RX_RUNT | \ 157 FTMAC100_MACCR_RX_BROADPKT) 158 159 static int ftmac100_start_hw(struct ftmac100 *priv) 160 { 161 struct net_device *netdev = priv->netdev; 162 163 if (ftmac100_reset(priv)) 164 return -EIO; 165 166 /* setup ring buffer base registers */ 167 ftmac100_set_rx_ring_base(priv, 168 priv->descs_dma_addr + 169 offsetof(struct ftmac100_descs, rxdes)); 170 ftmac100_set_tx_ring_base(priv, 171 priv->descs_dma_addr + 172 offsetof(struct ftmac100_descs, txdes)); 173 174 iowrite32(FTMAC100_APTC_RXPOLL_CNT(1), priv->base + FTMAC100_OFFSET_APTC); 175 176 ftmac100_set_mac(priv, netdev->dev_addr); 177 178 iowrite32(MACCR_ENABLE_ALL, priv->base + FTMAC100_OFFSET_MACCR); 179 return 0; 180 } 181 182 static void ftmac100_stop_hw(struct ftmac100 *priv) 183 { 184 iowrite32(0, priv->base + FTMAC100_OFFSET_MACCR); 185 } 186 187 /****************************************************************************** 188 * internal functions (receive descriptor) 189 *****************************************************************************/ 190 static bool ftmac100_rxdes_first_segment(struct ftmac100_rxdes *rxdes) 191 { 192 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FRS); 193 } 194 195 static bool ftmac100_rxdes_last_segment(struct ftmac100_rxdes *rxdes) 196 { 197 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_LRS); 198 } 199 200 static bool ftmac100_rxdes_owned_by_dma(struct ftmac100_rxdes *rxdes) 201 { 202 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); 203 } 204 205 static void ftmac100_rxdes_set_dma_own(struct ftmac100_rxdes *rxdes) 206 { 207 /* clear status bits */ 208 rxdes->rxdes0 = cpu_to_le32(FTMAC100_RXDES0_RXDMA_OWN); 209 } 210 211 static bool ftmac100_rxdes_rx_error(struct ftmac100_rxdes *rxdes) 212 { 213 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ERR); 214 } 215 216 static bool ftmac100_rxdes_crc_error(struct ftmac100_rxdes *rxdes) 217 { 218 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_CRC_ERR); 219 } 220 221 static bool ftmac100_rxdes_frame_too_long(struct ftmac100_rxdes *rxdes) 222 { 223 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_FTL); 224 } 225 226 static bool ftmac100_rxdes_runt(struct ftmac100_rxdes *rxdes) 227 { 228 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RUNT); 229 } 230 231 static bool ftmac100_rxdes_odd_nibble(struct ftmac100_rxdes *rxdes) 232 { 233 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_RX_ODD_NB); 234 } 235 236 static unsigned int ftmac100_rxdes_frame_length(struct ftmac100_rxdes *rxdes) 237 { 238 return le32_to_cpu(rxdes->rxdes0) & FTMAC100_RXDES0_RFL; 239 } 240 241 static bool ftmac100_rxdes_multicast(struct ftmac100_rxdes *rxdes) 242 { 243 return rxdes->rxdes0 & cpu_to_le32(FTMAC100_RXDES0_MULTICAST); 244 } 245 246 static void ftmac100_rxdes_set_buffer_size(struct ftmac100_rxdes *rxdes, 247 unsigned int size) 248 { 249 rxdes->rxdes1 &= cpu_to_le32(FTMAC100_RXDES1_EDORR); 250 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_RXBUF_SIZE(size)); 251 } 252 253 static void ftmac100_rxdes_set_end_of_ring(struct ftmac100_rxdes *rxdes) 254 { 255 rxdes->rxdes1 |= cpu_to_le32(FTMAC100_RXDES1_EDORR); 256 } 257 258 static void ftmac100_rxdes_set_dma_addr(struct ftmac100_rxdes *rxdes, 259 dma_addr_t addr) 260 { 261 rxdes->rxdes2 = cpu_to_le32(addr); 262 } 263 264 static dma_addr_t ftmac100_rxdes_get_dma_addr(struct ftmac100_rxdes *rxdes) 265 { 266 return le32_to_cpu(rxdes->rxdes2); 267 } 268 269 /* 270 * rxdes3 is not used by hardware. We use it to keep track of page. 271 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). 272 */ 273 static void ftmac100_rxdes_set_page(struct ftmac100_rxdes *rxdes, struct page *page) 274 { 275 rxdes->rxdes3 = (unsigned int)page; 276 } 277 278 static struct page *ftmac100_rxdes_get_page(struct ftmac100_rxdes *rxdes) 279 { 280 return (struct page *)rxdes->rxdes3; 281 } 282 283 /****************************************************************************** 284 * internal functions (receive) 285 *****************************************************************************/ 286 static int ftmac100_next_rx_pointer(int pointer) 287 { 288 return (pointer + 1) & (RX_QUEUE_ENTRIES - 1); 289 } 290 291 static void ftmac100_rx_pointer_advance(struct ftmac100 *priv) 292 { 293 priv->rx_pointer = ftmac100_next_rx_pointer(priv->rx_pointer); 294 } 295 296 static struct ftmac100_rxdes *ftmac100_current_rxdes(struct ftmac100 *priv) 297 { 298 return &priv->descs->rxdes[priv->rx_pointer]; 299 } 300 301 static struct ftmac100_rxdes * 302 ftmac100_rx_locate_first_segment(struct ftmac100 *priv) 303 { 304 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); 305 306 while (!ftmac100_rxdes_owned_by_dma(rxdes)) { 307 if (ftmac100_rxdes_first_segment(rxdes)) 308 return rxdes; 309 310 ftmac100_rxdes_set_dma_own(rxdes); 311 ftmac100_rx_pointer_advance(priv); 312 rxdes = ftmac100_current_rxdes(priv); 313 } 314 315 return NULL; 316 } 317 318 static bool ftmac100_rx_packet_error(struct ftmac100 *priv, 319 struct ftmac100_rxdes *rxdes) 320 { 321 struct net_device *netdev = priv->netdev; 322 bool error = false; 323 324 if (unlikely(ftmac100_rxdes_rx_error(rxdes))) { 325 if (net_ratelimit()) 326 netdev_info(netdev, "rx err\n"); 327 328 netdev->stats.rx_errors++; 329 error = true; 330 } 331 332 if (unlikely(ftmac100_rxdes_crc_error(rxdes))) { 333 if (net_ratelimit()) 334 netdev_info(netdev, "rx crc err\n"); 335 336 netdev->stats.rx_crc_errors++; 337 error = true; 338 } 339 340 if (unlikely(ftmac100_rxdes_frame_too_long(rxdes))) { 341 if (net_ratelimit()) 342 netdev_info(netdev, "rx frame too long\n"); 343 344 netdev->stats.rx_length_errors++; 345 error = true; 346 } else if (unlikely(ftmac100_rxdes_runt(rxdes))) { 347 if (net_ratelimit()) 348 netdev_info(netdev, "rx runt\n"); 349 350 netdev->stats.rx_length_errors++; 351 error = true; 352 } else if (unlikely(ftmac100_rxdes_odd_nibble(rxdes))) { 353 if (net_ratelimit()) 354 netdev_info(netdev, "rx odd nibble\n"); 355 356 netdev->stats.rx_length_errors++; 357 error = true; 358 } 359 360 return error; 361 } 362 363 static void ftmac100_rx_drop_packet(struct ftmac100 *priv) 364 { 365 struct net_device *netdev = priv->netdev; 366 struct ftmac100_rxdes *rxdes = ftmac100_current_rxdes(priv); 367 bool done = false; 368 369 if (net_ratelimit()) 370 netdev_dbg(netdev, "drop packet %p\n", rxdes); 371 372 do { 373 if (ftmac100_rxdes_last_segment(rxdes)) 374 done = true; 375 376 ftmac100_rxdes_set_dma_own(rxdes); 377 ftmac100_rx_pointer_advance(priv); 378 rxdes = ftmac100_current_rxdes(priv); 379 } while (!done && !ftmac100_rxdes_owned_by_dma(rxdes)); 380 381 netdev->stats.rx_dropped++; 382 } 383 384 static bool ftmac100_rx_packet(struct ftmac100 *priv, int *processed) 385 { 386 struct net_device *netdev = priv->netdev; 387 struct ftmac100_rxdes *rxdes; 388 struct sk_buff *skb; 389 struct page *page; 390 dma_addr_t map; 391 int length; 392 bool ret; 393 394 rxdes = ftmac100_rx_locate_first_segment(priv); 395 if (!rxdes) 396 return false; 397 398 if (unlikely(ftmac100_rx_packet_error(priv, rxdes))) { 399 ftmac100_rx_drop_packet(priv); 400 return true; 401 } 402 403 /* 404 * It is impossible to get multi-segment packets 405 * because we always provide big enough receive buffers. 406 */ 407 ret = ftmac100_rxdes_last_segment(rxdes); 408 BUG_ON(!ret); 409 410 /* start processing */ 411 skb = netdev_alloc_skb_ip_align(netdev, 128); 412 if (unlikely(!skb)) { 413 if (net_ratelimit()) 414 netdev_err(netdev, "rx skb alloc failed\n"); 415 416 ftmac100_rx_drop_packet(priv); 417 return true; 418 } 419 420 if (unlikely(ftmac100_rxdes_multicast(rxdes))) 421 netdev->stats.multicast++; 422 423 map = ftmac100_rxdes_get_dma_addr(rxdes); 424 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 425 426 length = ftmac100_rxdes_frame_length(rxdes); 427 page = ftmac100_rxdes_get_page(rxdes); 428 skb_fill_page_desc(skb, 0, page, 0, length); 429 skb->len += length; 430 skb->data_len += length; 431 432 if (length > 128) { 433 skb->truesize += PAGE_SIZE; 434 /* We pull the minimum amount into linear part */ 435 __pskb_pull_tail(skb, ETH_HLEN); 436 } else { 437 /* Small frames are copied into linear part to free one page */ 438 __pskb_pull_tail(skb, length); 439 } 440 ftmac100_alloc_rx_page(priv, rxdes, GFP_ATOMIC); 441 442 ftmac100_rx_pointer_advance(priv); 443 444 skb->protocol = eth_type_trans(skb, netdev); 445 446 netdev->stats.rx_packets++; 447 netdev->stats.rx_bytes += skb->len; 448 449 /* push packet to protocol stack */ 450 netif_receive_skb(skb); 451 452 (*processed)++; 453 return true; 454 } 455 456 /****************************************************************************** 457 * internal functions (transmit descriptor) 458 *****************************************************************************/ 459 static void ftmac100_txdes_reset(struct ftmac100_txdes *txdes) 460 { 461 /* clear all except end of ring bit */ 462 txdes->txdes0 = 0; 463 txdes->txdes1 &= cpu_to_le32(FTMAC100_TXDES1_EDOTR); 464 txdes->txdes2 = 0; 465 txdes->txdes3 = 0; 466 } 467 468 static bool ftmac100_txdes_owned_by_dma(struct ftmac100_txdes *txdes) 469 { 470 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); 471 } 472 473 static void ftmac100_txdes_set_dma_own(struct ftmac100_txdes *txdes) 474 { 475 /* 476 * Make sure dma own bit will not be set before any other 477 * descriptor fields. 478 */ 479 wmb(); 480 txdes->txdes0 |= cpu_to_le32(FTMAC100_TXDES0_TXDMA_OWN); 481 } 482 483 static bool ftmac100_txdes_excessive_collision(struct ftmac100_txdes *txdes) 484 { 485 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_EXSCOL); 486 } 487 488 static bool ftmac100_txdes_late_collision(struct ftmac100_txdes *txdes) 489 { 490 return txdes->txdes0 & cpu_to_le32(FTMAC100_TXDES0_TXPKT_LATECOL); 491 } 492 493 static void ftmac100_txdes_set_end_of_ring(struct ftmac100_txdes *txdes) 494 { 495 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_EDOTR); 496 } 497 498 static void ftmac100_txdes_set_first_segment(struct ftmac100_txdes *txdes) 499 { 500 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_FTS); 501 } 502 503 static void ftmac100_txdes_set_last_segment(struct ftmac100_txdes *txdes) 504 { 505 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_LTS); 506 } 507 508 static void ftmac100_txdes_set_txint(struct ftmac100_txdes *txdes) 509 { 510 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXIC); 511 } 512 513 static void ftmac100_txdes_set_buffer_size(struct ftmac100_txdes *txdes, 514 unsigned int len) 515 { 516 txdes->txdes1 |= cpu_to_le32(FTMAC100_TXDES1_TXBUF_SIZE(len)); 517 } 518 519 static void ftmac100_txdes_set_dma_addr(struct ftmac100_txdes *txdes, 520 dma_addr_t addr) 521 { 522 txdes->txdes2 = cpu_to_le32(addr); 523 } 524 525 static dma_addr_t ftmac100_txdes_get_dma_addr(struct ftmac100_txdes *txdes) 526 { 527 return le32_to_cpu(txdes->txdes2); 528 } 529 530 /* 531 * txdes3 is not used by hardware. We use it to keep track of socket buffer. 532 * Since hardware does not touch it, we can skip cpu_to_le32()/le32_to_cpu(). 533 */ 534 static void ftmac100_txdes_set_skb(struct ftmac100_txdes *txdes, struct sk_buff *skb) 535 { 536 txdes->txdes3 = (unsigned int)skb; 537 } 538 539 static struct sk_buff *ftmac100_txdes_get_skb(struct ftmac100_txdes *txdes) 540 { 541 return (struct sk_buff *)txdes->txdes3; 542 } 543 544 /****************************************************************************** 545 * internal functions (transmit) 546 *****************************************************************************/ 547 static int ftmac100_next_tx_pointer(int pointer) 548 { 549 return (pointer + 1) & (TX_QUEUE_ENTRIES - 1); 550 } 551 552 static void ftmac100_tx_pointer_advance(struct ftmac100 *priv) 553 { 554 priv->tx_pointer = ftmac100_next_tx_pointer(priv->tx_pointer); 555 } 556 557 static void ftmac100_tx_clean_pointer_advance(struct ftmac100 *priv) 558 { 559 priv->tx_clean_pointer = ftmac100_next_tx_pointer(priv->tx_clean_pointer); 560 } 561 562 static struct ftmac100_txdes *ftmac100_current_txdes(struct ftmac100 *priv) 563 { 564 return &priv->descs->txdes[priv->tx_pointer]; 565 } 566 567 static struct ftmac100_txdes *ftmac100_current_clean_txdes(struct ftmac100 *priv) 568 { 569 return &priv->descs->txdes[priv->tx_clean_pointer]; 570 } 571 572 static bool ftmac100_tx_complete_packet(struct ftmac100 *priv) 573 { 574 struct net_device *netdev = priv->netdev; 575 struct ftmac100_txdes *txdes; 576 struct sk_buff *skb; 577 dma_addr_t map; 578 579 if (priv->tx_pending == 0) 580 return false; 581 582 txdes = ftmac100_current_clean_txdes(priv); 583 584 if (ftmac100_txdes_owned_by_dma(txdes)) 585 return false; 586 587 skb = ftmac100_txdes_get_skb(txdes); 588 map = ftmac100_txdes_get_dma_addr(txdes); 589 590 if (unlikely(ftmac100_txdes_excessive_collision(txdes) || 591 ftmac100_txdes_late_collision(txdes))) { 592 /* 593 * packet transmitted to ethernet lost due to late collision 594 * or excessive collision 595 */ 596 netdev->stats.tx_aborted_errors++; 597 } else { 598 netdev->stats.tx_packets++; 599 netdev->stats.tx_bytes += skb->len; 600 } 601 602 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); 603 dev_kfree_skb(skb); 604 605 ftmac100_txdes_reset(txdes); 606 607 ftmac100_tx_clean_pointer_advance(priv); 608 609 spin_lock(&priv->tx_lock); 610 priv->tx_pending--; 611 spin_unlock(&priv->tx_lock); 612 netif_wake_queue(netdev); 613 614 return true; 615 } 616 617 static void ftmac100_tx_complete(struct ftmac100 *priv) 618 { 619 while (ftmac100_tx_complete_packet(priv)) 620 ; 621 } 622 623 static netdev_tx_t ftmac100_xmit(struct ftmac100 *priv, struct sk_buff *skb, 624 dma_addr_t map) 625 { 626 struct net_device *netdev = priv->netdev; 627 struct ftmac100_txdes *txdes; 628 unsigned int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; 629 630 txdes = ftmac100_current_txdes(priv); 631 ftmac100_tx_pointer_advance(priv); 632 633 /* setup TX descriptor */ 634 ftmac100_txdes_set_skb(txdes, skb); 635 ftmac100_txdes_set_dma_addr(txdes, map); 636 637 ftmac100_txdes_set_first_segment(txdes); 638 ftmac100_txdes_set_last_segment(txdes); 639 ftmac100_txdes_set_txint(txdes); 640 ftmac100_txdes_set_buffer_size(txdes, len); 641 642 spin_lock(&priv->tx_lock); 643 priv->tx_pending++; 644 if (priv->tx_pending == TX_QUEUE_ENTRIES) 645 netif_stop_queue(netdev); 646 647 /* start transmit */ 648 ftmac100_txdes_set_dma_own(txdes); 649 spin_unlock(&priv->tx_lock); 650 651 ftmac100_txdma_start_polling(priv); 652 return NETDEV_TX_OK; 653 } 654 655 /****************************************************************************** 656 * internal functions (buffer) 657 *****************************************************************************/ 658 static int ftmac100_alloc_rx_page(struct ftmac100 *priv, 659 struct ftmac100_rxdes *rxdes, gfp_t gfp) 660 { 661 struct net_device *netdev = priv->netdev; 662 struct page *page; 663 dma_addr_t map; 664 665 page = alloc_page(gfp); 666 if (!page) { 667 if (net_ratelimit()) 668 netdev_err(netdev, "failed to allocate rx page\n"); 669 return -ENOMEM; 670 } 671 672 map = dma_map_page(priv->dev, page, 0, RX_BUF_SIZE, DMA_FROM_DEVICE); 673 if (unlikely(dma_mapping_error(priv->dev, map))) { 674 if (net_ratelimit()) 675 netdev_err(netdev, "failed to map rx page\n"); 676 __free_page(page); 677 return -ENOMEM; 678 } 679 680 ftmac100_rxdes_set_page(rxdes, page); 681 ftmac100_rxdes_set_dma_addr(rxdes, map); 682 ftmac100_rxdes_set_buffer_size(rxdes, RX_BUF_SIZE); 683 ftmac100_rxdes_set_dma_own(rxdes); 684 return 0; 685 } 686 687 static void ftmac100_free_buffers(struct ftmac100 *priv) 688 { 689 int i; 690 691 for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 692 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; 693 struct page *page = ftmac100_rxdes_get_page(rxdes); 694 dma_addr_t map = ftmac100_rxdes_get_dma_addr(rxdes); 695 696 if (!page) 697 continue; 698 699 dma_unmap_page(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE); 700 __free_page(page); 701 } 702 703 for (i = 0; i < TX_QUEUE_ENTRIES; i++) { 704 struct ftmac100_txdes *txdes = &priv->descs->txdes[i]; 705 struct sk_buff *skb = ftmac100_txdes_get_skb(txdes); 706 dma_addr_t map = ftmac100_txdes_get_dma_addr(txdes); 707 708 if (!skb) 709 continue; 710 711 dma_unmap_single(priv->dev, map, skb_headlen(skb), DMA_TO_DEVICE); 712 dev_kfree_skb(skb); 713 } 714 715 dma_free_coherent(priv->dev, sizeof(struct ftmac100_descs), 716 priv->descs, priv->descs_dma_addr); 717 } 718 719 static int ftmac100_alloc_buffers(struct ftmac100 *priv) 720 { 721 int i; 722 723 priv->descs = dma_alloc_coherent(priv->dev, 724 sizeof(struct ftmac100_descs), 725 &priv->descs_dma_addr, GFP_KERNEL); 726 if (!priv->descs) 727 return -ENOMEM; 728 729 /* initialize RX ring */ 730 ftmac100_rxdes_set_end_of_ring(&priv->descs->rxdes[RX_QUEUE_ENTRIES - 1]); 731 732 for (i = 0; i < RX_QUEUE_ENTRIES; i++) { 733 struct ftmac100_rxdes *rxdes = &priv->descs->rxdes[i]; 734 735 if (ftmac100_alloc_rx_page(priv, rxdes, GFP_KERNEL)) 736 goto err; 737 } 738 739 /* initialize TX ring */ 740 ftmac100_txdes_set_end_of_ring(&priv->descs->txdes[TX_QUEUE_ENTRIES - 1]); 741 return 0; 742 743 err: 744 ftmac100_free_buffers(priv); 745 return -ENOMEM; 746 } 747 748 /****************************************************************************** 749 * struct mii_if_info functions 750 *****************************************************************************/ 751 static int ftmac100_mdio_read(struct net_device *netdev, int phy_id, int reg) 752 { 753 struct ftmac100 *priv = netdev_priv(netdev); 754 unsigned int phycr; 755 int i; 756 757 phycr = FTMAC100_PHYCR_PHYAD(phy_id) | 758 FTMAC100_PHYCR_REGAD(reg) | 759 FTMAC100_PHYCR_MIIRD; 760 761 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); 762 763 for (i = 0; i < 10; i++) { 764 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); 765 766 if ((phycr & FTMAC100_PHYCR_MIIRD) == 0) 767 return phycr & FTMAC100_PHYCR_MIIRDATA; 768 769 udelay(100); 770 } 771 772 netdev_err(netdev, "mdio read timed out\n"); 773 return 0; 774 } 775 776 static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg, 777 int data) 778 { 779 struct ftmac100 *priv = netdev_priv(netdev); 780 unsigned int phycr; 781 int i; 782 783 phycr = FTMAC100_PHYCR_PHYAD(phy_id) | 784 FTMAC100_PHYCR_REGAD(reg) | 785 FTMAC100_PHYCR_MIIWR; 786 787 data = FTMAC100_PHYWDATA_MIIWDATA(data); 788 789 iowrite32(data, priv->base + FTMAC100_OFFSET_PHYWDATA); 790 iowrite32(phycr, priv->base + FTMAC100_OFFSET_PHYCR); 791 792 for (i = 0; i < 10; i++) { 793 phycr = ioread32(priv->base + FTMAC100_OFFSET_PHYCR); 794 795 if ((phycr & FTMAC100_PHYCR_MIIWR) == 0) 796 return; 797 798 udelay(100); 799 } 800 801 netdev_err(netdev, "mdio write timed out\n"); 802 } 803 804 /****************************************************************************** 805 * struct ethtool_ops functions 806 *****************************************************************************/ 807 static void ftmac100_get_drvinfo(struct net_device *netdev, 808 struct ethtool_drvinfo *info) 809 { 810 strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); 811 strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info)); 812 } 813 814 static int ftmac100_get_link_ksettings(struct net_device *netdev, 815 struct ethtool_link_ksettings *cmd) 816 { 817 struct ftmac100 *priv = netdev_priv(netdev); 818 819 mii_ethtool_get_link_ksettings(&priv->mii, cmd); 820 821 return 0; 822 } 823 824 static int ftmac100_set_link_ksettings(struct net_device *netdev, 825 const struct ethtool_link_ksettings *cmd) 826 { 827 struct ftmac100 *priv = netdev_priv(netdev); 828 return mii_ethtool_set_link_ksettings(&priv->mii, cmd); 829 } 830 831 static int ftmac100_nway_reset(struct net_device *netdev) 832 { 833 struct ftmac100 *priv = netdev_priv(netdev); 834 return mii_nway_restart(&priv->mii); 835 } 836 837 static u32 ftmac100_get_link(struct net_device *netdev) 838 { 839 struct ftmac100 *priv = netdev_priv(netdev); 840 return mii_link_ok(&priv->mii); 841 } 842 843 static const struct ethtool_ops ftmac100_ethtool_ops = { 844 .get_drvinfo = ftmac100_get_drvinfo, 845 .nway_reset = ftmac100_nway_reset, 846 .get_link = ftmac100_get_link, 847 .get_link_ksettings = ftmac100_get_link_ksettings, 848 .set_link_ksettings = ftmac100_set_link_ksettings, 849 }; 850 851 /****************************************************************************** 852 * interrupt handler 853 *****************************************************************************/ 854 static irqreturn_t ftmac100_interrupt(int irq, void *dev_id) 855 { 856 struct net_device *netdev = dev_id; 857 struct ftmac100 *priv = netdev_priv(netdev); 858 859 /* Disable interrupts for polling */ 860 ftmac100_disable_all_int(priv); 861 if (likely(netif_running(netdev))) 862 napi_schedule(&priv->napi); 863 864 return IRQ_HANDLED; 865 } 866 867 /****************************************************************************** 868 * struct napi_struct functions 869 *****************************************************************************/ 870 static int ftmac100_poll(struct napi_struct *napi, int budget) 871 { 872 struct ftmac100 *priv = container_of(napi, struct ftmac100, napi); 873 struct net_device *netdev = priv->netdev; 874 unsigned int status; 875 bool completed = true; 876 int rx = 0; 877 878 status = ioread32(priv->base + FTMAC100_OFFSET_ISR); 879 880 if (status & (FTMAC100_INT_RPKT_FINISH | FTMAC100_INT_NORXBUF)) { 881 /* 882 * FTMAC100_INT_RPKT_FINISH: 883 * RX DMA has received packets into RX buffer successfully 884 * 885 * FTMAC100_INT_NORXBUF: 886 * RX buffer unavailable 887 */ 888 bool retry; 889 890 do { 891 retry = ftmac100_rx_packet(priv, &rx); 892 } while (retry && rx < budget); 893 894 if (retry && rx == budget) 895 completed = false; 896 } 897 898 if (status & (FTMAC100_INT_XPKT_OK | FTMAC100_INT_XPKT_LOST)) { 899 /* 900 * FTMAC100_INT_XPKT_OK: 901 * packet transmitted to ethernet successfully 902 * 903 * FTMAC100_INT_XPKT_LOST: 904 * packet transmitted to ethernet lost due to late 905 * collision or excessive collision 906 */ 907 ftmac100_tx_complete(priv); 908 } 909 910 if (status & (FTMAC100_INT_NORXBUF | FTMAC100_INT_RPKT_LOST | 911 FTMAC100_INT_AHB_ERR | FTMAC100_INT_PHYSTS_CHG)) { 912 if (net_ratelimit()) 913 netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status, 914 status & FTMAC100_INT_NORXBUF ? "NORXBUF " : "", 915 status & FTMAC100_INT_RPKT_LOST ? "RPKT_LOST " : "", 916 status & FTMAC100_INT_AHB_ERR ? "AHB_ERR " : "", 917 status & FTMAC100_INT_PHYSTS_CHG ? "PHYSTS_CHG" : ""); 918 919 if (status & FTMAC100_INT_NORXBUF) { 920 /* RX buffer unavailable */ 921 netdev->stats.rx_over_errors++; 922 } 923 924 if (status & FTMAC100_INT_RPKT_LOST) { 925 /* received packet lost due to RX FIFO full */ 926 netdev->stats.rx_fifo_errors++; 927 } 928 929 if (status & FTMAC100_INT_PHYSTS_CHG) { 930 /* PHY link status change */ 931 mii_check_link(&priv->mii); 932 } 933 } 934 935 if (completed) { 936 /* stop polling */ 937 napi_complete(napi); 938 ftmac100_enable_all_int(priv); 939 } 940 941 return rx; 942 } 943 944 /****************************************************************************** 945 * struct net_device_ops functions 946 *****************************************************************************/ 947 static int ftmac100_open(struct net_device *netdev) 948 { 949 struct ftmac100 *priv = netdev_priv(netdev); 950 int err; 951 952 err = ftmac100_alloc_buffers(priv); 953 if (err) { 954 netdev_err(netdev, "failed to allocate buffers\n"); 955 goto err_alloc; 956 } 957 958 err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev); 959 if (err) { 960 netdev_err(netdev, "failed to request irq %d\n", priv->irq); 961 goto err_irq; 962 } 963 964 priv->rx_pointer = 0; 965 priv->tx_clean_pointer = 0; 966 priv->tx_pointer = 0; 967 priv->tx_pending = 0; 968 969 err = ftmac100_start_hw(priv); 970 if (err) 971 goto err_hw; 972 973 napi_enable(&priv->napi); 974 netif_start_queue(netdev); 975 976 ftmac100_enable_all_int(priv); 977 978 return 0; 979 980 err_hw: 981 free_irq(priv->irq, netdev); 982 err_irq: 983 ftmac100_free_buffers(priv); 984 err_alloc: 985 return err; 986 } 987 988 static int ftmac100_stop(struct net_device *netdev) 989 { 990 struct ftmac100 *priv = netdev_priv(netdev); 991 992 ftmac100_disable_all_int(priv); 993 netif_stop_queue(netdev); 994 napi_disable(&priv->napi); 995 ftmac100_stop_hw(priv); 996 free_irq(priv->irq, netdev); 997 ftmac100_free_buffers(priv); 998 999 return 0; 1000 } 1001 1002 static netdev_tx_t 1003 ftmac100_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) 1004 { 1005 struct ftmac100 *priv = netdev_priv(netdev); 1006 dma_addr_t map; 1007 1008 if (unlikely(skb->len > MAX_PKT_SIZE)) { 1009 if (net_ratelimit()) 1010 netdev_dbg(netdev, "tx packet too big\n"); 1011 1012 netdev->stats.tx_dropped++; 1013 dev_kfree_skb(skb); 1014 return NETDEV_TX_OK; 1015 } 1016 1017 map = dma_map_single(priv->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); 1018 if (unlikely(dma_mapping_error(priv->dev, map))) { 1019 /* drop packet */ 1020 if (net_ratelimit()) 1021 netdev_err(netdev, "map socket buffer failed\n"); 1022 1023 netdev->stats.tx_dropped++; 1024 dev_kfree_skb(skb); 1025 return NETDEV_TX_OK; 1026 } 1027 1028 return ftmac100_xmit(priv, skb, map); 1029 } 1030 1031 /* optional */ 1032 static int ftmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 1033 { 1034 struct ftmac100 *priv = netdev_priv(netdev); 1035 struct mii_ioctl_data *data = if_mii(ifr); 1036 1037 return generic_mii_ioctl(&priv->mii, data, cmd, NULL); 1038 } 1039 1040 static const struct net_device_ops ftmac100_netdev_ops = { 1041 .ndo_open = ftmac100_open, 1042 .ndo_stop = ftmac100_stop, 1043 .ndo_start_xmit = ftmac100_hard_start_xmit, 1044 .ndo_set_mac_address = eth_mac_addr, 1045 .ndo_validate_addr = eth_validate_addr, 1046 .ndo_do_ioctl = ftmac100_do_ioctl, 1047 }; 1048 1049 /****************************************************************************** 1050 * struct platform_driver functions 1051 *****************************************************************************/ 1052 static int ftmac100_probe(struct platform_device *pdev) 1053 { 1054 struct resource *res; 1055 int irq; 1056 struct net_device *netdev; 1057 struct ftmac100 *priv; 1058 int err; 1059 1060 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1061 if (!res) 1062 return -ENXIO; 1063 1064 irq = platform_get_irq(pdev, 0); 1065 if (irq < 0) 1066 return irq; 1067 1068 /* setup net_device */ 1069 netdev = alloc_etherdev(sizeof(*priv)); 1070 if (!netdev) { 1071 err = -ENOMEM; 1072 goto err_alloc_etherdev; 1073 } 1074 1075 SET_NETDEV_DEV(netdev, &pdev->dev); 1076 netdev->ethtool_ops = &ftmac100_ethtool_ops; 1077 netdev->netdev_ops = &ftmac100_netdev_ops; 1078 1079 platform_set_drvdata(pdev, netdev); 1080 1081 /* setup private data */ 1082 priv = netdev_priv(netdev); 1083 priv->netdev = netdev; 1084 priv->dev = &pdev->dev; 1085 1086 spin_lock_init(&priv->tx_lock); 1087 1088 /* initialize NAPI */ 1089 netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64); 1090 1091 /* map io memory */ 1092 priv->res = request_mem_region(res->start, resource_size(res), 1093 dev_name(&pdev->dev)); 1094 if (!priv->res) { 1095 dev_err(&pdev->dev, "Could not reserve memory region\n"); 1096 err = -ENOMEM; 1097 goto err_req_mem; 1098 } 1099 1100 priv->base = ioremap(res->start, resource_size(res)); 1101 if (!priv->base) { 1102 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n"); 1103 err = -EIO; 1104 goto err_ioremap; 1105 } 1106 1107 priv->irq = irq; 1108 1109 /* initialize struct mii_if_info */ 1110 priv->mii.phy_id = 0; 1111 priv->mii.phy_id_mask = 0x1f; 1112 priv->mii.reg_num_mask = 0x1f; 1113 priv->mii.dev = netdev; 1114 priv->mii.mdio_read = ftmac100_mdio_read; 1115 priv->mii.mdio_write = ftmac100_mdio_write; 1116 1117 /* register network device */ 1118 err = register_netdev(netdev); 1119 if (err) { 1120 dev_err(&pdev->dev, "Failed to register netdev\n"); 1121 goto err_register_netdev; 1122 } 1123 1124 netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base); 1125 1126 if (!is_valid_ether_addr(netdev->dev_addr)) { 1127 eth_hw_addr_random(netdev); 1128 netdev_info(netdev, "generated random MAC address %pM\n", 1129 netdev->dev_addr); 1130 } 1131 1132 return 0; 1133 1134 err_register_netdev: 1135 iounmap(priv->base); 1136 err_ioremap: 1137 release_resource(priv->res); 1138 err_req_mem: 1139 netif_napi_del(&priv->napi); 1140 free_netdev(netdev); 1141 err_alloc_etherdev: 1142 return err; 1143 } 1144 1145 static int ftmac100_remove(struct platform_device *pdev) 1146 { 1147 struct net_device *netdev; 1148 struct ftmac100 *priv; 1149 1150 netdev = platform_get_drvdata(pdev); 1151 priv = netdev_priv(netdev); 1152 1153 unregister_netdev(netdev); 1154 1155 iounmap(priv->base); 1156 release_resource(priv->res); 1157 1158 netif_napi_del(&priv->napi); 1159 free_netdev(netdev); 1160 return 0; 1161 } 1162 1163 static const struct of_device_id ftmac100_of_ids[] = { 1164 { .compatible = "andestech,atmac100" }, 1165 { } 1166 }; 1167 1168 static struct platform_driver ftmac100_driver = { 1169 .probe = ftmac100_probe, 1170 .remove = ftmac100_remove, 1171 .driver = { 1172 .name = DRV_NAME, 1173 .of_match_table = ftmac100_of_ids 1174 }, 1175 }; 1176 1177 /****************************************************************************** 1178 * initialization / finalization 1179 *****************************************************************************/ 1180 static int __init ftmac100_init(void) 1181 { 1182 return platform_driver_register(&ftmac100_driver); 1183 } 1184 1185 static void __exit ftmac100_exit(void) 1186 { 1187 platform_driver_unregister(&ftmac100_driver); 1188 } 1189 1190 module_init(ftmac100_init); 1191 module_exit(ftmac100_exit); 1192 1193 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>"); 1194 MODULE_DESCRIPTION("FTMAC100 driver"); 1195 MODULE_LICENSE("GPL"); 1196 MODULE_DEVICE_TABLE(of, ftmac100_of_ids); 1197