1 /* 2 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx. 3 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net) 4 * 5 * Right now, I am very wasteful with the buffers. I allocate memory 6 * pages and then divide them into 2K frame buffers. This way I know I 7 * have buffers large enough to hold one frame within one buffer descriptor. 8 * Once I get this working, I will use 64 or 128 byte CPM buffers, which 9 * will be much more memory efficient and will easily handle lots of 10 * small packets. 11 * 12 * Much better multiple PHY support by Magnus Damm. 13 * Copyright (c) 2000 Ericsson Radio Systems AB. 14 * 15 * Support for FEC controller of ColdFire processors. 16 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com) 17 * 18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 19 * Copyright (c) 2004-2006 Macq Electronique SA. 20 * 21 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. 22 */ 23 24 #include <linux/module.h> 25 #include <linux/kernel.h> 26 #include <linux/string.h> 27 #include <linux/pm_runtime.h> 28 #include <linux/ptrace.h> 29 #include <linux/errno.h> 30 #include <linux/ioport.h> 31 #include <linux/slab.h> 32 #include <linux/interrupt.h> 33 #include <linux/delay.h> 34 #include <linux/netdevice.h> 35 #include <linux/etherdevice.h> 36 #include <linux/skbuff.h> 37 #include <linux/in.h> 38 #include <linux/ip.h> 39 #include <net/ip.h> 40 #include <net/tso.h> 41 #include <linux/tcp.h> 42 #include <linux/udp.h> 43 #include <linux/icmp.h> 44 #include <linux/spinlock.h> 45 #include <linux/workqueue.h> 46 #include <linux/bitops.h> 47 #include <linux/io.h> 48 #include <linux/irq.h> 49 #include <linux/clk.h> 50 #include <linux/platform_device.h> 51 #include <linux/mdio.h> 52 #include <linux/phy.h> 53 #include <linux/fec.h> 54 #include <linux/of.h> 55 #include <linux/of_device.h> 56 #include <linux/of_gpio.h> 57 #include <linux/of_mdio.h> 58 #include <linux/of_net.h> 59 #include <linux/regulator/consumer.h> 60 #include <linux/if_vlan.h> 61 #include <linux/pinctrl/consumer.h> 62 #include <linux/prefetch.h> 63 64 #include <asm/cacheflush.h> 65 66 #include "fec.h" 67 68 static void set_multicast_list(struct net_device *ndev); 69 static void fec_enet_itr_coal_init(struct net_device *ndev); 70 71 #define DRIVER_NAME "fec" 72 73 #define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 
2 : 0)) 74 75 /* Pause frame feild and FIFO threshold */ 76 #define FEC_ENET_FCE (1 << 5) 77 #define FEC_ENET_RSEM_V 0x84 78 #define FEC_ENET_RSFL_V 16 79 #define FEC_ENET_RAEM_V 0x8 80 #define FEC_ENET_RAFL_V 0x8 81 #define FEC_ENET_OPD_V 0xFFF0 82 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 83 84 static struct platform_device_id fec_devtype[] = { 85 { 86 /* keep it for coldfire */ 87 .name = DRIVER_NAME, 88 .driver_data = 0, 89 }, { 90 .name = "imx25-fec", 91 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, 92 }, { 93 .name = "imx27-fec", 94 .driver_data = FEC_QUIRK_HAS_RACC, 95 }, { 96 .name = "imx28-fec", 97 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 98 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, 99 }, { 100 .name = "imx6q-fec", 101 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 102 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 103 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 104 FEC_QUIRK_HAS_RACC, 105 }, { 106 .name = "mvf600-fec", 107 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, 108 }, { 109 .name = "imx6sx-fec", 110 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 111 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 112 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 113 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 114 FEC_QUIRK_HAS_RACC, 115 }, { 116 /* sentinel */ 117 } 118 }; 119 MODULE_DEVICE_TABLE(platform, fec_devtype); 120 121 enum imx_fec_type { 122 IMX25_FEC = 1, /* runs on i.mx25/50/53 */ 123 IMX27_FEC, /* runs on i.mx27/35/51 */ 124 IMX28_FEC, 125 IMX6Q_FEC, 126 MVF600_FEC, 127 IMX6SX_FEC, 128 }; 129 130 static const struct of_device_id fec_dt_ids[] = { 131 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, 132 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, 133 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, 134 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 135 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, 136 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, 137 { /* sentinel */ } 138 }; 139 MODULE_DEVICE_TABLE(of, fec_dt_ids); 140 141 static unsigned char macaddr[ETH_ALEN]; 142 module_param_array(macaddr, byte, NULL, 0); 143 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 144 145 #if defined(CONFIG_M5272) 146 /* 147 * Some hardware gets it MAC address out of local flash memory. 148 * if this is non-zero then assume it is the address to get MAC from. 149 */ 150 #if defined(CONFIG_NETtel) 151 #define FEC_FLASHMAC 0xf0006006 152 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 153 #define FEC_FLASHMAC 0xf0006000 154 #elif defined(CONFIG_CANCam) 155 #define FEC_FLASHMAC 0xf0020000 156 #elif defined (CONFIG_M5272C3) 157 #define FEC_FLASHMAC (0xffe04000 + 4) 158 #elif defined(CONFIG_MOD5272) 159 #define FEC_FLASHMAC 0xffc0406b 160 #else 161 #define FEC_FLASHMAC 0 162 #endif 163 #endif /* CONFIG_M5272 */ 164 165 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 166 */ 167 #define PKT_MAXBUF_SIZE 1522 168 #define PKT_MINBUF_SIZE 64 169 #define PKT_MAXBLR_SIZE 1536 170 171 /* FEC receive acceleration */ 172 #define FEC_RACC_IPDIS (1 << 1) 173 #define FEC_RACC_PRODIS (1 << 2) 174 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 175 176 /* 177 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 178 * size bits. Other FEC hardware does not, so we need to take that into 179 * account when setting it. 
180 */ 181 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 182 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) 183 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 184 #else 185 #define OPT_FRAME_SIZE 0 186 #endif 187 188 /* FEC MII MMFR bits definition */ 189 #define FEC_MMFR_ST (1 << 30) 190 #define FEC_MMFR_OP_READ (2 << 28) 191 #define FEC_MMFR_OP_WRITE (1 << 28) 192 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 193 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 194 #define FEC_MMFR_TA (2 << 16) 195 #define FEC_MMFR_DATA(v) (v & 0xffff) 196 /* FEC ECR bits definition */ 197 #define FEC_ECR_MAGICEN (1 << 2) 198 #define FEC_ECR_SLEEP (1 << 3) 199 200 #define FEC_MII_TIMEOUT 30000 /* us */ 201 202 /* Transmitter timeout */ 203 #define TX_TIMEOUT (2 * HZ) 204 205 #define FEC_PAUSE_FLAG_AUTONEG 0x1 206 #define FEC_PAUSE_FLAG_ENABLE 0x2 207 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 208 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 209 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 210 211 #define COPYBREAK_DEFAULT 256 212 213 #define TSO_HEADER_SIZE 128 214 /* Max number of allowed TCP segments for software TSO */ 215 #define FEC_MAX_TSO_SEGS 100 216 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 217 218 #define IS_TSO_HEADER(txq, addr) \ 219 ((addr >= txq->tso_hdrs_dma) && \ 220 (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE)) 221 222 static int mii_cnt; 223 224 static inline 225 struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 226 struct fec_enet_private *fep, 227 int queue_id) 228 { 229 struct bufdesc *new_bd = bdp + 1; 230 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; 231 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; 232 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; 233 struct bufdesc_ex *ex_base; 234 struct bufdesc *base; 235 int ring_size; 236 237 if (bdp >= txq->tx_bd_base) { 238 base = txq->tx_bd_base; 239 ring_size = txq->tx_ring_size; 240 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; 241 } else { 242 base = rxq->rx_bd_base; 243 ring_size = rxq->rx_ring_size; 244 ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; 245 } 246 247 if (fep->bufdesc_ex) 248 return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? 249 ex_base : ex_new_bd); 250 else 251 return (new_bd >= (base + ring_size)) ? 252 base : new_bd; 253 } 254 255 static inline 256 struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 257 struct fec_enet_private *fep, 258 int queue_id) 259 { 260 struct bufdesc *new_bd = bdp - 1; 261 struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; 262 struct fec_enet_priv_tx_q *txq = fep->tx_queue[queue_id]; 263 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[queue_id]; 264 struct bufdesc_ex *ex_base; 265 struct bufdesc *base; 266 int ring_size; 267 268 if (bdp >= txq->tx_bd_base) { 269 base = txq->tx_bd_base; 270 ring_size = txq->tx_ring_size; 271 ex_base = (struct bufdesc_ex *)txq->tx_bd_base; 272 } else { 273 base = rxq->rx_bd_base; 274 ring_size = rxq->rx_ring_size; 275 ex_base = (struct bufdesc_ex *)rxq->rx_bd_base; 276 } 277 278 if (fep->bufdesc_ex) 279 return (struct bufdesc *)((ex_new_bd < ex_base) ? 280 (ex_new_bd + ring_size) : ex_new_bd); 281 else 282 return (new_bd < base) ? 
(new_bd + ring_size) : new_bd; 283 } 284 285 static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp, 286 struct fec_enet_private *fep) 287 { 288 return ((const char *)bdp - (const char *)base) / fep->bufdesc_size; 289 } 290 291 static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep, 292 struct fec_enet_priv_tx_q *txq) 293 { 294 int entries; 295 296 entries = ((const char *)txq->dirty_tx - 297 (const char *)txq->cur_tx) / fep->bufdesc_size - 1; 298 299 return entries > 0 ? entries : entries + txq->tx_ring_size; 300 } 301 302 static void swap_buffer(void *bufaddr, int len) 303 { 304 int i; 305 unsigned int *buf = bufaddr; 306 307 for (i = 0; i < len; i += 4, buf++) 308 swab32s(buf); 309 } 310 311 static void swap_buffer2(void *dst_buf, void *src_buf, int len) 312 { 313 int i; 314 unsigned int *src = src_buf; 315 unsigned int *dst = dst_buf; 316 317 for (i = 0; i < len; i += 4, src++, dst++) 318 *dst = swab32p(src); 319 } 320 321 static void fec_dump(struct net_device *ndev) 322 { 323 struct fec_enet_private *fep = netdev_priv(ndev); 324 struct bufdesc *bdp; 325 struct fec_enet_priv_tx_q *txq; 326 int index = 0; 327 328 netdev_info(ndev, "TX ring dump\n"); 329 pr_info("Nr SC addr len SKB\n"); 330 331 txq = fep->tx_queue[0]; 332 bdp = txq->tx_bd_base; 333 334 do { 335 pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n", 336 index, 337 bdp == txq->cur_tx ? 'S' : ' ', 338 bdp == txq->dirty_tx ? 'H' : ' ', 339 bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen, 340 txq->tx_skbuff[index]); 341 bdp = fec_enet_get_nextdesc(bdp, fep, 0); 342 index++; 343 } while (bdp != txq->tx_bd_base); 344 } 345 346 static inline bool is_ipv4_pkt(struct sk_buff *skb) 347 { 348 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 349 } 350 351 static int 352 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 353 { 354 /* Only run for packets requiring a checksum. 
*/ 355 if (skb->ip_summed != CHECKSUM_PARTIAL) 356 return 0; 357 358 if (unlikely(skb_cow_head(skb, 0))) 359 return -1; 360 361 if (is_ipv4_pkt(skb)) 362 ip_hdr(skb)->check = 0; 363 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 364 365 return 0; 366 } 367 368 static struct bufdesc * 369 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 370 struct sk_buff *skb, 371 struct net_device *ndev) 372 { 373 struct fec_enet_private *fep = netdev_priv(ndev); 374 struct bufdesc *bdp = txq->cur_tx; 375 struct bufdesc_ex *ebdp; 376 int nr_frags = skb_shinfo(skb)->nr_frags; 377 unsigned short queue = skb_get_queue_mapping(skb); 378 int frag, frag_len; 379 unsigned short status; 380 unsigned int estatus = 0; 381 skb_frag_t *this_frag; 382 unsigned int index; 383 void *bufaddr; 384 dma_addr_t addr; 385 int i; 386 387 for (frag = 0; frag < nr_frags; frag++) { 388 this_frag = &skb_shinfo(skb)->frags[frag]; 389 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 390 ebdp = (struct bufdesc_ex *)bdp; 391 392 status = bdp->cbd_sc; 393 status &= ~BD_ENET_TX_STATS; 394 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 395 frag_len = skb_shinfo(skb)->frags[frag].size; 396 397 /* Handle the last BD specially */ 398 if (frag == nr_frags - 1) { 399 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 400 if (fep->bufdesc_ex) { 401 estatus |= BD_ENET_TX_INT; 402 if (unlikely(skb_shinfo(skb)->tx_flags & 403 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 404 estatus |= BD_ENET_TX_TS; 405 } 406 } 407 408 if (fep->bufdesc_ex) { 409 if (fep->quirks & FEC_QUIRK_HAS_AVB) 410 estatus |= FEC_TX_BD_FTYPE(queue); 411 if (skb->ip_summed == CHECKSUM_PARTIAL) 412 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 413 ebdp->cbd_bdu = 0; 414 ebdp->cbd_esc = estatus; 415 } 416 417 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; 418 419 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 420 if (((unsigned long) bufaddr) & fep->tx_align || 421 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 422 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 423 bufaddr = txq->tx_bounce[index]; 424 425 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 426 swap_buffer(bufaddr, frag_len); 427 } 428 429 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 430 DMA_TO_DEVICE); 431 if (dma_mapping_error(&fep->pdev->dev, addr)) { 432 dev_kfree_skb_any(skb); 433 if (net_ratelimit()) 434 netdev_err(ndev, "Tx DMA memory map failed\n"); 435 goto dma_mapping_error; 436 } 437 438 bdp->cbd_bufaddr = addr; 439 bdp->cbd_datlen = frag_len; 440 bdp->cbd_sc = status; 441 } 442 443 return bdp; 444 dma_mapping_error: 445 bdp = txq->cur_tx; 446 for (i = 0; i < frag; i++) { 447 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 448 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 449 bdp->cbd_datlen, DMA_TO_DEVICE); 450 } 451 return ERR_PTR(-ENOMEM); 452 } 453 454 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 455 struct sk_buff *skb, struct net_device *ndev) 456 { 457 struct fec_enet_private *fep = netdev_priv(ndev); 458 int nr_frags = skb_shinfo(skb)->nr_frags; 459 struct bufdesc *bdp, *last_bdp; 460 void *bufaddr; 461 dma_addr_t addr; 462 unsigned short status; 463 unsigned short buflen; 464 unsigned short queue; 465 unsigned int estatus = 0; 466 unsigned int index; 467 int entries_free; 468 469 entries_free = fec_enet_get_free_txdesc_num(fep, txq); 470 if (entries_free < MAX_SKB_FRAGS + 1) { 471 dev_kfree_skb_any(skb); 472 if (net_ratelimit()) 473 netdev_err(ndev, "NOT enough BD for SG!\n"); 474 return NETDEV_TX_OK; 475 } 476 477 /* Protocol 
checksum off-load for TCP and UDP. */ 478 if (fec_enet_clear_csum(skb, ndev)) { 479 dev_kfree_skb_any(skb); 480 return NETDEV_TX_OK; 481 } 482 483 /* Fill in a Tx ring entry */ 484 bdp = txq->cur_tx; 485 last_bdp = bdp; 486 status = bdp->cbd_sc; 487 status &= ~BD_ENET_TX_STATS; 488 489 /* Set buffer length and buffer pointer */ 490 bufaddr = skb->data; 491 buflen = skb_headlen(skb); 492 493 queue = skb_get_queue_mapping(skb); 494 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 495 if (((unsigned long) bufaddr) & fep->tx_align || 496 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 497 memcpy(txq->tx_bounce[index], skb->data, buflen); 498 bufaddr = txq->tx_bounce[index]; 499 500 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 501 swap_buffer(bufaddr, buflen); 502 } 503 504 /* Push the data cache so the CPM does not get stale memory data. */ 505 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 506 if (dma_mapping_error(&fep->pdev->dev, addr)) { 507 dev_kfree_skb_any(skb); 508 if (net_ratelimit()) 509 netdev_err(ndev, "Tx DMA memory map failed\n"); 510 return NETDEV_TX_OK; 511 } 512 513 if (nr_frags) { 514 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 515 if (IS_ERR(last_bdp)) 516 return NETDEV_TX_OK; 517 } else { 518 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 519 if (fep->bufdesc_ex) { 520 estatus = BD_ENET_TX_INT; 521 if (unlikely(skb_shinfo(skb)->tx_flags & 522 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 523 estatus |= BD_ENET_TX_TS; 524 } 525 } 526 527 if (fep->bufdesc_ex) { 528 529 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 530 531 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 532 fep->hwts_tx_en)) 533 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 534 535 if (fep->quirks & FEC_QUIRK_HAS_AVB) 536 estatus |= FEC_TX_BD_FTYPE(queue); 537 538 if (skb->ip_summed == CHECKSUM_PARTIAL) 539 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 540 541 ebdp->cbd_bdu = 0; 542 ebdp->cbd_esc = estatus; 543 } 544 545 index = fec_enet_get_bd_index(txq->tx_bd_base, last_bdp, fep); 546 /* Save skb pointer */ 547 txq->tx_skbuff[index] = skb; 548 549 bdp->cbd_datlen = buflen; 550 bdp->cbd_bufaddr = addr; 551 552 /* Send it on its way. Tell FEC it's ready, interrupt when done, 553 * it's the last BD of the frame, and to put the CRC on the end. 554 */ 555 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 556 bdp->cbd_sc = status; 557 558 /* If this was the last BD in the ring, start at the beginning again. */ 559 bdp = fec_enet_get_nextdesc(last_bdp, fep, queue); 560 561 skb_tx_timestamp(skb); 562 563 /* Make sure the update to bdp and tx_skbuff are performed before 564 * cur_tx. 
565 */ 566 wmb(); 567 txq->cur_tx = bdp; 568 569 /* Trigger transmission start */ 570 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); 571 572 return 0; 573 } 574 575 static int 576 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 577 struct net_device *ndev, 578 struct bufdesc *bdp, int index, char *data, 579 int size, bool last_tcp, bool is_last) 580 { 581 struct fec_enet_private *fep = netdev_priv(ndev); 582 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 583 unsigned short queue = skb_get_queue_mapping(skb); 584 unsigned short status; 585 unsigned int estatus = 0; 586 dma_addr_t addr; 587 588 status = bdp->cbd_sc; 589 status &= ~BD_ENET_TX_STATS; 590 591 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 592 593 if (((unsigned long) data) & fep->tx_align || 594 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 595 memcpy(txq->tx_bounce[index], data, size); 596 data = txq->tx_bounce[index]; 597 598 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 599 swap_buffer(data, size); 600 } 601 602 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 603 if (dma_mapping_error(&fep->pdev->dev, addr)) { 604 dev_kfree_skb_any(skb); 605 if (net_ratelimit()) 606 netdev_err(ndev, "Tx DMA memory map failed\n"); 607 return NETDEV_TX_BUSY; 608 } 609 610 bdp->cbd_datlen = size; 611 bdp->cbd_bufaddr = addr; 612 613 if (fep->bufdesc_ex) { 614 if (fep->quirks & FEC_QUIRK_HAS_AVB) 615 estatus |= FEC_TX_BD_FTYPE(queue); 616 if (skb->ip_summed == CHECKSUM_PARTIAL) 617 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 618 ebdp->cbd_bdu = 0; 619 ebdp->cbd_esc = estatus; 620 } 621 622 /* Handle the last BD specially */ 623 if (last_tcp) 624 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 625 if (is_last) { 626 status |= BD_ENET_TX_INTR; 627 if (fep->bufdesc_ex) 628 ebdp->cbd_esc |= BD_ENET_TX_INT; 629 } 630 631 bdp->cbd_sc = status; 632 633 return 0; 634 } 635 636 static int 637 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 638 struct sk_buff *skb, struct net_device *ndev, 639 struct bufdesc *bdp, int index) 640 { 641 struct fec_enet_private *fep = netdev_priv(ndev); 642 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 643 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 644 unsigned short queue = skb_get_queue_mapping(skb); 645 void *bufaddr; 646 unsigned long dmabuf; 647 unsigned short status; 648 unsigned int estatus = 0; 649 650 status = bdp->cbd_sc; 651 status &= ~BD_ENET_TX_STATS; 652 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 653 654 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 655 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 656 if (((unsigned long)bufaddr) & fep->tx_align || 657 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 658 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 659 bufaddr = txq->tx_bounce[index]; 660 661 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 662 swap_buffer(bufaddr, hdr_len); 663 664 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 665 hdr_len, DMA_TO_DEVICE); 666 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 667 dev_kfree_skb_any(skb); 668 if (net_ratelimit()) 669 netdev_err(ndev, "Tx DMA memory map failed\n"); 670 return NETDEV_TX_BUSY; 671 } 672 } 673 674 bdp->cbd_bufaddr = dmabuf; 675 bdp->cbd_datlen = hdr_len; 676 677 if (fep->bufdesc_ex) { 678 if (fep->quirks & FEC_QUIRK_HAS_AVB) 679 estatus |= FEC_TX_BD_FTYPE(queue); 680 if (skb->ip_summed == CHECKSUM_PARTIAL) 681 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 682 ebdp->cbd_bdu = 0; 683 ebdp->cbd_esc = estatus; 684 } 685 686 bdp->cbd_sc = 
status; 687 688 return 0; 689 } 690 691 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 692 struct sk_buff *skb, 693 struct net_device *ndev) 694 { 695 struct fec_enet_private *fep = netdev_priv(ndev); 696 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 697 int total_len, data_left; 698 struct bufdesc *bdp = txq->cur_tx; 699 unsigned short queue = skb_get_queue_mapping(skb); 700 struct tso_t tso; 701 unsigned int index = 0; 702 int ret; 703 704 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep, txq)) { 705 dev_kfree_skb_any(skb); 706 if (net_ratelimit()) 707 netdev_err(ndev, "NOT enough BD for TSO!\n"); 708 return NETDEV_TX_OK; 709 } 710 711 /* Protocol checksum off-load for TCP and UDP. */ 712 if (fec_enet_clear_csum(skb, ndev)) { 713 dev_kfree_skb_any(skb); 714 return NETDEV_TX_OK; 715 } 716 717 /* Initialize the TSO handler, and prepare the first payload */ 718 tso_start(skb, &tso); 719 720 total_len = skb->len - hdr_len; 721 while (total_len > 0) { 722 char *hdr; 723 724 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 725 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 726 total_len -= data_left; 727 728 /* prepare packet headers: MAC + IP + TCP */ 729 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 730 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 731 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 732 if (ret) 733 goto err_release; 734 735 while (data_left > 0) { 736 int size; 737 738 size = min_t(int, tso.size, data_left); 739 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 740 index = fec_enet_get_bd_index(txq->tx_bd_base, 741 bdp, fep); 742 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 743 bdp, index, 744 tso.data, size, 745 size == data_left, 746 total_len == 0); 747 if (ret) 748 goto err_release; 749 750 data_left -= size; 751 tso_build_data(skb, &tso, size); 752 } 753 754 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 755 } 756 757 /* Save skb pointer */ 758 txq->tx_skbuff[index] = skb; 759 760 skb_tx_timestamp(skb); 761 txq->cur_tx = bdp; 762 763 /* Trigger transmission start */ 764 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 765 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || 766 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || 767 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue)) || 768 !readl(fep->hwp + FEC_X_DES_ACTIVE(queue))) 769 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue)); 770 771 return 0; 772 773 err_release: 774 /* TODO: Release all used data descriptors for TSO */ 775 return ret; 776 } 777 778 static netdev_tx_t 779 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 780 { 781 struct fec_enet_private *fep = netdev_priv(ndev); 782 int entries_free; 783 unsigned short queue; 784 struct fec_enet_priv_tx_q *txq; 785 struct netdev_queue *nq; 786 int ret; 787 788 queue = skb_get_queue_mapping(skb); 789 txq = fep->tx_queue[queue]; 790 nq = netdev_get_tx_queue(ndev, queue); 791 792 if (skb_is_gso(skb)) 793 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 794 else 795 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 796 if (ret) 797 return ret; 798 799 entries_free = fec_enet_get_free_txdesc_num(fep, txq); 800 if (entries_free <= txq->tx_stop_threshold) 801 netif_tx_stop_queue(nq); 802 803 return NETDEV_TX_OK; 804 } 805 806 /* Init RX & TX buffer descriptors 807 */ 808 static void fec_enet_bd_init(struct net_device *dev) 809 { 810 struct fec_enet_private *fep = netdev_priv(dev); 811 struct fec_enet_priv_tx_q *txq; 812 struct fec_enet_priv_rx_q *rxq; 813 struct bufdesc *bdp; 814 unsigned 
int i; 815 unsigned int q; 816 817 for (q = 0; q < fep->num_rx_queues; q++) { 818 /* Initialize the receive buffer descriptors. */ 819 rxq = fep->rx_queue[q]; 820 bdp = rxq->rx_bd_base; 821 822 for (i = 0; i < rxq->rx_ring_size; i++) { 823 824 /* Initialize the BD for every fragment in the page. */ 825 if (bdp->cbd_bufaddr) 826 bdp->cbd_sc = BD_ENET_RX_EMPTY; 827 else 828 bdp->cbd_sc = 0; 829 bdp = fec_enet_get_nextdesc(bdp, fep, q); 830 } 831 832 /* Set the last buffer to wrap */ 833 bdp = fec_enet_get_prevdesc(bdp, fep, q); 834 bdp->cbd_sc |= BD_SC_WRAP; 835 836 rxq->cur_rx = rxq->rx_bd_base; 837 } 838 839 for (q = 0; q < fep->num_tx_queues; q++) { 840 /* ...and the same for transmit */ 841 txq = fep->tx_queue[q]; 842 bdp = txq->tx_bd_base; 843 txq->cur_tx = bdp; 844 845 for (i = 0; i < txq->tx_ring_size; i++) { 846 /* Initialize the BD for every fragment in the page. */ 847 bdp->cbd_sc = 0; 848 if (txq->tx_skbuff[i]) { 849 dev_kfree_skb_any(txq->tx_skbuff[i]); 850 txq->tx_skbuff[i] = NULL; 851 } 852 bdp->cbd_bufaddr = 0; 853 bdp = fec_enet_get_nextdesc(bdp, fep, q); 854 } 855 856 /* Set the last buffer to wrap */ 857 bdp = fec_enet_get_prevdesc(bdp, fep, q); 858 bdp->cbd_sc |= BD_SC_WRAP; 859 txq->dirty_tx = bdp; 860 } 861 } 862 863 static void fec_enet_active_rxring(struct net_device *ndev) 864 { 865 struct fec_enet_private *fep = netdev_priv(ndev); 866 int i; 867 868 for (i = 0; i < fep->num_rx_queues; i++) 869 writel(0, fep->hwp + FEC_R_DES_ACTIVE(i)); 870 } 871 872 static void fec_enet_enable_ring(struct net_device *ndev) 873 { 874 struct fec_enet_private *fep = netdev_priv(ndev); 875 struct fec_enet_priv_tx_q *txq; 876 struct fec_enet_priv_rx_q *rxq; 877 int i; 878 879 for (i = 0; i < fep->num_rx_queues; i++) { 880 rxq = fep->rx_queue[i]; 881 writel(rxq->bd_dma, fep->hwp + FEC_R_DES_START(i)); 882 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 883 884 /* enable DMA1/2 */ 885 if (i) 886 writel(RCMR_MATCHEN | RCMR_CMP(i), 887 fep->hwp + FEC_RCMR(i)); 888 } 889 890 for (i = 0; i < fep->num_tx_queues; i++) { 891 txq = fep->tx_queue[i]; 892 writel(txq->bd_dma, fep->hwp + FEC_X_DES_START(i)); 893 894 /* enable DMA1/2 */ 895 if (i) 896 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 897 fep->hwp + FEC_DMA_CFG(i)); 898 } 899 } 900 901 static void fec_enet_reset_skb(struct net_device *ndev) 902 { 903 struct fec_enet_private *fep = netdev_priv(ndev); 904 struct fec_enet_priv_tx_q *txq; 905 int i, j; 906 907 for (i = 0; i < fep->num_tx_queues; i++) { 908 txq = fep->tx_queue[i]; 909 910 for (j = 0; j < txq->tx_ring_size; j++) { 911 if (txq->tx_skbuff[j]) { 912 dev_kfree_skb_any(txq->tx_skbuff[j]); 913 txq->tx_skbuff[j] = NULL; 914 } 915 } 916 } 917 } 918 919 /* 920 * This function is called to start or restart the FEC during a link 921 * change, transmit timeout, or to reconfigure the FEC. The network 922 * packet processing for this device must be stopped before this call. 923 */ 924 static void 925 fec_restart(struct net_device *ndev) 926 { 927 struct fec_enet_private *fep = netdev_priv(ndev); 928 u32 val; 929 u32 temp_mac[2]; 930 u32 rcntl = OPT_FRAME_SIZE | 0x04; 931 u32 ecntl = 0x2; /* ETHEREN */ 932 933 /* Whack a reset. We should wait for this. 934 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 935 * instead of reset MAC itself. 
936 */ 937 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 938 writel(0, fep->hwp + FEC_ECNTRL); 939 } else { 940 writel(1, fep->hwp + FEC_ECNTRL); 941 udelay(10); 942 } 943 944 /* 945 * enet-mac reset will reset mac address registers too, 946 * so need to reconfigure it. 947 */ 948 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 949 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); 950 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); 951 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); 952 } 953 954 /* Clear any outstanding interrupt. */ 955 writel(0xffffffff, fep->hwp + FEC_IEVENT); 956 957 fec_enet_bd_init(ndev); 958 959 fec_enet_enable_ring(ndev); 960 961 /* Reset tx SKB buffers. */ 962 fec_enet_reset_skb(ndev); 963 964 /* Enable MII mode */ 965 if (fep->full_duplex == DUPLEX_FULL) { 966 /* FD enable */ 967 writel(0x04, fep->hwp + FEC_X_CNTRL); 968 } else { 969 /* No Rcv on Xmit */ 970 rcntl |= 0x02; 971 writel(0x0, fep->hwp + FEC_X_CNTRL); 972 } 973 974 /* Set MII speed */ 975 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 976 977 #if !defined(CONFIG_M5272) 978 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 979 /* set RX checksum */ 980 val = readl(fep->hwp + FEC_RACC); 981 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 982 val |= FEC_RACC_OPTIONS; 983 else 984 val &= ~FEC_RACC_OPTIONS; 985 writel(val, fep->hwp + FEC_RACC); 986 } 987 #endif 988 989 /* 990 * The phy interface and speed need to get configured 991 * differently on enet-mac. 992 */ 993 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 994 /* Enable flow control and length check */ 995 rcntl |= 0x40000000 | 0x00000020; 996 997 /* RGMII, RMII or MII */ 998 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || 999 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 1000 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 1001 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 1002 rcntl |= (1 << 6); 1003 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1004 rcntl |= (1 << 8); 1005 else 1006 rcntl &= ~(1 << 8); 1007 1008 /* 1G, 100M or 10M */ 1009 if (fep->phy_dev) { 1010 if (fep->phy_dev->speed == SPEED_1000) 1011 ecntl |= (1 << 5); 1012 else if (fep->phy_dev->speed == SPEED_100) 1013 rcntl &= ~(1 << 9); 1014 else 1015 rcntl |= (1 << 9); 1016 } 1017 } else { 1018 #ifdef FEC_MIIGSK_ENR 1019 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1020 u32 cfgr; 1021 /* disable the gasket and wait */ 1022 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1023 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1024 udelay(1); 1025 1026 /* 1027 * configure the gasket: 1028 * RMII, 50 MHz, no loopback, no echo 1029 * MII, 25 MHz, no loopback, no echo 1030 */ 1031 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1032 ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 1033 if (fep->phy_dev && fep->phy_dev->speed == SPEED_10) 1034 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1035 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1036 1037 /* re-enable the gasket */ 1038 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1039 } 1040 #endif 1041 } 1042 1043 #if !defined(CONFIG_M5272) 1044 /* enable pause frame*/ 1045 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1046 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1047 fep->phy_dev && fep->phy_dev->pause)) { 1048 rcntl |= FEC_ENET_FCE; 1049 1050 /* set FIFO threshold parameter to reduce overrun */ 1051 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1052 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1053 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1054 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1055 1056 /* OPD */ 1057 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1058 } else { 1059 rcntl &= ~FEC_ENET_FCE; 1060 } 1061 #endif /* !defined(CONFIG_M5272) */ 1062 1063 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1064 1065 /* Setup multicast filter. */ 1066 set_multicast_list(ndev); 1067 #ifndef CONFIG_M5272 1068 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1069 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1070 #endif 1071 1072 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1073 /* enable ENET endian swap */ 1074 ecntl |= (1 << 8); 1075 /* enable ENET store and forward mode */ 1076 writel(1 << 8, fep->hwp + FEC_X_WMRK); 1077 } 1078 1079 if (fep->bufdesc_ex) 1080 ecntl |= (1 << 4); 1081 1082 #ifndef CONFIG_M5272 1083 /* Enable the MIB statistic event counters */ 1084 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1085 #endif 1086 1087 /* And last, enable the transmit and receive processing */ 1088 writel(ecntl, fep->hwp + FEC_ECNTRL); 1089 fec_enet_active_rxring(ndev); 1090 1091 if (fep->bufdesc_ex) 1092 fec_ptp_start_cyclecounter(ndev); 1093 1094 /* Enable interrupts we wish to service */ 1095 if (fep->link) 1096 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1097 else 1098 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); 1099 1100 /* Init the interrupt coalescing */ 1101 fec_enet_itr_coal_init(ndev); 1102 1103 } 1104 1105 static void 1106 fec_stop(struct net_device *ndev) 1107 { 1108 struct fec_enet_private *fep = netdev_priv(ndev); 1109 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1110 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); 1111 u32 val; 1112 1113 /* We cannot expect a graceful transmit stop without link !!! */ 1114 if (fep->link) { 1115 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1116 udelay(10); 1117 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 1118 netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1119 } 1120 1121 /* Whack a reset. We should wait for this. 1122 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1123 * instead of reset MAC itself. 
1124 */ 1125 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1126 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 1127 writel(0, fep->hwp + FEC_ECNTRL); 1128 } else { 1129 writel(1, fep->hwp + FEC_ECNTRL); 1130 udelay(10); 1131 } 1132 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1133 } else { 1134 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 1135 val = readl(fep->hwp + FEC_ECNTRL); 1136 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1137 writel(val, fep->hwp + FEC_ECNTRL); 1138 1139 if (pdata && pdata->sleep_mode_enable) 1140 pdata->sleep_mode_enable(true); 1141 } 1142 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1143 1144 /* We have to keep ENET enabled to have MII interrupt stay working */ 1145 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1146 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1147 writel(2, fep->hwp + FEC_ECNTRL); 1148 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1149 } 1150 } 1151 1152 1153 static void 1154 fec_timeout(struct net_device *ndev) 1155 { 1156 struct fec_enet_private *fep = netdev_priv(ndev); 1157 1158 fec_dump(ndev); 1159 1160 ndev->stats.tx_errors++; 1161 1162 schedule_work(&fep->tx_timeout_work); 1163 } 1164 1165 static void fec_enet_timeout_work(struct work_struct *work) 1166 { 1167 struct fec_enet_private *fep = 1168 container_of(work, struct fec_enet_private, tx_timeout_work); 1169 struct net_device *ndev = fep->netdev; 1170 1171 rtnl_lock(); 1172 if (netif_device_present(ndev) || netif_running(ndev)) { 1173 napi_disable(&fep->napi); 1174 netif_tx_lock_bh(ndev); 1175 fec_restart(ndev); 1176 netif_wake_queue(ndev); 1177 netif_tx_unlock_bh(ndev); 1178 napi_enable(&fep->napi); 1179 } 1180 rtnl_unlock(); 1181 } 1182 1183 static void 1184 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1185 struct skb_shared_hwtstamps *hwtstamps) 1186 { 1187 unsigned long flags; 1188 u64 ns; 1189 1190 spin_lock_irqsave(&fep->tmreg_lock, flags); 1191 ns = timecounter_cyc2time(&fep->tc, ts); 1192 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1193 1194 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1195 hwtstamps->hwtstamp = ns_to_ktime(ns); 1196 } 1197 1198 static void 1199 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1200 { 1201 struct fec_enet_private *fep; 1202 struct bufdesc *bdp; 1203 unsigned short status; 1204 struct sk_buff *skb; 1205 struct fec_enet_priv_tx_q *txq; 1206 struct netdev_queue *nq; 1207 int index = 0; 1208 int entries_free; 1209 1210 fep = netdev_priv(ndev); 1211 1212 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1213 1214 txq = fep->tx_queue[queue_id]; 1215 /* get next bdp of dirty_tx */ 1216 nq = netdev_get_tx_queue(ndev, queue_id); 1217 bdp = txq->dirty_tx; 1218 1219 /* get next bdp of dirty_tx */ 1220 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); 1221 1222 while (bdp != READ_ONCE(txq->cur_tx)) { 1223 /* Order the load of cur_tx and cbd_sc */ 1224 rmb(); 1225 status = READ_ONCE(bdp->cbd_sc); 1226 if (status & BD_ENET_TX_READY) 1227 break; 1228 1229 index = fec_enet_get_bd_index(txq->tx_bd_base, bdp, fep); 1230 1231 skb = txq->tx_skbuff[index]; 1232 txq->tx_skbuff[index] = NULL; 1233 if (!IS_TSO_HEADER(txq, bdp->cbd_bufaddr)) 1234 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1235 bdp->cbd_datlen, DMA_TO_DEVICE); 1236 bdp->cbd_bufaddr = 0; 1237 if (!skb) { 1238 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); 1239 continue; 1240 } 1241 1242 /* Check for errors. 
*/ 1243 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1244 BD_ENET_TX_RL | BD_ENET_TX_UN | 1245 BD_ENET_TX_CSL)) { 1246 ndev->stats.tx_errors++; 1247 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1248 ndev->stats.tx_heartbeat_errors++; 1249 if (status & BD_ENET_TX_LC) /* Late collision */ 1250 ndev->stats.tx_window_errors++; 1251 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1252 ndev->stats.tx_aborted_errors++; 1253 if (status & BD_ENET_TX_UN) /* Underrun */ 1254 ndev->stats.tx_fifo_errors++; 1255 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1256 ndev->stats.tx_carrier_errors++; 1257 } else { 1258 ndev->stats.tx_packets++; 1259 ndev->stats.tx_bytes += skb->len; 1260 } 1261 1262 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1263 fep->bufdesc_ex) { 1264 struct skb_shared_hwtstamps shhwtstamps; 1265 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1266 1267 fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps); 1268 skb_tstamp_tx(skb, &shhwtstamps); 1269 } 1270 1271 /* Deferred means some collisions occurred during transmit, 1272 * but we eventually sent the packet OK. 1273 */ 1274 if (status & BD_ENET_TX_DEF) 1275 ndev->stats.collisions++; 1276 1277 /* Free the sk buffer associated with this last transmit */ 1278 dev_kfree_skb_any(skb); 1279 1280 /* Make sure the update to bdp and tx_skbuff are performed 1281 * before dirty_tx 1282 */ 1283 wmb(); 1284 txq->dirty_tx = bdp; 1285 1286 /* Update pointer to next buffer descriptor to be transmitted */ 1287 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); 1288 1289 /* Since we have freed up a buffer, the ring is no longer full 1290 */ 1291 if (netif_queue_stopped(ndev)) { 1292 entries_free = fec_enet_get_free_txdesc_num(fep, txq); 1293 if (entries_free >= txq->tx_wake_threshold) 1294 netif_tx_wake_queue(nq); 1295 } 1296 } 1297 1298 /* ERR006538: Keep the transmitter going */ 1299 if (bdp != txq->cur_tx && 1300 readl(fep->hwp + FEC_X_DES_ACTIVE(queue_id)) == 0) 1301 writel(0, fep->hwp + FEC_X_DES_ACTIVE(queue_id)); 1302 } 1303 1304 static void 1305 fec_enet_tx(struct net_device *ndev) 1306 { 1307 struct fec_enet_private *fep = netdev_priv(ndev); 1308 u16 queue_id; 1309 /* First process class A queue, then Class B and Best Effort queue */ 1310 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { 1311 clear_bit(queue_id, &fep->work_tx); 1312 fec_enet_tx_queue(ndev, queue_id); 1313 } 1314 return; 1315 } 1316 1317 static int 1318 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) 1319 { 1320 struct fec_enet_private *fep = netdev_priv(ndev); 1321 int off; 1322 1323 off = ((unsigned long)skb->data) & fep->rx_align; 1324 if (off) 1325 skb_reserve(skb, fep->rx_align + 1 - off); 1326 1327 bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, 1328 FEC_ENET_RX_FRSIZE - fep->rx_align, 1329 DMA_FROM_DEVICE); 1330 if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { 1331 if (net_ratelimit()) 1332 netdev_err(ndev, "Rx DMA memory map failed\n"); 1333 return -ENOMEM; 1334 } 1335 1336 return 0; 1337 } 1338 1339 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, 1340 struct bufdesc *bdp, u32 length, bool swap) 1341 { 1342 struct fec_enet_private *fep = netdev_priv(ndev); 1343 struct sk_buff *new_skb; 1344 1345 if (length > fep->rx_copybreak) 1346 return false; 1347 1348 new_skb = netdev_alloc_skb(ndev, length); 1349 if (!new_skb) 1350 return false; 1351 1352 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1353 FEC_ENET_RX_FRSIZE - fep->rx_align, 1354 
DMA_FROM_DEVICE); 1355 if (!swap) 1356 memcpy(new_skb->data, (*skb)->data, length); 1357 else 1358 swap_buffer2(new_skb->data, (*skb)->data, length); 1359 *skb = new_skb; 1360 1361 return true; 1362 } 1363 1364 /* During a receive, the cur_rx points to the current incoming buffer. 1365 * When we update through the ring, if the next incoming buffer has 1366 * not been given to the system, we just set the empty indicator, 1367 * effectively tossing the packet. 1368 */ 1369 static int 1370 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) 1371 { 1372 struct fec_enet_private *fep = netdev_priv(ndev); 1373 struct fec_enet_priv_rx_q *rxq; 1374 struct bufdesc *bdp; 1375 unsigned short status; 1376 struct sk_buff *skb_new = NULL; 1377 struct sk_buff *skb; 1378 ushort pkt_len; 1379 __u8 *data; 1380 int pkt_received = 0; 1381 struct bufdesc_ex *ebdp = NULL; 1382 bool vlan_packet_rcvd = false; 1383 u16 vlan_tag; 1384 int index = 0; 1385 bool is_copybreak; 1386 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 1387 1388 #ifdef CONFIG_M532x 1389 flush_cache_all(); 1390 #endif 1391 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1392 rxq = fep->rx_queue[queue_id]; 1393 1394 /* First, grab all of the stats for the incoming packet. 1395 * These get messed up if we get called due to a busy condition. 1396 */ 1397 bdp = rxq->cur_rx; 1398 1399 while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { 1400 1401 if (pkt_received >= budget) 1402 break; 1403 pkt_received++; 1404 1405 /* Since we have allocated space to hold a complete frame, 1406 * the last indicator should be set. 1407 */ 1408 if ((status & BD_ENET_RX_LAST) == 0) 1409 netdev_err(ndev, "rcv is not +last\n"); 1410 1411 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); 1412 1413 /* Check for errors. */ 1414 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1415 BD_ENET_RX_CR | BD_ENET_RX_OV)) { 1416 ndev->stats.rx_errors++; 1417 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { 1418 /* Frame too long or too short. */ 1419 ndev->stats.rx_length_errors++; 1420 } 1421 if (status & BD_ENET_RX_NO) /* Frame alignment */ 1422 ndev->stats.rx_frame_errors++; 1423 if (status & BD_ENET_RX_CR) /* CRC Error */ 1424 ndev->stats.rx_crc_errors++; 1425 if (status & BD_ENET_RX_OV) /* FIFO overrun */ 1426 ndev->stats.rx_fifo_errors++; 1427 } 1428 1429 /* Report late collisions as a frame error. 1430 * On this error, the BD is closed, but we don't know what we 1431 * have in the buffer. So, just drop this frame on the floor. 1432 */ 1433 if (status & BD_ENET_RX_CL) { 1434 ndev->stats.rx_errors++; 1435 ndev->stats.rx_frame_errors++; 1436 goto rx_processing_done; 1437 } 1438 1439 /* Process the incoming frame. */ 1440 ndev->stats.rx_packets++; 1441 pkt_len = bdp->cbd_datlen; 1442 ndev->stats.rx_bytes += pkt_len; 1443 1444 index = fec_enet_get_bd_index(rxq->rx_bd_base, bdp, fep); 1445 skb = rxq->rx_skbuff[index]; 1446 1447 /* The packet length includes FCS, but we don't want to 1448 * include that when passing upstream as it messes up 1449 * bridging applications. 
1450 */ 1451 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, 1452 need_swap); 1453 if (!is_copybreak) { 1454 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 1455 if (unlikely(!skb_new)) { 1456 ndev->stats.rx_dropped++; 1457 goto rx_processing_done; 1458 } 1459 dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, 1460 FEC_ENET_RX_FRSIZE - fep->rx_align, 1461 DMA_FROM_DEVICE); 1462 } 1463 1464 prefetch(skb->data - NET_IP_ALIGN); 1465 skb_put(skb, pkt_len - 4); 1466 data = skb->data; 1467 if (!is_copybreak && need_swap) 1468 swap_buffer(data, pkt_len); 1469 1470 /* Extract the enhanced buffer descriptor */ 1471 ebdp = NULL; 1472 if (fep->bufdesc_ex) 1473 ebdp = (struct bufdesc_ex *)bdp; 1474 1475 /* If this is a VLAN packet remove the VLAN Tag */ 1476 vlan_packet_rcvd = false; 1477 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1478 fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { 1479 /* Push and remove the vlan tag */ 1480 struct vlan_hdr *vlan_header = 1481 (struct vlan_hdr *) (data + ETH_HLEN); 1482 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1483 1484 vlan_packet_rcvd = true; 1485 1486 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); 1487 skb_pull(skb, VLAN_HLEN); 1488 } 1489 1490 skb->protocol = eth_type_trans(skb, ndev); 1491 1492 /* Get receive timestamp from the skb */ 1493 if (fep->hwts_rx_en && fep->bufdesc_ex) 1494 fec_enet_hwtstamp(fep, ebdp->ts, 1495 skb_hwtstamps(skb)); 1496 1497 if (fep->bufdesc_ex && 1498 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1499 if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { 1500 /* don't check it */ 1501 skb->ip_summed = CHECKSUM_UNNECESSARY; 1502 } else { 1503 skb_checksum_none_assert(skb); 1504 } 1505 } 1506 1507 /* Handle received VLAN packets */ 1508 if (vlan_packet_rcvd) 1509 __vlan_hwaccel_put_tag(skb, 1510 htons(ETH_P_8021Q), 1511 vlan_tag); 1512 1513 napi_gro_receive(&fep->napi, skb); 1514 1515 if (is_copybreak) { 1516 dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, 1517 FEC_ENET_RX_FRSIZE - fep->rx_align, 1518 DMA_FROM_DEVICE); 1519 } else { 1520 rxq->rx_skbuff[index] = skb_new; 1521 fec_enet_new_rxbdp(ndev, bdp, skb_new); 1522 } 1523 1524 rx_processing_done: 1525 /* Clear the status flags for this buffer */ 1526 status &= ~BD_ENET_RX_STATS; 1527 1528 /* Mark the buffer empty */ 1529 status |= BD_ENET_RX_EMPTY; 1530 bdp->cbd_sc = status; 1531 1532 if (fep->bufdesc_ex) { 1533 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1534 1535 ebdp->cbd_esc = BD_ENET_RX_INT; 1536 ebdp->cbd_prot = 0; 1537 ebdp->cbd_bdu = 0; 1538 } 1539 1540 /* Update BD pointer to next entry */ 1541 bdp = fec_enet_get_nextdesc(bdp, fep, queue_id); 1542 1543 /* Doing this here will keep the FEC running while we process 1544 * incoming frames. On a heavily loaded network, we should be 1545 * able to keep up at the expense of system resources. 
1546 */ 1547 writel(0, fep->hwp + FEC_R_DES_ACTIVE(queue_id)); 1548 } 1549 rxq->cur_rx = bdp; 1550 return pkt_received; 1551 } 1552 1553 static int 1554 fec_enet_rx(struct net_device *ndev, int budget) 1555 { 1556 int pkt_received = 0; 1557 u16 queue_id; 1558 struct fec_enet_private *fep = netdev_priv(ndev); 1559 1560 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1561 clear_bit(queue_id, &fep->work_rx); 1562 pkt_received += fec_enet_rx_queue(ndev, 1563 budget - pkt_received, queue_id); 1564 } 1565 return pkt_received; 1566 } 1567 1568 static bool 1569 fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) 1570 { 1571 if (int_events == 0) 1572 return false; 1573 1574 if (int_events & FEC_ENET_RXF) 1575 fep->work_rx |= (1 << 2); 1576 if (int_events & FEC_ENET_RXF_1) 1577 fep->work_rx |= (1 << 0); 1578 if (int_events & FEC_ENET_RXF_2) 1579 fep->work_rx |= (1 << 1); 1580 1581 if (int_events & FEC_ENET_TXF) 1582 fep->work_tx |= (1 << 2); 1583 if (int_events & FEC_ENET_TXF_1) 1584 fep->work_tx |= (1 << 0); 1585 if (int_events & FEC_ENET_TXF_2) 1586 fep->work_tx |= (1 << 1); 1587 1588 return true; 1589 } 1590 1591 static irqreturn_t 1592 fec_enet_interrupt(int irq, void *dev_id) 1593 { 1594 struct net_device *ndev = dev_id; 1595 struct fec_enet_private *fep = netdev_priv(ndev); 1596 uint int_events; 1597 irqreturn_t ret = IRQ_NONE; 1598 1599 int_events = readl(fep->hwp + FEC_IEVENT); 1600 writel(int_events, fep->hwp + FEC_IEVENT); 1601 fec_enet_collect_events(fep, int_events); 1602 1603 if ((fep->work_tx || fep->work_rx) && fep->link) { 1604 ret = IRQ_HANDLED; 1605 1606 if (napi_schedule_prep(&fep->napi)) { 1607 /* Disable the NAPI interrupts */ 1608 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); 1609 __napi_schedule(&fep->napi); 1610 } 1611 } 1612 1613 if (int_events & FEC_ENET_MII) { 1614 ret = IRQ_HANDLED; 1615 complete(&fep->mdio_done); 1616 } 1617 1618 if (fep->ptp_clock) 1619 fec_ptp_check_pps_event(fep); 1620 1621 return ret; 1622 } 1623 1624 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1625 { 1626 struct net_device *ndev = napi->dev; 1627 struct fec_enet_private *fep = netdev_priv(ndev); 1628 int pkts; 1629 1630 pkts = fec_enet_rx(ndev, budget); 1631 1632 fec_enet_tx(ndev); 1633 1634 if (pkts < budget) { 1635 napi_complete(napi); 1636 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1637 } 1638 return pkts; 1639 } 1640 1641 /* ------------------------------------------------------------------------- */ 1642 static void fec_get_mac(struct net_device *ndev) 1643 { 1644 struct fec_enet_private *fep = netdev_priv(ndev); 1645 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 1646 unsigned char *iap, tmpaddr[ETH_ALEN]; 1647 1648 /* 1649 * try to get mac address in following order: 1650 * 1651 * 1) module parameter via kernel command line in form 1652 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 1653 */ 1654 iap = macaddr; 1655 1656 /* 1657 * 2) from device tree data 1658 */ 1659 if (!is_valid_ether_addr(iap)) { 1660 struct device_node *np = fep->pdev->dev.of_node; 1661 if (np) { 1662 const char *mac = of_get_mac_address(np); 1663 if (mac) 1664 iap = (unsigned char *) mac; 1665 } 1666 } 1667 1668 /* 1669 * 3) from flash or fuse (via platform data) 1670 */ 1671 if (!is_valid_ether_addr(iap)) { 1672 #ifdef CONFIG_M5272 1673 if (FEC_FLASHMAC) 1674 iap = (unsigned char *)FEC_FLASHMAC; 1675 #else 1676 if (pdata) 1677 iap = (unsigned char *)&pdata->mac; 1678 #endif 1679 } 1680 1681 /* 1682 * 4) FEC mac registers set by bootloader 
1683 */ 1684 if (!is_valid_ether_addr(iap)) { 1685 *((__be32 *) &tmpaddr[0]) = 1686 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 1687 *((__be16 *) &tmpaddr[4]) = 1688 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1689 iap = &tmpaddr[0]; 1690 } 1691 1692 /* 1693 * 5) random mac address 1694 */ 1695 if (!is_valid_ether_addr(iap)) { 1696 /* Report it and use a random ethernet address instead */ 1697 netdev_err(ndev, "Invalid MAC address: %pM\n", iap); 1698 eth_hw_addr_random(ndev); 1699 netdev_info(ndev, "Using random MAC address: %pM\n", 1700 ndev->dev_addr); 1701 return; 1702 } 1703 1704 memcpy(ndev->dev_addr, iap, ETH_ALEN); 1705 1706 /* Adjust MAC if using macaddr */ 1707 if (iap == macaddr) 1708 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; 1709 } 1710 1711 /* ------------------------------------------------------------------------- */ 1712 1713 /* 1714 * Phy section 1715 */ 1716 static void fec_enet_adjust_link(struct net_device *ndev) 1717 { 1718 struct fec_enet_private *fep = netdev_priv(ndev); 1719 struct phy_device *phy_dev = fep->phy_dev; 1720 int status_change = 0; 1721 1722 /* Prevent a state halted on mii error */ 1723 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { 1724 phy_dev->state = PHY_RESUMING; 1725 return; 1726 } 1727 1728 /* 1729 * If the netdev is down, or is going down, we're not interested 1730 * in link state events, so just mark our idea of the link as down 1731 * and ignore the event. 1732 */ 1733 if (!netif_running(ndev) || !netif_device_present(ndev)) { 1734 fep->link = 0; 1735 } else if (phy_dev->link) { 1736 if (!fep->link) { 1737 fep->link = phy_dev->link; 1738 status_change = 1; 1739 } 1740 1741 if (fep->full_duplex != phy_dev->duplex) { 1742 fep->full_duplex = phy_dev->duplex; 1743 status_change = 1; 1744 } 1745 1746 if (phy_dev->speed != fep->speed) { 1747 fep->speed = phy_dev->speed; 1748 status_change = 1; 1749 } 1750 1751 /* if any of the above changed restart the FEC */ 1752 if (status_change) { 1753 napi_disable(&fep->napi); 1754 netif_tx_lock_bh(ndev); 1755 fec_restart(ndev); 1756 netif_wake_queue(ndev); 1757 netif_tx_unlock_bh(ndev); 1758 napi_enable(&fep->napi); 1759 } 1760 } else { 1761 if (fep->link) { 1762 napi_disable(&fep->napi); 1763 netif_tx_lock_bh(ndev); 1764 fec_stop(ndev); 1765 netif_tx_unlock_bh(ndev); 1766 napi_enable(&fep->napi); 1767 fep->link = phy_dev->link; 1768 status_change = 1; 1769 } 1770 } 1771 1772 if (status_change) 1773 phy_print_status(phy_dev); 1774 } 1775 1776 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1777 { 1778 struct fec_enet_private *fep = bus->priv; 1779 struct device *dev = &fep->pdev->dev; 1780 unsigned long time_left; 1781 int ret = 0; 1782 1783 ret = pm_runtime_get_sync(dev); 1784 if (ret < 0) 1785 return ret; 1786 1787 fep->mii_timeout = 0; 1788 reinit_completion(&fep->mdio_done); 1789 1790 /* start a read op */ 1791 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 1792 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1793 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 1794 1795 /* wait for end of transfer */ 1796 time_left = wait_for_completion_timeout(&fep->mdio_done, 1797 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1798 if (time_left == 0) { 1799 fep->mii_timeout = 1; 1800 netdev_err(fep->netdev, "MDIO read timeout\n"); 1801 ret = -ETIMEDOUT; 1802 goto out; 1803 } 1804 1805 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1806 1807 out: 1808 pm_runtime_mark_last_busy(dev); 1809 pm_runtime_put_autosuspend(dev); 1810 1811 return ret; 1812 } 1813 1814 static int 
fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1815 u16 value) 1816 { 1817 struct fec_enet_private *fep = bus->priv; 1818 struct device *dev = &fep->pdev->dev; 1819 unsigned long time_left; 1820 int ret; 1821 1822 ret = pm_runtime_get_sync(dev); 1823 if (ret < 0) 1824 return ret; 1825 else 1826 ret = 0; 1827 1828 fep->mii_timeout = 0; 1829 reinit_completion(&fep->mdio_done); 1830 1831 /* start a write op */ 1832 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | 1833 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1834 FEC_MMFR_TA | FEC_MMFR_DATA(value), 1835 fep->hwp + FEC_MII_DATA); 1836 1837 /* wait for end of transfer */ 1838 time_left = wait_for_completion_timeout(&fep->mdio_done, 1839 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1840 if (time_left == 0) { 1841 fep->mii_timeout = 1; 1842 netdev_err(fep->netdev, "MDIO write timeout\n"); 1843 ret = -ETIMEDOUT; 1844 } 1845 1846 pm_runtime_mark_last_busy(dev); 1847 pm_runtime_put_autosuspend(dev); 1848 1849 return ret; 1850 } 1851 1852 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1853 { 1854 struct fec_enet_private *fep = netdev_priv(ndev); 1855 int ret; 1856 1857 if (enable) { 1858 ret = clk_prepare_enable(fep->clk_ahb); 1859 if (ret) 1860 return ret; 1861 if (fep->clk_enet_out) { 1862 ret = clk_prepare_enable(fep->clk_enet_out); 1863 if (ret) 1864 goto failed_clk_enet_out; 1865 } 1866 if (fep->clk_ptp) { 1867 mutex_lock(&fep->ptp_clk_mutex); 1868 ret = clk_prepare_enable(fep->clk_ptp); 1869 if (ret) { 1870 mutex_unlock(&fep->ptp_clk_mutex); 1871 goto failed_clk_ptp; 1872 } else { 1873 fep->ptp_clk_on = true; 1874 } 1875 mutex_unlock(&fep->ptp_clk_mutex); 1876 } 1877 if (fep->clk_ref) { 1878 ret = clk_prepare_enable(fep->clk_ref); 1879 if (ret) 1880 goto failed_clk_ref; 1881 } 1882 } else { 1883 clk_disable_unprepare(fep->clk_ahb); 1884 if (fep->clk_enet_out) 1885 clk_disable_unprepare(fep->clk_enet_out); 1886 if (fep->clk_ptp) { 1887 mutex_lock(&fep->ptp_clk_mutex); 1888 clk_disable_unprepare(fep->clk_ptp); 1889 fep->ptp_clk_on = false; 1890 mutex_unlock(&fep->ptp_clk_mutex); 1891 } 1892 if (fep->clk_ref) 1893 clk_disable_unprepare(fep->clk_ref); 1894 } 1895 1896 return 0; 1897 1898 failed_clk_ref: 1899 if (fep->clk_ref) 1900 clk_disable_unprepare(fep->clk_ref); 1901 failed_clk_ptp: 1902 if (fep->clk_enet_out) 1903 clk_disable_unprepare(fep->clk_enet_out); 1904 failed_clk_enet_out: 1905 clk_disable_unprepare(fep->clk_ahb); 1906 1907 return ret; 1908 } 1909 1910 static int fec_enet_mii_probe(struct net_device *ndev) 1911 { 1912 struct fec_enet_private *fep = netdev_priv(ndev); 1913 struct phy_device *phy_dev = NULL; 1914 char mdio_bus_id[MII_BUS_ID_SIZE]; 1915 char phy_name[MII_BUS_ID_SIZE + 3]; 1916 int phy_id; 1917 int dev_id = fep->dev_id; 1918 1919 fep->phy_dev = NULL; 1920 1921 if (fep->phy_node) { 1922 phy_dev = of_phy_connect(ndev, fep->phy_node, 1923 &fec_enet_adjust_link, 0, 1924 fep->phy_interface); 1925 if (!phy_dev) 1926 return -ENODEV; 1927 } else { 1928 /* check for attached phy */ 1929 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 1930 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 1931 continue; 1932 if (dev_id--) 1933 continue; 1934 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 1935 break; 1936 } 1937 1938 if (phy_id >= PHY_MAX_ADDR) { 1939 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 1940 strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 1941 phy_id = 0; 1942 } 1943 1944 snprintf(phy_name, sizeof(phy_name), 1945 PHY_ID_FMT, mdio_bus_id, phy_id); 
1946 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 1947 fep->phy_interface); 1948 } 1949 1950 if (IS_ERR(phy_dev)) { 1951 netdev_err(ndev, "could not attach to PHY\n"); 1952 return PTR_ERR(phy_dev); 1953 } 1954 1955 /* mask with MAC supported features */ 1956 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 1957 phy_dev->supported &= PHY_GBIT_FEATURES; 1958 phy_dev->supported &= ~SUPPORTED_1000baseT_Half; 1959 #if !defined(CONFIG_M5272) 1960 phy_dev->supported |= SUPPORTED_Pause; 1961 #endif 1962 } 1963 else 1964 phy_dev->supported &= PHY_BASIC_FEATURES; 1965 1966 phy_dev->advertising = phy_dev->supported; 1967 1968 fep->phy_dev = phy_dev; 1969 fep->link = 0; 1970 fep->full_duplex = 0; 1971 1972 phy_attached_info(phy_dev); 1973 1974 return 0; 1975 } 1976 1977 static int fec_enet_mii_init(struct platform_device *pdev) 1978 { 1979 static struct mii_bus *fec0_mii_bus; 1980 struct net_device *ndev = platform_get_drvdata(pdev); 1981 struct fec_enet_private *fep = netdev_priv(ndev); 1982 struct device_node *node; 1983 int err = -ENXIO; 1984 u32 mii_speed, holdtime; 1985 1986 /* 1987 * The i.MX28 dual fec interfaces are not equal. 1988 * Here are the differences: 1989 * 1990 * - fec0 supports MII & RMII modes while fec1 only supports RMII 1991 * - fec0 acts as the 1588 time master while fec1 is slave 1992 * - external phys can only be configured by fec0 1993 * 1994 * That is to say fec1 can not work independently. It only works 1995 * when fec0 is working. The reason behind this design is that the 1996 * second interface is added primarily for Switch mode. 1997 * 1998 * Because of the last point above, both phys are attached on fec0 1999 * mdio interface in board design, and need to be configured by 2000 * fec0 mii_bus. 2001 */ 2002 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2003 /* fec1 uses fec0 mii_bus */ 2004 if (mii_cnt && fec0_mii_bus) { 2005 fep->mii_bus = fec0_mii_bus; 2006 mii_cnt++; 2007 return 0; 2008 } 2009 return -ENOENT; 2010 } 2011 2012 fep->mii_timeout = 0; 2013 2014 /* 2015 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) 2016 * 2017 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2018 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2019 * Reference Manual has an error on this, and gets fixed on i.MX6Q 2020 * document. 2021 */ 2022 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); 2023 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2024 mii_speed--; 2025 if (mii_speed > 63) { 2026 dev_err(&pdev->dev, 2027 "fec clock (%lu) to fast to get right mii speed\n", 2028 clk_get_rate(fep->clk_ipg)); 2029 err = -EINVAL; 2030 goto err_out; 2031 } 2032 2033 /* 2034 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka 2035 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2036 * versions are RAZ there, so just ignore the difference and write the 2037 * register always. 2038 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. 2039 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2040 * output. 2041 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2042 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2043 * holdtime cannot result in a value greater than 3. 
2044 */ 2045 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2046 2047 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2048 2049 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2050 2051 fep->mii_bus = mdiobus_alloc(); 2052 if (fep->mii_bus == NULL) { 2053 err = -ENOMEM; 2054 goto err_out; 2055 } 2056 2057 fep->mii_bus->name = "fec_enet_mii_bus"; 2058 fep->mii_bus->read = fec_enet_mdio_read; 2059 fep->mii_bus->write = fec_enet_mdio_write; 2060 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2061 pdev->name, fep->dev_id + 1); 2062 fep->mii_bus->priv = fep; 2063 fep->mii_bus->parent = &pdev->dev; 2064 2065 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2066 if (node) { 2067 err = of_mdiobus_register(fep->mii_bus, node); 2068 of_node_put(node); 2069 } else { 2070 err = mdiobus_register(fep->mii_bus); 2071 } 2072 2073 if (err) 2074 goto err_out_free_mdiobus; 2075 2076 mii_cnt++; 2077 2078 /* save fec0 mii_bus */ 2079 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2080 fec0_mii_bus = fep->mii_bus; 2081 2082 return 0; 2083 2084 err_out_free_mdiobus: 2085 mdiobus_free(fep->mii_bus); 2086 err_out: 2087 return err; 2088 } 2089 2090 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2091 { 2092 if (--mii_cnt == 0) { 2093 mdiobus_unregister(fep->mii_bus); 2094 mdiobus_free(fep->mii_bus); 2095 } 2096 } 2097 2098 static int fec_enet_get_settings(struct net_device *ndev, 2099 struct ethtool_cmd *cmd) 2100 { 2101 struct fec_enet_private *fep = netdev_priv(ndev); 2102 struct phy_device *phydev = fep->phy_dev; 2103 2104 if (!phydev) 2105 return -ENODEV; 2106 2107 return phy_ethtool_gset(phydev, cmd); 2108 } 2109 2110 static int fec_enet_set_settings(struct net_device *ndev, 2111 struct ethtool_cmd *cmd) 2112 { 2113 struct fec_enet_private *fep = netdev_priv(ndev); 2114 struct phy_device *phydev = fep->phy_dev; 2115 2116 if (!phydev) 2117 return -ENODEV; 2118 2119 return phy_ethtool_sset(phydev, cmd); 2120 } 2121 2122 static void fec_enet_get_drvinfo(struct net_device *ndev, 2123 struct ethtool_drvinfo *info) 2124 { 2125 struct fec_enet_private *fep = netdev_priv(ndev); 2126 2127 strlcpy(info->driver, fep->pdev->dev.driver->name, 2128 sizeof(info->driver)); 2129 strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); 2130 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2131 } 2132 2133 static int fec_enet_get_regs_len(struct net_device *ndev) 2134 { 2135 struct fec_enet_private *fep = netdev_priv(ndev); 2136 struct resource *r; 2137 int s = 0; 2138 2139 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2140 if (r) 2141 s = resource_size(r); 2142 2143 return s; 2144 } 2145 2146 /* List of registers that can be safety be read to dump them with ethtool */ 2147 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2148 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \ 2149 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28) 2150 static u32 fec_enet_register_offset[] = { 2151 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2152 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2153 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2154 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2155 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2156 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2157 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2158 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, 
FEC_R_DES_START_0, 2159 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2160 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2161 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2162 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2163 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2164 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2165 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2166 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2167 RMON_T_P_GTE2048, RMON_T_OCTETS, 2168 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2169 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2170 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2171 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2172 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2173 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2174 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2175 RMON_R_P_GTE2048, RMON_R_OCTETS, 2176 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2177 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2178 }; 2179 #else 2180 static u32 fec_enet_register_offset[] = { 2181 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2182 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2183 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2184 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2185 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2186 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2187 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2188 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2189 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2190 }; 2191 #endif 2192 2193 static void fec_enet_get_regs(struct net_device *ndev, 2194 struct ethtool_regs *regs, void *regbuf) 2195 { 2196 struct fec_enet_private *fep = netdev_priv(ndev); 2197 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2198 u32 *buf = (u32 *)regbuf; 2199 u32 i, off; 2200 2201 memset(buf, 0, regs->len); 2202 2203 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2204 off = fec_enet_register_offset[i] / 4; 2205 buf[off] = readl(&theregs[off]); 2206 } 2207 } 2208 2209 static int fec_enet_get_ts_info(struct net_device *ndev, 2210 struct ethtool_ts_info *info) 2211 { 2212 struct fec_enet_private *fep = netdev_priv(ndev); 2213 2214 if (fep->bufdesc_ex) { 2215 2216 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2217 SOF_TIMESTAMPING_RX_SOFTWARE | 2218 SOF_TIMESTAMPING_SOFTWARE | 2219 SOF_TIMESTAMPING_TX_HARDWARE | 2220 SOF_TIMESTAMPING_RX_HARDWARE | 2221 SOF_TIMESTAMPING_RAW_HARDWARE; 2222 if (fep->ptp_clock) 2223 info->phc_index = ptp_clock_index(fep->ptp_clock); 2224 else 2225 info->phc_index = -1; 2226 2227 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2228 (1 << HWTSTAMP_TX_ON); 2229 2230 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2231 (1 << HWTSTAMP_FILTER_ALL); 2232 return 0; 2233 } else { 2234 return ethtool_op_get_ts_info(ndev, info); 2235 } 2236 } 2237 2238 #if !defined(CONFIG_M5272) 2239 2240 static void fec_enet_get_pauseparam(struct net_device *ndev, 2241 struct ethtool_pauseparam *pause) 2242 { 2243 struct fec_enet_private *fep = netdev_priv(ndev); 2244 2245 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2246 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2247 
pause->rx_pause = pause->tx_pause; 2248 } 2249 2250 static int fec_enet_set_pauseparam(struct net_device *ndev, 2251 struct ethtool_pauseparam *pause) 2252 { 2253 struct fec_enet_private *fep = netdev_priv(ndev); 2254 2255 if (!fep->phy_dev) 2256 return -ENODEV; 2257 2258 if (pause->tx_pause != pause->rx_pause) { 2259 netdev_info(ndev, 2260 "hardware only support enable/disable both tx and rx"); 2261 return -EINVAL; 2262 } 2263 2264 fep->pause_flag = 0; 2265 2266 /* tx pause must be same as rx pause */ 2267 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2268 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0; 2269 2270 if (pause->rx_pause || pause->autoneg) { 2271 fep->phy_dev->supported |= ADVERTISED_Pause; 2272 fep->phy_dev->advertising |= ADVERTISED_Pause; 2273 } else { 2274 fep->phy_dev->supported &= ~ADVERTISED_Pause; 2275 fep->phy_dev->advertising &= ~ADVERTISED_Pause; 2276 } 2277 2278 if (pause->autoneg) { 2279 if (netif_running(ndev)) 2280 fec_stop(ndev); 2281 phy_start_aneg(fep->phy_dev); 2282 } 2283 if (netif_running(ndev)) { 2284 napi_disable(&fep->napi); 2285 netif_tx_lock_bh(ndev); 2286 fec_restart(ndev); 2287 netif_wake_queue(ndev); 2288 netif_tx_unlock_bh(ndev); 2289 napi_enable(&fep->napi); 2290 } 2291 2292 return 0; 2293 } 2294 2295 static const struct fec_stat { 2296 char name[ETH_GSTRING_LEN]; 2297 u16 offset; 2298 } fec_stats[] = { 2299 /* RMON TX */ 2300 { "tx_dropped", RMON_T_DROP }, 2301 { "tx_packets", RMON_T_PACKETS }, 2302 { "tx_broadcast", RMON_T_BC_PKT }, 2303 { "tx_multicast", RMON_T_MC_PKT }, 2304 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2305 { "tx_undersize", RMON_T_UNDERSIZE }, 2306 { "tx_oversize", RMON_T_OVERSIZE }, 2307 { "tx_fragment", RMON_T_FRAG }, 2308 { "tx_jabber", RMON_T_JAB }, 2309 { "tx_collision", RMON_T_COL }, 2310 { "tx_64byte", RMON_T_P64 }, 2311 { "tx_65to127byte", RMON_T_P65TO127 }, 2312 { "tx_128to255byte", RMON_T_P128TO255 }, 2313 { "tx_256to511byte", RMON_T_P256TO511 }, 2314 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2315 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2316 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2317 { "tx_octets", RMON_T_OCTETS }, 2318 2319 /* IEEE TX */ 2320 { "IEEE_tx_drop", IEEE_T_DROP }, 2321 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2322 { "IEEE_tx_1col", IEEE_T_1COL }, 2323 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2324 { "IEEE_tx_def", IEEE_T_DEF }, 2325 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2326 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2327 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2328 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2329 { "IEEE_tx_sqe", IEEE_T_SQE }, 2330 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2331 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2332 2333 /* RMON RX */ 2334 { "rx_packets", RMON_R_PACKETS }, 2335 { "rx_broadcast", RMON_R_BC_PKT }, 2336 { "rx_multicast", RMON_R_MC_PKT }, 2337 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2338 { "rx_undersize", RMON_R_UNDERSIZE }, 2339 { "rx_oversize", RMON_R_OVERSIZE }, 2340 { "rx_fragment", RMON_R_FRAG }, 2341 { "rx_jabber", RMON_R_JAB }, 2342 { "rx_64byte", RMON_R_P64 }, 2343 { "rx_65to127byte", RMON_R_P65TO127 }, 2344 { "rx_128to255byte", RMON_R_P128TO255 }, 2345 { "rx_256to511byte", RMON_R_P256TO511 }, 2346 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2347 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2348 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2349 { "rx_octets", RMON_R_OCTETS }, 2350 2351 /* IEEE RX */ 2352 { "IEEE_rx_drop", IEEE_R_DROP }, 2353 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2354 { "IEEE_rx_crc", IEEE_R_CRC }, 2355 { "IEEE_rx_align", IEEE_R_ALIGN }, 2356 { 
"IEEE_rx_macerr", IEEE_R_MACERR }, 2357 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2358 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2359 }; 2360 2361 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2362 struct ethtool_stats *stats, u64 *data) 2363 { 2364 struct fec_enet_private *fep = netdev_priv(dev); 2365 int i; 2366 2367 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2368 data[i] = readl(fep->hwp + fec_stats[i].offset); 2369 } 2370 2371 static void fec_enet_get_strings(struct net_device *netdev, 2372 u32 stringset, u8 *data) 2373 { 2374 int i; 2375 switch (stringset) { 2376 case ETH_SS_STATS: 2377 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2378 memcpy(data + i * ETH_GSTRING_LEN, 2379 fec_stats[i].name, ETH_GSTRING_LEN); 2380 break; 2381 } 2382 } 2383 2384 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2385 { 2386 switch (sset) { 2387 case ETH_SS_STATS: 2388 return ARRAY_SIZE(fec_stats); 2389 default: 2390 return -EOPNOTSUPP; 2391 } 2392 } 2393 #endif /* !defined(CONFIG_M5272) */ 2394 2395 static int fec_enet_nway_reset(struct net_device *dev) 2396 { 2397 struct fec_enet_private *fep = netdev_priv(dev); 2398 struct phy_device *phydev = fep->phy_dev; 2399 2400 if (!phydev) 2401 return -ENODEV; 2402 2403 return genphy_restart_aneg(phydev); 2404 } 2405 2406 /* ITR clock source is enet system clock (clk_ahb). 2407 * TCTT unit is cycle_ns * 64 cycle 2408 * So, the ICTT value = X us / (cycle_ns * 64) 2409 */ 2410 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2411 { 2412 struct fec_enet_private *fep = netdev_priv(ndev); 2413 2414 return us * (fep->itr_clk_rate / 64000) / 1000; 2415 } 2416 2417 /* Set threshold for interrupt coalescing */ 2418 static void fec_enet_itr_coal_set(struct net_device *ndev) 2419 { 2420 struct fec_enet_private *fep = netdev_priv(ndev); 2421 int rx_itr, tx_itr; 2422 2423 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2424 return; 2425 2426 /* Must be greater than zero to avoid unpredictable behavior */ 2427 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2428 !fep->tx_time_itr || !fep->tx_pkts_itr) 2429 return; 2430 2431 /* Select enet system clock as Interrupt Coalescing 2432 * timer Clock Source 2433 */ 2434 rx_itr = FEC_ITR_CLK_SEL; 2435 tx_itr = FEC_ITR_CLK_SEL; 2436 2437 /* set ICFT and ICTT */ 2438 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2439 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2440 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2441 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2442 2443 rx_itr |= FEC_ITR_EN; 2444 tx_itr |= FEC_ITR_EN; 2445 2446 writel(tx_itr, fep->hwp + FEC_TXIC0); 2447 writel(rx_itr, fep->hwp + FEC_RXIC0); 2448 writel(tx_itr, fep->hwp + FEC_TXIC1); 2449 writel(rx_itr, fep->hwp + FEC_RXIC1); 2450 writel(tx_itr, fep->hwp + FEC_TXIC2); 2451 writel(rx_itr, fep->hwp + FEC_RXIC2); 2452 } 2453 2454 static int 2455 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2456 { 2457 struct fec_enet_private *fep = netdev_priv(ndev); 2458 2459 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2460 return -EOPNOTSUPP; 2461 2462 ec->rx_coalesce_usecs = fep->rx_time_itr; 2463 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 2464 2465 ec->tx_coalesce_usecs = fep->tx_time_itr; 2466 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 2467 2468 return 0; 2469 } 2470 2471 static int 2472 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2473 { 2474 struct fec_enet_private *fep = netdev_priv(ndev); 2475 unsigned int cycle; 2476 2477 
if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2478 return -EOPNOTSUPP; 2479 2480 if (ec->rx_max_coalesced_frames > 255) { 2481 pr_err("Rx coalesced frames exceed hardware limitation"); 2482 return -EINVAL; 2483 } 2484 2485 if (ec->tx_max_coalesced_frames > 255) { 2486 pr_err("Tx coalesced frames exceed hardware limitation"); 2487 return -EINVAL; 2488 } 2489 2490 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 2491 if (cycle > 0xFFFF) { 2492 pr_err("Rx coalesced usecs exceed hardware limitation"); 2493 return -EINVAL; 2494 } 2495 2496 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 2497 if (cycle > 0xFFFF) { 2498 pr_err("Tx coalesced usecs exceed hardware limitation"); 2499 return -EINVAL; 2500 } 2501 2502 fep->rx_time_itr = ec->rx_coalesce_usecs; 2503 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 2504 2505 fep->tx_time_itr = ec->tx_coalesce_usecs; 2506 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 2507 2508 fec_enet_itr_coal_set(ndev); 2509 2510 return 0; 2511 } 2512 2513 static void fec_enet_itr_coal_init(struct net_device *ndev) 2514 { 2515 struct ethtool_coalesce ec; 2516 2517 ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2518 ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2519 2520 ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2521 ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2522 2523 fec_enet_set_coalesce(ndev, &ec); 2524 } 2525 2526 static int fec_enet_get_tunable(struct net_device *netdev, 2527 const struct ethtool_tunable *tuna, 2528 void *data) 2529 { 2530 struct fec_enet_private *fep = netdev_priv(netdev); 2531 int ret = 0; 2532 2533 switch (tuna->id) { 2534 case ETHTOOL_RX_COPYBREAK: 2535 *(u32 *)data = fep->rx_copybreak; 2536 break; 2537 default: 2538 ret = -EINVAL; 2539 break; 2540 } 2541 2542 return ret; 2543 } 2544 2545 static int fec_enet_set_tunable(struct net_device *netdev, 2546 const struct ethtool_tunable *tuna, 2547 const void *data) 2548 { 2549 struct fec_enet_private *fep = netdev_priv(netdev); 2550 int ret = 0; 2551 2552 switch (tuna->id) { 2553 case ETHTOOL_RX_COPYBREAK: 2554 fep->rx_copybreak = *(u32 *)data; 2555 break; 2556 default: 2557 ret = -EINVAL; 2558 break; 2559 } 2560 2561 return ret; 2562 } 2563 2564 static void 2565 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2566 { 2567 struct fec_enet_private *fep = netdev_priv(ndev); 2568 2569 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 2570 wol->supported = WAKE_MAGIC; 2571 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ?
WAKE_MAGIC : 0; 2572 } else { 2573 wol->supported = wol->wolopts = 0; 2574 } 2575 } 2576 2577 static int 2578 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2579 { 2580 struct fec_enet_private *fep = netdev_priv(ndev); 2581 2582 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 2583 return -EINVAL; 2584 2585 if (wol->wolopts & ~WAKE_MAGIC) 2586 return -EINVAL; 2587 2588 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 2589 if (device_may_wakeup(&ndev->dev)) { 2590 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 2591 if (fep->irq[0] > 0) 2592 enable_irq_wake(fep->irq[0]); 2593 } else { 2594 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 2595 if (fep->irq[0] > 0) 2596 disable_irq_wake(fep->irq[0]); 2597 } 2598 2599 return 0; 2600 } 2601 2602 static const struct ethtool_ops fec_enet_ethtool_ops = { 2603 .get_settings = fec_enet_get_settings, 2604 .set_settings = fec_enet_set_settings, 2605 .get_drvinfo = fec_enet_get_drvinfo, 2606 .get_regs_len = fec_enet_get_regs_len, 2607 .get_regs = fec_enet_get_regs, 2608 .nway_reset = fec_enet_nway_reset, 2609 .get_link = ethtool_op_get_link, 2610 .get_coalesce = fec_enet_get_coalesce, 2611 .set_coalesce = fec_enet_set_coalesce, 2612 #ifndef CONFIG_M5272 2613 .get_pauseparam = fec_enet_get_pauseparam, 2614 .set_pauseparam = fec_enet_set_pauseparam, 2615 .get_strings = fec_enet_get_strings, 2616 .get_ethtool_stats = fec_enet_get_ethtool_stats, 2617 .get_sset_count = fec_enet_get_sset_count, 2618 #endif 2619 .get_ts_info = fec_enet_get_ts_info, 2620 .get_tunable = fec_enet_get_tunable, 2621 .set_tunable = fec_enet_set_tunable, 2622 .get_wol = fec_enet_get_wol, 2623 .set_wol = fec_enet_set_wol, 2624 }; 2625 2626 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2627 { 2628 struct fec_enet_private *fep = netdev_priv(ndev); 2629 struct phy_device *phydev = fep->phy_dev; 2630 2631 if (!netif_running(ndev)) 2632 return -EINVAL; 2633 2634 if (!phydev) 2635 return -ENODEV; 2636 2637 if (fep->bufdesc_ex) { 2638 if (cmd == SIOCSHWTSTAMP) 2639 return fec_ptp_set(ndev, rq); 2640 if (cmd == SIOCGHWTSTAMP) 2641 return fec_ptp_get(ndev, rq); 2642 } 2643 2644 return phy_mii_ioctl(phydev, rq, cmd); 2645 } 2646 2647 static void fec_enet_free_buffers(struct net_device *ndev) 2648 { 2649 struct fec_enet_private *fep = netdev_priv(ndev); 2650 unsigned int i; 2651 struct sk_buff *skb; 2652 struct bufdesc *bdp; 2653 struct fec_enet_priv_tx_q *txq; 2654 struct fec_enet_priv_rx_q *rxq; 2655 unsigned int q; 2656 2657 for (q = 0; q < fep->num_rx_queues; q++) { 2658 rxq = fep->rx_queue[q]; 2659 bdp = rxq->rx_bd_base; 2660 for (i = 0; i < rxq->rx_ring_size; i++) { 2661 skb = rxq->rx_skbuff[i]; 2662 rxq->rx_skbuff[i] = NULL; 2663 if (skb) { 2664 dma_unmap_single(&fep->pdev->dev, 2665 bdp->cbd_bufaddr, 2666 FEC_ENET_RX_FRSIZE - fep->rx_align, 2667 DMA_FROM_DEVICE); 2668 dev_kfree_skb(skb); 2669 } 2670 bdp = fec_enet_get_nextdesc(bdp, fep, q); 2671 } 2672 } 2673 2674 for (q = 0; q < fep->num_tx_queues; q++) { 2675 txq = fep->tx_queue[q]; 2676 bdp = txq->tx_bd_base; 2677 for (i = 0; i < txq->tx_ring_size; i++) { 2678 kfree(txq->tx_bounce[i]); 2679 txq->tx_bounce[i] = NULL; 2680 skb = txq->tx_skbuff[i]; 2681 txq->tx_skbuff[i] = NULL; 2682 dev_kfree_skb(skb); 2683 } 2684 } 2685 } 2686 2687 static void fec_enet_free_queue(struct net_device *ndev) 2688 { 2689 struct fec_enet_private *fep = netdev_priv(ndev); 2690 int i; 2691 struct fec_enet_priv_tx_q *txq; 2692 2693 for (i = 0; i < fep->num_tx_queues; i++) 2694 if (fep->tx_queue[i] && 
fep->tx_queue[i]->tso_hdrs) { 2695 txq = fep->tx_queue[i]; 2696 dma_free_coherent(NULL, 2697 txq->tx_ring_size * TSO_HEADER_SIZE, 2698 txq->tso_hdrs, 2699 txq->tso_hdrs_dma); 2700 } 2701 2702 for (i = 0; i < fep->num_rx_queues; i++) 2703 kfree(fep->rx_queue[i]); 2704 for (i = 0; i < fep->num_tx_queues; i++) 2705 kfree(fep->tx_queue[i]); 2706 } 2707 2708 static int fec_enet_alloc_queue(struct net_device *ndev) 2709 { 2710 struct fec_enet_private *fep = netdev_priv(ndev); 2711 int i; 2712 int ret = 0; 2713 struct fec_enet_priv_tx_q *txq; 2714 2715 for (i = 0; i < fep->num_tx_queues; i++) { 2716 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 2717 if (!txq) { 2718 ret = -ENOMEM; 2719 goto alloc_failed; 2720 } 2721 2722 fep->tx_queue[i] = txq; 2723 txq->tx_ring_size = TX_RING_SIZE; 2724 fep->total_tx_ring_size += fep->tx_queue[i]->tx_ring_size; 2725 2726 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 2727 txq->tx_wake_threshold = 2728 (txq->tx_ring_size - txq->tx_stop_threshold) / 2; 2729 2730 txq->tso_hdrs = dma_alloc_coherent(NULL, 2731 txq->tx_ring_size * TSO_HEADER_SIZE, 2732 &txq->tso_hdrs_dma, 2733 GFP_KERNEL); 2734 if (!txq->tso_hdrs) { 2735 ret = -ENOMEM; 2736 goto alloc_failed; 2737 } 2738 } 2739 2740 for (i = 0; i < fep->num_rx_queues; i++) { 2741 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 2742 GFP_KERNEL); 2743 if (!fep->rx_queue[i]) { 2744 ret = -ENOMEM; 2745 goto alloc_failed; 2746 } 2747 2748 fep->rx_queue[i]->rx_ring_size = RX_RING_SIZE; 2749 fep->total_rx_ring_size += fep->rx_queue[i]->rx_ring_size; 2750 } 2751 return ret; 2752 2753 alloc_failed: 2754 fec_enet_free_queue(ndev); 2755 return ret; 2756 } 2757 2758 static int 2759 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 2760 { 2761 struct fec_enet_private *fep = netdev_priv(ndev); 2762 unsigned int i; 2763 struct sk_buff *skb; 2764 struct bufdesc *bdp; 2765 struct fec_enet_priv_rx_q *rxq; 2766 2767 rxq = fep->rx_queue[queue]; 2768 bdp = rxq->rx_bd_base; 2769 for (i = 0; i < rxq->rx_ring_size; i++) { 2770 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 2771 if (!skb) 2772 goto err_alloc; 2773 2774 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { 2775 dev_kfree_skb(skb); 2776 goto err_alloc; 2777 } 2778 2779 rxq->rx_skbuff[i] = skb; 2780 bdp->cbd_sc = BD_ENET_RX_EMPTY; 2781 2782 if (fep->bufdesc_ex) { 2783 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2784 ebdp->cbd_esc = BD_ENET_RX_INT; 2785 } 2786 2787 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2788 } 2789 2790 /* Set the last buffer to wrap. */ 2791 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2792 bdp->cbd_sc |= BD_SC_WRAP; 2793 return 0; 2794 2795 err_alloc: 2796 fec_enet_free_buffers(ndev); 2797 return -ENOMEM; 2798 } 2799 2800 static int 2801 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 2802 { 2803 struct fec_enet_private *fep = netdev_priv(ndev); 2804 unsigned int i; 2805 struct bufdesc *bdp; 2806 struct fec_enet_priv_tx_q *txq; 2807 2808 txq = fep->tx_queue[queue]; 2809 bdp = txq->tx_bd_base; 2810 for (i = 0; i < txq->tx_ring_size; i++) { 2811 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 2812 if (!txq->tx_bounce[i]) 2813 goto err_alloc; 2814 2815 bdp->cbd_sc = 0; 2816 bdp->cbd_bufaddr = 0; 2817 2818 if (fep->bufdesc_ex) { 2819 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2820 ebdp->cbd_esc = BD_ENET_TX_INT; 2821 } 2822 2823 bdp = fec_enet_get_nextdesc(bdp, fep, queue); 2824 } 2825 2826 /* Set the last buffer to wrap. 
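* BD_SC_WRAP marks this as the final descriptor in the ring, so the
* controller's DMA engine returns to the first descriptor after
* processing it.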
*/ 2827 bdp = fec_enet_get_prevdesc(bdp, fep, queue); 2828 bdp->cbd_sc |= BD_SC_WRAP; 2829 2830 return 0; 2831 2832 err_alloc: 2833 fec_enet_free_buffers(ndev); 2834 return -ENOMEM; 2835 } 2836 2837 static int fec_enet_alloc_buffers(struct net_device *ndev) 2838 { 2839 struct fec_enet_private *fep = netdev_priv(ndev); 2840 unsigned int i; 2841 2842 for (i = 0; i < fep->num_rx_queues; i++) 2843 if (fec_enet_alloc_rxq_buffers(ndev, i)) 2844 return -ENOMEM; 2845 2846 for (i = 0; i < fep->num_tx_queues; i++) 2847 if (fec_enet_alloc_txq_buffers(ndev, i)) 2848 return -ENOMEM; 2849 return 0; 2850 } 2851 2852 static int 2853 fec_enet_open(struct net_device *ndev) 2854 { 2855 struct fec_enet_private *fep = netdev_priv(ndev); 2856 int ret; 2857 2858 ret = pm_runtime_get_sync(&fep->pdev->dev); 2859 if (ret < 0) 2860 return ret; 2861 2862 pinctrl_pm_select_default_state(&fep->pdev->dev); 2863 ret = fec_enet_clk_enable(ndev, true); 2864 if (ret) 2865 goto clk_enable; 2866 2867 /* I should reset the ring buffers here, but I don't yet know 2868 * a simple way to do that. 2869 */ 2870 2871 ret = fec_enet_alloc_buffers(ndev); 2872 if (ret) 2873 goto err_enet_alloc; 2874 2875 /* Init MAC prior to mii bus probe */ 2876 fec_restart(ndev); 2877 2878 /* Probe and connect to PHY when open the interface */ 2879 ret = fec_enet_mii_probe(ndev); 2880 if (ret) 2881 goto err_enet_mii_probe; 2882 2883 napi_enable(&fep->napi); 2884 phy_start(fep->phy_dev); 2885 netif_tx_start_all_queues(ndev); 2886 2887 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 2888 FEC_WOL_FLAG_ENABLE); 2889 2890 return 0; 2891 2892 err_enet_mii_probe: 2893 fec_enet_free_buffers(ndev); 2894 err_enet_alloc: 2895 fec_enet_clk_enable(ndev, false); 2896 clk_enable: 2897 pm_runtime_mark_last_busy(&fep->pdev->dev); 2898 pm_runtime_put_autosuspend(&fep->pdev->dev); 2899 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2900 return ret; 2901 } 2902 2903 static int 2904 fec_enet_close(struct net_device *ndev) 2905 { 2906 struct fec_enet_private *fep = netdev_priv(ndev); 2907 2908 phy_stop(fep->phy_dev); 2909 2910 if (netif_device_present(ndev)) { 2911 napi_disable(&fep->napi); 2912 netif_tx_disable(ndev); 2913 fec_stop(ndev); 2914 } 2915 2916 phy_disconnect(fep->phy_dev); 2917 fep->phy_dev = NULL; 2918 2919 fec_enet_clk_enable(ndev, false); 2920 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2921 pm_runtime_mark_last_busy(&fep->pdev->dev); 2922 pm_runtime_put_autosuspend(&fep->pdev->dev); 2923 2924 fec_enet_free_buffers(ndev); 2925 2926 return 0; 2927 } 2928 2929 /* Set or clear the multicast filter for this adaptor. 2930 * Skeleton taken from sunlance driver. 2931 * The CPM Ethernet implementation allows Multicast as well as individual 2932 * MAC address filtering. Some of the drivers check to make sure it is 2933 * a group multicast address, and discard those that are not. I guess I 2934 * will do the same for now, but just remove the test if you want 2935 * individual filtering as well (do the upper net layers want or support 2936 * this kind of feature?). 
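*
* The filter itself is a 64-bit hash: the top HASH_BITS bits of the
* CRC-32 computed below over each MAC address select one bit across the
* FEC_GRP_HASH_TABLE_HIGH/LOW register pair.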
2937 */ 2938 2939 #define HASH_BITS 6 /* #bits in hash */ 2940 #define CRC32_POLY 0xEDB88320 2941 2942 static void set_multicast_list(struct net_device *ndev) 2943 { 2944 struct fec_enet_private *fep = netdev_priv(ndev); 2945 struct netdev_hw_addr *ha; 2946 unsigned int i, bit, data, crc, tmp; 2947 unsigned char hash; 2948 2949 if (ndev->flags & IFF_PROMISC) { 2950 tmp = readl(fep->hwp + FEC_R_CNTRL); 2951 tmp |= 0x8; 2952 writel(tmp, fep->hwp + FEC_R_CNTRL); 2953 return; 2954 } 2955 2956 tmp = readl(fep->hwp + FEC_R_CNTRL); 2957 tmp &= ~0x8; 2958 writel(tmp, fep->hwp + FEC_R_CNTRL); 2959 2960 if (ndev->flags & IFF_ALLMULTI) { 2961 /* Catch all multicast addresses, so set the 2962 * filter to all 1's 2963 */ 2964 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2965 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2966 2967 return; 2968 } 2969 2970 /* Clear filter and add the addresses in hash register 2971 */ 2972 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2973 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2974 2975 netdev_for_each_mc_addr(ha, ndev) { 2976 /* calculate crc32 value of mac address */ 2977 crc = 0xffffffff; 2978 2979 for (i = 0; i < ndev->addr_len; i++) { 2980 data = ha->addr[i]; 2981 for (bit = 0; bit < 8; bit++, data >>= 1) { 2982 crc = (crc >> 1) ^ 2983 (((crc ^ data) & 1) ? CRC32_POLY : 0); 2984 } 2985 } 2986 2987 /* only upper 6 bits (HASH_BITS) are used 2988 * which point to specific bit in he hash registers 2989 */ 2990 hash = (crc >> (32 - HASH_BITS)) & 0x3f; 2991 2992 if (hash > 31) { 2993 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2994 tmp |= 1 << (hash - 32); 2995 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2996 } else { 2997 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2998 tmp |= 1 << hash; 2999 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3000 } 3001 } 3002 } 3003 3004 /* Set a MAC change in hardware. */ 3005 static int 3006 fec_set_mac_address(struct net_device *ndev, void *p) 3007 { 3008 struct fec_enet_private *fep = netdev_priv(ndev); 3009 struct sockaddr *addr = p; 3010 3011 if (addr) { 3012 if (!is_valid_ether_addr(addr->sa_data)) 3013 return -EADDRNOTAVAIL; 3014 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3015 } 3016 3017 /* Add netif status check here to avoid system hang in below case: 3018 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3019 * After ethx down, fec all clocks are gated off and then register 3020 * access causes system hang. 
3021 */ 3022 if (!netif_running(ndev)) 3023 return 0; 3024 3025 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3026 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3027 fep->hwp + FEC_ADDR_LOW); 3028 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3029 fep->hwp + FEC_ADDR_HIGH); 3030 return 0; 3031 } 3032 3033 #ifdef CONFIG_NET_POLL_CONTROLLER 3034 /** 3035 * fec_poll_controller - FEC Poll controller function 3036 * @dev: The FEC network adapter 3037 * 3038 * Polled functionality used by netconsole and others in non interrupt mode 3039 * 3040 */ 3041 static void fec_poll_controller(struct net_device *dev) 3042 { 3043 int i; 3044 struct fec_enet_private *fep = netdev_priv(dev); 3045 3046 for (i = 0; i < FEC_IRQ_NUM; i++) { 3047 if (fep->irq[i] > 0) { 3048 disable_irq(fep->irq[i]); 3049 fec_enet_interrupt(fep->irq[i], dev); 3050 enable_irq(fep->irq[i]); 3051 } 3052 } 3053 } 3054 #endif 3055 3056 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3057 netdev_features_t features) 3058 { 3059 struct fec_enet_private *fep = netdev_priv(netdev); 3060 netdev_features_t changed = features ^ netdev->features; 3061 3062 netdev->features = features; 3063 3064 /* Receive checksum has been changed */ 3065 if (changed & NETIF_F_RXCSUM) { 3066 if (features & NETIF_F_RXCSUM) 3067 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3068 else 3069 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3070 } 3071 } 3072 3073 static int fec_set_features(struct net_device *netdev, 3074 netdev_features_t features) 3075 { 3076 struct fec_enet_private *fep = netdev_priv(netdev); 3077 netdev_features_t changed = features ^ netdev->features; 3078 3079 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3080 napi_disable(&fep->napi); 3081 netif_tx_lock_bh(netdev); 3082 fec_stop(netdev); 3083 fec_enet_set_netdev_features(netdev, features); 3084 fec_restart(netdev); 3085 netif_tx_wake_all_queues(netdev); 3086 netif_tx_unlock_bh(netdev); 3087 napi_enable(&fep->napi); 3088 } else { 3089 fec_enet_set_netdev_features(netdev, features); 3090 } 3091 3092 return 0; 3093 } 3094 3095 static const struct net_device_ops fec_netdev_ops = { 3096 .ndo_open = fec_enet_open, 3097 .ndo_stop = fec_enet_close, 3098 .ndo_start_xmit = fec_enet_start_xmit, 3099 .ndo_set_rx_mode = set_multicast_list, 3100 .ndo_change_mtu = eth_change_mtu, 3101 .ndo_validate_addr = eth_validate_addr, 3102 .ndo_tx_timeout = fec_timeout, 3103 .ndo_set_mac_address = fec_set_mac_address, 3104 .ndo_do_ioctl = fec_enet_ioctl, 3105 #ifdef CONFIG_NET_POLL_CONTROLLER 3106 .ndo_poll_controller = fec_poll_controller, 3107 #endif 3108 .ndo_set_features = fec_set_features, 3109 }; 3110 3111 /* 3112 * XXX: We need to clean up on failure exits here. 3113 * 3114 */ 3115 static int fec_enet_init(struct net_device *ndev) 3116 { 3117 struct fec_enet_private *fep = netdev_priv(ndev); 3118 struct fec_enet_priv_tx_q *txq; 3119 struct fec_enet_priv_rx_q *rxq; 3120 struct bufdesc *cbd_base; 3121 dma_addr_t bd_dma; 3122 int bd_size; 3123 unsigned int i; 3124 3125 #if defined(CONFIG_ARM) 3126 fep->rx_align = 0xf; 3127 fep->tx_align = 0xf; 3128 #else 3129 fep->rx_align = 0x3; 3130 fep->tx_align = 0x3; 3131 #endif 3132 3133 fec_enet_alloc_queue(ndev); 3134 3135 if (fep->bufdesc_ex) 3136 fep->bufdesc_size = sizeof(struct bufdesc_ex); 3137 else 3138 fep->bufdesc_size = sizeof(struct bufdesc); 3139 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * 3140 fep->bufdesc_size; 3141 3142 /* Allocate memory for buffer descriptors. 
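* A single dmam_alloc_coherent() block backs all of the rings: the loops
* below carve it into the RX descriptor rings first, then the TX rings,
* giving each queue its CPU base pointer and the matching DMA address.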
*/ 3143 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, 3144 GFP_KERNEL); 3145 if (!cbd_base) { 3146 return -ENOMEM; 3147 } 3148 3149 memset(cbd_base, 0, bd_size); 3150 3151 /* Get the Ethernet address */ 3152 fec_get_mac(ndev); 3153 /* make sure MAC we just acquired is programmed into the hw */ 3154 fec_set_mac_address(ndev, NULL); 3155 3156 /* Set receive and transmit descriptor base. */ 3157 for (i = 0; i < fep->num_rx_queues; i++) { 3158 rxq = fep->rx_queue[i]; 3159 rxq->index = i; 3160 rxq->rx_bd_base = (struct bufdesc *)cbd_base; 3161 rxq->bd_dma = bd_dma; 3162 if (fep->bufdesc_ex) { 3163 bd_dma += sizeof(struct bufdesc_ex) * rxq->rx_ring_size; 3164 cbd_base = (struct bufdesc *) 3165 (((struct bufdesc_ex *)cbd_base) + rxq->rx_ring_size); 3166 } else { 3167 bd_dma += sizeof(struct bufdesc) * rxq->rx_ring_size; 3168 cbd_base += rxq->rx_ring_size; 3169 } 3170 } 3171 3172 for (i = 0; i < fep->num_tx_queues; i++) { 3173 txq = fep->tx_queue[i]; 3174 txq->index = i; 3175 txq->tx_bd_base = (struct bufdesc *)cbd_base; 3176 txq->bd_dma = bd_dma; 3177 if (fep->bufdesc_ex) { 3178 bd_dma += sizeof(struct bufdesc_ex) * txq->tx_ring_size; 3179 cbd_base = (struct bufdesc *) 3180 (((struct bufdesc_ex *)cbd_base) + txq->tx_ring_size); 3181 } else { 3182 bd_dma += sizeof(struct bufdesc) * txq->tx_ring_size; 3183 cbd_base += txq->tx_ring_size; 3184 } 3185 } 3186 3187 3188 /* The FEC Ethernet specific entries in the device structure */ 3189 ndev->watchdog_timeo = TX_TIMEOUT; 3190 ndev->netdev_ops = &fec_netdev_ops; 3191 ndev->ethtool_ops = &fec_enet_ethtool_ops; 3192 3193 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 3194 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 3195 3196 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 3197 /* enable hw VLAN support */ 3198 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3199 3200 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 3201 ndev->gso_max_segs = FEC_MAX_TSO_SEGS; 3202 3203 /* enable hw accelerator */ 3204 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3205 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 3206 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3207 } 3208 3209 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 3210 fep->tx_align = 0; 3211 fep->rx_align = 0x3f; 3212 } 3213 3214 ndev->hw_features = ndev->features; 3215 3216 fec_restart(ndev); 3217 3218 return 0; 3219 } 3220 3221 #ifdef CONFIG_OF 3222 static void fec_reset_phy(struct platform_device *pdev) 3223 { 3224 int err, phy_reset; 3225 int msec = 1; 3226 struct device_node *np = pdev->dev.of_node; 3227 3228 if (!np) 3229 return; 3230 3231 of_property_read_u32(np, "phy-reset-duration", &msec); 3232 /* A sane reset duration should not be longer than 1s */ 3233 if (msec > 1000) 3234 msec = 1; 3235 3236 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 3237 if (!gpio_is_valid(phy_reset)) 3238 return; 3239 3240 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3241 GPIOF_OUT_INIT_LOW, "phy-reset"); 3242 if (err) { 3243 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3244 return; 3245 } 3246 msleep(msec); 3247 gpio_set_value_cansleep(phy_reset, 1); 3248 } 3249 #else /* CONFIG_OF */ 3250 static void fec_reset_phy(struct platform_device *pdev) 3251 { 3252 /* 3253 * In case of platform probe, the reset has been done 3254 * by machine code. 
3255 */ 3256 } 3257 #endif /* CONFIG_OF */ 3258 3259 static void 3260 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 3261 { 3262 struct device_node *np = pdev->dev.of_node; 3263 3264 *num_tx = *num_rx = 1; 3265 3266 if (!np || !of_device_is_available(np)) 3267 return; 3268 3269 /* parse the num of tx and rx queues */ 3270 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 3271 3272 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 3273 3274 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 3275 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 3276 *num_tx); 3277 *num_tx = 1; 3278 return; 3279 } 3280 3281 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 3282 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 3283 *num_rx); 3284 *num_rx = 1; 3285 return; 3286 } 3287 3288 } 3289 3290 static int 3291 fec_probe(struct platform_device *pdev) 3292 { 3293 struct fec_enet_private *fep; 3294 struct fec_platform_data *pdata; 3295 struct net_device *ndev; 3296 int i, irq, ret = 0; 3297 struct resource *r; 3298 const struct of_device_id *of_id; 3299 static int dev_id; 3300 struct device_node *np = pdev->dev.of_node, *phy_node; 3301 int num_tx_qs; 3302 int num_rx_qs; 3303 3304 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3305 3306 /* Init network device */ 3307 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), 3308 num_tx_qs, num_rx_qs); 3309 if (!ndev) 3310 return -ENOMEM; 3311 3312 SET_NETDEV_DEV(ndev, &pdev->dev); 3313 3314 /* setup board info structure */ 3315 fep = netdev_priv(ndev); 3316 3317 of_id = of_match_device(fec_dt_ids, &pdev->dev); 3318 if (of_id) 3319 pdev->id_entry = of_id->data; 3320 fep->quirks = pdev->id_entry->driver_data; 3321 3322 fep->netdev = ndev; 3323 fep->num_rx_queues = num_rx_qs; 3324 fep->num_tx_queues = num_tx_qs; 3325 3326 #if !defined(CONFIG_M5272) 3327 /* default enable pause frame auto negotiation */ 3328 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 3329 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 3330 #endif 3331 3332 /* Select default pin state */ 3333 pinctrl_pm_select_default_state(&pdev->dev); 3334 3335 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3336 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 3337 if (IS_ERR(fep->hwp)) { 3338 ret = PTR_ERR(fep->hwp); 3339 goto failed_ioremap; 3340 } 3341 3342 fep->pdev = pdev; 3343 fep->dev_id = dev_id++; 3344 3345 platform_set_drvdata(pdev, ndev); 3346 3347 if (of_get_property(np, "fsl,magic-packet", NULL)) 3348 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 3349 3350 phy_node = of_parse_phandle(np, "phy-handle", 0); 3351 if (!phy_node && of_phy_is_fixed_link(np)) { 3352 ret = of_phy_register_fixed_link(np); 3353 if (ret < 0) { 3354 dev_err(&pdev->dev, 3355 "broken fixed-link specification\n"); 3356 goto failed_phy; 3357 } 3358 phy_node = of_node_get(np); 3359 } 3360 fep->phy_node = phy_node; 3361 3362 ret = of_get_phy_mode(pdev->dev.of_node); 3363 if (ret < 0) { 3364 pdata = dev_get_platdata(&pdev->dev); 3365 if (pdata) 3366 fep->phy_interface = pdata->phy; 3367 else 3368 fep->phy_interface = PHY_INTERFACE_MODE_MII; 3369 } else { 3370 fep->phy_interface = ret; 3371 } 3372 3373 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 3374 if (IS_ERR(fep->clk_ipg)) { 3375 ret = PTR_ERR(fep->clk_ipg); 3376 goto failed_clk; 3377 } 3378 3379 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 3380 if (IS_ERR(fep->clk_ahb)) { 3381 ret = PTR_ERR(fep->clk_ahb); 3382 goto failed_clk; 3383 } 3384 3385 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 3386 3387 /* 
enet_out is optional, depends on board */ 3388 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); 3389 if (IS_ERR(fep->clk_enet_out)) 3390 fep->clk_enet_out = NULL; 3391 3392 fep->ptp_clk_on = false; 3393 mutex_init(&fep->ptp_clk_mutex); 3394 3395 /* clk_ref is optional, depends on board */ 3396 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3397 if (IS_ERR(fep->clk_ref)) 3398 fep->clk_ref = NULL; 3399 3400 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 3401 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 3402 if (IS_ERR(fep->clk_ptp)) { 3403 fep->clk_ptp = NULL; 3404 fep->bufdesc_ex = false; 3405 } 3406 3407 ret = fec_enet_clk_enable(ndev, true); 3408 if (ret) 3409 goto failed_clk; 3410 3411 ret = clk_prepare_enable(fep->clk_ipg); 3412 if (ret) 3413 goto failed_clk_ipg; 3414 3415 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3416 if (!IS_ERR(fep->reg_phy)) { 3417 ret = regulator_enable(fep->reg_phy); 3418 if (ret) { 3419 dev_err(&pdev->dev, 3420 "Failed to enable phy regulator: %d\n", ret); 3421 goto failed_regulator; 3422 } 3423 } else { 3424 fep->reg_phy = NULL; 3425 } 3426 3427 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3428 pm_runtime_use_autosuspend(&pdev->dev); 3429 pm_runtime_get_noresume(&pdev->dev); 3430 pm_runtime_set_active(&pdev->dev); 3431 pm_runtime_enable(&pdev->dev); 3432 3433 fec_reset_phy(pdev); 3434 3435 if (fep->bufdesc_ex) 3436 fec_ptp_init(pdev); 3437 3438 ret = fec_enet_init(ndev); 3439 if (ret) 3440 goto failed_init; 3441 3442 for (i = 0; i < FEC_IRQ_NUM; i++) { 3443 irq = platform_get_irq(pdev, i); 3444 if (irq < 0) { 3445 if (i) 3446 break; 3447 ret = irq; 3448 goto failed_irq; 3449 } 3450 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 3451 0, pdev->name, ndev); 3452 if (ret) 3453 goto failed_irq; 3454 3455 fep->irq[i] = irq; 3456 } 3457 3458 init_completion(&fep->mdio_done); 3459 ret = fec_enet_mii_init(pdev); 3460 if (ret) 3461 goto failed_mii_init; 3462 3463 /* Carrier starts down, phylib will bring it up */ 3464 netif_carrier_off(ndev); 3465 fec_enet_clk_enable(ndev, false); 3466 pinctrl_pm_select_sleep_state(&pdev->dev); 3467 3468 ret = register_netdev(ndev); 3469 if (ret) 3470 goto failed_register; 3471 3472 device_init_wakeup(&ndev->dev, fep->wol_flag & 3473 FEC_WOL_HAS_MAGIC_PACKET); 3474 3475 if (fep->bufdesc_ex && fep->ptp_clock) 3476 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3477 3478 fep->rx_copybreak = COPYBREAK_DEFAULT; 3479 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3480 3481 pm_runtime_mark_last_busy(&pdev->dev); 3482 pm_runtime_put_autosuspend(&pdev->dev); 3483 3484 return 0; 3485 3486 failed_register: 3487 fec_enet_mii_remove(fep); 3488 failed_mii_init: 3489 failed_irq: 3490 failed_init: 3491 fec_ptp_stop(pdev); 3492 if (fep->reg_phy) 3493 regulator_disable(fep->reg_phy); 3494 failed_regulator: 3495 clk_disable_unprepare(fep->clk_ipg); 3496 failed_clk_ipg: 3497 fec_enet_clk_enable(ndev, false); 3498 failed_clk: 3499 failed_phy: 3500 of_node_put(phy_node); 3501 failed_ioremap: 3502 free_netdev(ndev); 3503 3504 return ret; 3505 } 3506 3507 static int 3508 fec_drv_remove(struct platform_device *pdev) 3509 { 3510 struct net_device *ndev = platform_get_drvdata(pdev); 3511 struct fec_enet_private *fep = netdev_priv(ndev); 3512 3513 cancel_work_sync(&fep->tx_timeout_work); 3514 fec_ptp_stop(pdev); 3515 unregister_netdev(ndev); 3516 fec_enet_mii_remove(fep); 3517 if (fep->reg_phy) 3518 regulator_disable(fep->reg_phy); 3519 
of_node_put(fep->phy_node); 3520 free_netdev(ndev); 3521 3522 return 0; 3523 } 3524 3525 static int __maybe_unused fec_suspend(struct device *dev) 3526 { 3527 struct net_device *ndev = dev_get_drvdata(dev); 3528 struct fec_enet_private *fep = netdev_priv(ndev); 3529 3530 rtnl_lock(); 3531 if (netif_running(ndev)) { 3532 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 3533 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 3534 phy_stop(fep->phy_dev); 3535 napi_disable(&fep->napi); 3536 netif_tx_lock_bh(ndev); 3537 netif_device_detach(ndev); 3538 netif_tx_unlock_bh(ndev); 3539 fec_stop(ndev); 3540 fec_enet_clk_enable(ndev, false); 3541 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3542 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3543 } 3544 rtnl_unlock(); 3545 3546 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3547 regulator_disable(fep->reg_phy); 3548 3549 /* SOC supply clock to phy, when clock is disabled, phy link down 3550 * SOC control phy regulator, when regulator is disabled, phy link down 3551 */ 3552 if (fep->clk_enet_out || fep->reg_phy) 3553 fep->link = 0; 3554 3555 return 0; 3556 } 3557 3558 static int __maybe_unused fec_resume(struct device *dev) 3559 { 3560 struct net_device *ndev = dev_get_drvdata(dev); 3561 struct fec_enet_private *fep = netdev_priv(ndev); 3562 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 3563 int ret; 3564 int val; 3565 3566 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 3567 ret = regulator_enable(fep->reg_phy); 3568 if (ret) 3569 return ret; 3570 } 3571 3572 rtnl_lock(); 3573 if (netif_running(ndev)) { 3574 ret = fec_enet_clk_enable(ndev, true); 3575 if (ret) { 3576 rtnl_unlock(); 3577 goto failed_clk; 3578 } 3579 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 3580 if (pdata && pdata->sleep_mode_enable) 3581 pdata->sleep_mode_enable(false); 3582 val = readl(fep->hwp + FEC_ECNTRL); 3583 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 3584 writel(val, fep->hwp + FEC_ECNTRL); 3585 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 3586 } else { 3587 pinctrl_pm_select_default_state(&fep->pdev->dev); 3588 } 3589 fec_restart(ndev); 3590 netif_tx_lock_bh(ndev); 3591 netif_device_attach(ndev); 3592 netif_tx_unlock_bh(ndev); 3593 napi_enable(&fep->napi); 3594 phy_start(fep->phy_dev); 3595 } 3596 rtnl_unlock(); 3597 3598 return 0; 3599 3600 failed_clk: 3601 if (fep->reg_phy) 3602 regulator_disable(fep->reg_phy); 3603 return ret; 3604 } 3605 3606 static int __maybe_unused fec_runtime_suspend(struct device *dev) 3607 { 3608 struct net_device *ndev = dev_get_drvdata(dev); 3609 struct fec_enet_private *fep = netdev_priv(ndev); 3610 3611 clk_disable_unprepare(fep->clk_ipg); 3612 3613 return 0; 3614 } 3615 3616 static int __maybe_unused fec_runtime_resume(struct device *dev) 3617 { 3618 struct net_device *ndev = dev_get_drvdata(dev); 3619 struct fec_enet_private *fep = netdev_priv(ndev); 3620 3621 return clk_prepare_enable(fep->clk_ipg); 3622 } 3623 3624 static const struct dev_pm_ops fec_pm_ops = { 3625 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3626 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3627 }; 3628 3629 static struct platform_driver fec_driver = { 3630 .driver = { 3631 .name = DRIVER_NAME, 3632 .pm = &fec_pm_ops, 3633 .of_match_table = fec_dt_ids, 3634 }, 3635 .id_table = fec_devtype, 3636 .probe = fec_probe, 3637 .remove = fec_drv_remove, 3638 }; 3639 3640 module_platform_driver(fec_driver); 3641 3642 MODULE_ALIAS("platform:"DRIVER_NAME); 3643 MODULE_LICENSE("GPL"); 3644