/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT	100 /* ms */

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx27-fec",
		.driver_data = FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address from which to get the MAC.
 */
#if defined(CONFIG_NETtel)
#define FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define FEC_FLASHMAC	0xf0020000
#elif defined(CONFIG_M5272C3)
#define FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define FEC_FLASHMAC	0xffc0406b
#else
#define FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 */
#define PKT_MAXBUF_SIZE		1522
#define PKT_MINBUF_SIZE		64
#define PKT_MAXBLR_SIZE		1536

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
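 *
 * For example, on the ColdFire and ARM parts selected below OPT_FRAME_SIZE
 * evaluates to PKT_MAXBUF_SIZE << 16 (1522 << 16), so the maximum frame
 * length is programmed into R_CNTRL together with the other control bits
 * in fec_restart(); on the remaining parts it is simply 0.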
181 */ 182 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 183 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) 184 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 185 #else 186 #define OPT_FRAME_SIZE 0 187 #endif 188 189 /* FEC MII MMFR bits definition */ 190 #define FEC_MMFR_ST (1 << 30) 191 #define FEC_MMFR_OP_READ (2 << 28) 192 #define FEC_MMFR_OP_WRITE (1 << 28) 193 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 194 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 195 #define FEC_MMFR_TA (2 << 16) 196 #define FEC_MMFR_DATA(v) (v & 0xffff) 197 /* FEC ECR bits definition */ 198 #define FEC_ECR_MAGICEN (1 << 2) 199 #define FEC_ECR_SLEEP (1 << 3) 200 201 #define FEC_MII_TIMEOUT 30000 /* us */ 202 203 /* Transmitter timeout */ 204 #define TX_TIMEOUT (2 * HZ) 205 206 #define FEC_PAUSE_FLAG_AUTONEG 0x1 207 #define FEC_PAUSE_FLAG_ENABLE 0x2 208 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 209 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 210 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 211 212 #define COPYBREAK_DEFAULT 256 213 214 #define TSO_HEADER_SIZE 128 215 /* Max number of allowed TCP segments for software TSO */ 216 #define FEC_MAX_TSO_SEGS 100 217 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 218 219 #define IS_TSO_HEADER(txq, addr) \ 220 ((addr >= txq->tso_hdrs_dma) && \ 221 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 222 223 static int mii_cnt; 224 225 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 226 struct bufdesc_prop *bd) 227 { 228 return (bdp >= bd->last) ? bd->base 229 : (struct bufdesc *)(((unsigned)bdp) + bd->dsize); 230 } 231 232 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 233 struct bufdesc_prop *bd) 234 { 235 return (bdp <= bd->base) ? bd->last 236 : (struct bufdesc *)(((unsigned)bdp) - bd->dsize); 237 } 238 239 static int fec_enet_get_bd_index(struct bufdesc *bdp, 240 struct bufdesc_prop *bd) 241 { 242 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; 243 } 244 245 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) 246 { 247 int entries; 248 249 entries = (((const char *)txq->dirty_tx - 250 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; 251 252 return entries >= 0 ? entries : entries + txq->bd.ring_size; 253 } 254 255 static void swap_buffer(void *bufaddr, int len) 256 { 257 int i; 258 unsigned int *buf = bufaddr; 259 260 for (i = 0; i < len; i += 4, buf++) 261 swab32s(buf); 262 } 263 264 static void swap_buffer2(void *dst_buf, void *src_buf, int len) 265 { 266 int i; 267 unsigned int *src = src_buf; 268 unsigned int *dst = dst_buf; 269 270 for (i = 0; i < len; i += 4, src++, dst++) 271 *dst = swab32p(src); 272 } 273 274 static void fec_dump(struct net_device *ndev) 275 { 276 struct fec_enet_private *fep = netdev_priv(ndev); 277 struct bufdesc *bdp; 278 struct fec_enet_priv_tx_q *txq; 279 int index = 0; 280 281 netdev_info(ndev, "TX ring dump\n"); 282 pr_info("Nr SC addr len SKB\n"); 283 284 txq = fep->tx_queue[0]; 285 bdp = txq->bd.base; 286 287 do { 288 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", 289 index, 290 bdp == txq->bd.cur ? 'S' : ' ', 291 bdp == txq->dirty_tx ? 
'H' : ' ', 292 fec16_to_cpu(bdp->cbd_sc), 293 fec32_to_cpu(bdp->cbd_bufaddr), 294 fec16_to_cpu(bdp->cbd_datlen), 295 txq->tx_skbuff[index]); 296 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 297 index++; 298 } while (bdp != txq->bd.base); 299 } 300 301 static inline bool is_ipv4_pkt(struct sk_buff *skb) 302 { 303 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 304 } 305 306 static int 307 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 308 { 309 /* Only run for packets requiring a checksum. */ 310 if (skb->ip_summed != CHECKSUM_PARTIAL) 311 return 0; 312 313 if (unlikely(skb_cow_head(skb, 0))) 314 return -1; 315 316 if (is_ipv4_pkt(skb)) 317 ip_hdr(skb)->check = 0; 318 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 319 320 return 0; 321 } 322 323 static struct bufdesc * 324 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 325 struct sk_buff *skb, 326 struct net_device *ndev) 327 { 328 struct fec_enet_private *fep = netdev_priv(ndev); 329 struct bufdesc *bdp = txq->bd.cur; 330 struct bufdesc_ex *ebdp; 331 int nr_frags = skb_shinfo(skb)->nr_frags; 332 int frag, frag_len; 333 unsigned short status; 334 unsigned int estatus = 0; 335 skb_frag_t *this_frag; 336 unsigned int index; 337 void *bufaddr; 338 dma_addr_t addr; 339 int i; 340 341 for (frag = 0; frag < nr_frags; frag++) { 342 this_frag = &skb_shinfo(skb)->frags[frag]; 343 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 344 ebdp = (struct bufdesc_ex *)bdp; 345 346 status = fec16_to_cpu(bdp->cbd_sc); 347 status &= ~BD_ENET_TX_STATS; 348 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 349 frag_len = skb_shinfo(skb)->frags[frag].size; 350 351 /* Handle the last BD specially */ 352 if (frag == nr_frags - 1) { 353 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 354 if (fep->bufdesc_ex) { 355 estatus |= BD_ENET_TX_INT; 356 if (unlikely(skb_shinfo(skb)->tx_flags & 357 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 358 estatus |= BD_ENET_TX_TS; 359 } 360 } 361 362 if (fep->bufdesc_ex) { 363 if (fep->quirks & FEC_QUIRK_HAS_AVB) 364 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 365 if (skb->ip_summed == CHECKSUM_PARTIAL) 366 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 367 ebdp->cbd_bdu = 0; 368 ebdp->cbd_esc = cpu_to_fec32(estatus); 369 } 370 371 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; 372 373 index = fec_enet_get_bd_index(bdp, &txq->bd); 374 if (((unsigned long) bufaddr) & fep->tx_align || 375 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 376 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 377 bufaddr = txq->tx_bounce[index]; 378 379 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 380 swap_buffer(bufaddr, frag_len); 381 } 382 383 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 384 DMA_TO_DEVICE); 385 if (dma_mapping_error(&fep->pdev->dev, addr)) { 386 if (net_ratelimit()) 387 netdev_err(ndev, "Tx DMA memory map failed\n"); 388 goto dma_mapping_error; 389 } 390 391 bdp->cbd_bufaddr = cpu_to_fec32(addr); 392 bdp->cbd_datlen = cpu_to_fec16(frag_len); 393 /* Make sure the updates to rest of the descriptor are 394 * performed before transferring ownership. 
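 * The wmb() below orders the cbd_bufaddr/cbd_datlen (and, for extended
 * descriptors, cbd_esc/cbd_bdu) stores against the final cbd_sc write that
 * sets BD_ENET_TX_READY and hands the descriptor to the DMA engine.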
395 */ 396 wmb(); 397 bdp->cbd_sc = cpu_to_fec16(status); 398 } 399 400 return bdp; 401 dma_mapping_error: 402 bdp = txq->bd.cur; 403 for (i = 0; i < frag; i++) { 404 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 405 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), 406 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); 407 } 408 return ERR_PTR(-ENOMEM); 409 } 410 411 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 412 struct sk_buff *skb, struct net_device *ndev) 413 { 414 struct fec_enet_private *fep = netdev_priv(ndev); 415 int nr_frags = skb_shinfo(skb)->nr_frags; 416 struct bufdesc *bdp, *last_bdp; 417 void *bufaddr; 418 dma_addr_t addr; 419 unsigned short status; 420 unsigned short buflen; 421 unsigned int estatus = 0; 422 unsigned int index; 423 int entries_free; 424 425 entries_free = fec_enet_get_free_txdesc_num(txq); 426 if (entries_free < MAX_SKB_FRAGS + 1) { 427 dev_kfree_skb_any(skb); 428 if (net_ratelimit()) 429 netdev_err(ndev, "NOT enough BD for SG!\n"); 430 return NETDEV_TX_OK; 431 } 432 433 /* Protocol checksum off-load for TCP and UDP. */ 434 if (fec_enet_clear_csum(skb, ndev)) { 435 dev_kfree_skb_any(skb); 436 return NETDEV_TX_OK; 437 } 438 439 /* Fill in a Tx ring entry */ 440 bdp = txq->bd.cur; 441 last_bdp = bdp; 442 status = fec16_to_cpu(bdp->cbd_sc); 443 status &= ~BD_ENET_TX_STATS; 444 445 /* Set buffer length and buffer pointer */ 446 bufaddr = skb->data; 447 buflen = skb_headlen(skb); 448 449 index = fec_enet_get_bd_index(bdp, &txq->bd); 450 if (((unsigned long) bufaddr) & fep->tx_align || 451 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 452 memcpy(txq->tx_bounce[index], skb->data, buflen); 453 bufaddr = txq->tx_bounce[index]; 454 455 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 456 swap_buffer(bufaddr, buflen); 457 } 458 459 /* Push the data cache so the CPM does not get stale memory data. */ 460 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 461 if (dma_mapping_error(&fep->pdev->dev, addr)) { 462 dev_kfree_skb_any(skb); 463 if (net_ratelimit()) 464 netdev_err(ndev, "Tx DMA memory map failed\n"); 465 return NETDEV_TX_OK; 466 } 467 468 if (nr_frags) { 469 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 470 if (IS_ERR(last_bdp)) { 471 dma_unmap_single(&fep->pdev->dev, addr, 472 buflen, DMA_TO_DEVICE); 473 dev_kfree_skb_any(skb); 474 return NETDEV_TX_OK; 475 } 476 } else { 477 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 478 if (fep->bufdesc_ex) { 479 estatus = BD_ENET_TX_INT; 480 if (unlikely(skb_shinfo(skb)->tx_flags & 481 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 482 estatus |= BD_ENET_TX_TS; 483 } 484 } 485 bdp->cbd_bufaddr = cpu_to_fec32(addr); 486 bdp->cbd_datlen = cpu_to_fec16(buflen); 487 488 if (fep->bufdesc_ex) { 489 490 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 491 492 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 493 fep->hwts_tx_en)) 494 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 495 496 if (fep->quirks & FEC_QUIRK_HAS_AVB) 497 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 498 499 if (skb->ip_summed == CHECKSUM_PARTIAL) 500 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 501 502 ebdp->cbd_bdu = 0; 503 ebdp->cbd_esc = cpu_to_fec32(estatus); 504 } 505 506 index = fec_enet_get_bd_index(last_bdp, &txq->bd); 507 /* Save skb pointer */ 508 txq->tx_skbuff[index] = skb; 509 510 /* Make sure the updates to rest of the descriptor are performed before 511 * transferring ownership. 512 */ 513 wmb(); 514 515 /* Send it on its way. 
Tell FEC it's ready, interrupt when done, 516 * it's the last BD of the frame, and to put the CRC on the end. 517 */ 518 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 519 bdp->cbd_sc = cpu_to_fec16(status); 520 521 /* If this was the last BD in the ring, start at the beginning again. */ 522 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); 523 524 skb_tx_timestamp(skb); 525 526 /* Make sure the update to bdp and tx_skbuff are performed before 527 * txq->bd.cur. 528 */ 529 wmb(); 530 txq->bd.cur = bdp; 531 532 /* Trigger transmission start */ 533 writel(0, txq->bd.reg_desc_active); 534 535 return 0; 536 } 537 538 static int 539 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 540 struct net_device *ndev, 541 struct bufdesc *bdp, int index, char *data, 542 int size, bool last_tcp, bool is_last) 543 { 544 struct fec_enet_private *fep = netdev_priv(ndev); 545 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 546 unsigned short status; 547 unsigned int estatus = 0; 548 dma_addr_t addr; 549 550 status = fec16_to_cpu(bdp->cbd_sc); 551 status &= ~BD_ENET_TX_STATS; 552 553 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 554 555 if (((unsigned long) data) & fep->tx_align || 556 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 557 memcpy(txq->tx_bounce[index], data, size); 558 data = txq->tx_bounce[index]; 559 560 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 561 swap_buffer(data, size); 562 } 563 564 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 565 if (dma_mapping_error(&fep->pdev->dev, addr)) { 566 dev_kfree_skb_any(skb); 567 if (net_ratelimit()) 568 netdev_err(ndev, "Tx DMA memory map failed\n"); 569 return NETDEV_TX_BUSY; 570 } 571 572 bdp->cbd_datlen = cpu_to_fec16(size); 573 bdp->cbd_bufaddr = cpu_to_fec32(addr); 574 575 if (fep->bufdesc_ex) { 576 if (fep->quirks & FEC_QUIRK_HAS_AVB) 577 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 578 if (skb->ip_summed == CHECKSUM_PARTIAL) 579 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 580 ebdp->cbd_bdu = 0; 581 ebdp->cbd_esc = cpu_to_fec32(estatus); 582 } 583 584 /* Handle the last BD specially */ 585 if (last_tcp) 586 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 587 if (is_last) { 588 status |= BD_ENET_TX_INTR; 589 if (fep->bufdesc_ex) 590 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); 591 } 592 593 bdp->cbd_sc = cpu_to_fec16(status); 594 595 return 0; 596 } 597 598 static int 599 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 600 struct sk_buff *skb, struct net_device *ndev, 601 struct bufdesc *bdp, int index) 602 { 603 struct fec_enet_private *fep = netdev_priv(ndev); 604 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 605 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 606 void *bufaddr; 607 unsigned long dmabuf; 608 unsigned short status; 609 unsigned int estatus = 0; 610 611 status = fec16_to_cpu(bdp->cbd_sc); 612 status &= ~BD_ENET_TX_STATS; 613 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 614 615 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 616 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 617 if (((unsigned long)bufaddr) & fep->tx_align || 618 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 619 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 620 bufaddr = txq->tx_bounce[index]; 621 622 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 623 swap_buffer(bufaddr, hdr_len); 624 625 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 626 hdr_len, DMA_TO_DEVICE); 627 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 628 dev_kfree_skb_any(skb); 629 if (net_ratelimit()) 
630 netdev_err(ndev, "Tx DMA memory map failed\n"); 631 return NETDEV_TX_BUSY; 632 } 633 } 634 635 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); 636 bdp->cbd_datlen = cpu_to_fec16(hdr_len); 637 638 if (fep->bufdesc_ex) { 639 if (fep->quirks & FEC_QUIRK_HAS_AVB) 640 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 641 if (skb->ip_summed == CHECKSUM_PARTIAL) 642 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 643 ebdp->cbd_bdu = 0; 644 ebdp->cbd_esc = cpu_to_fec32(estatus); 645 } 646 647 bdp->cbd_sc = cpu_to_fec16(status); 648 649 return 0; 650 } 651 652 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 653 struct sk_buff *skb, 654 struct net_device *ndev) 655 { 656 struct fec_enet_private *fep = netdev_priv(ndev); 657 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 658 int total_len, data_left; 659 struct bufdesc *bdp = txq->bd.cur; 660 struct tso_t tso; 661 unsigned int index = 0; 662 int ret; 663 664 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { 665 dev_kfree_skb_any(skb); 666 if (net_ratelimit()) 667 netdev_err(ndev, "NOT enough BD for TSO!\n"); 668 return NETDEV_TX_OK; 669 } 670 671 /* Protocol checksum off-load for TCP and UDP. */ 672 if (fec_enet_clear_csum(skb, ndev)) { 673 dev_kfree_skb_any(skb); 674 return NETDEV_TX_OK; 675 } 676 677 /* Initialize the TSO handler, and prepare the first payload */ 678 tso_start(skb, &tso); 679 680 total_len = skb->len - hdr_len; 681 while (total_len > 0) { 682 char *hdr; 683 684 index = fec_enet_get_bd_index(bdp, &txq->bd); 685 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 686 total_len -= data_left; 687 688 /* prepare packet headers: MAC + IP + TCP */ 689 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 690 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 691 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 692 if (ret) 693 goto err_release; 694 695 while (data_left > 0) { 696 int size; 697 698 size = min_t(int, tso.size, data_left); 699 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 700 index = fec_enet_get_bd_index(bdp, &txq->bd); 701 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 702 bdp, index, 703 tso.data, size, 704 size == data_left, 705 total_len == 0); 706 if (ret) 707 goto err_release; 708 709 data_left -= size; 710 tso_build_data(skb, &tso, size); 711 } 712 713 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 714 } 715 716 /* Save skb pointer */ 717 txq->tx_skbuff[index] = skb; 718 719 skb_tx_timestamp(skb); 720 txq->bd.cur = bdp; 721 722 /* Trigger transmission start */ 723 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 724 !readl(txq->bd.reg_desc_active) || 725 !readl(txq->bd.reg_desc_active) || 726 !readl(txq->bd.reg_desc_active) || 727 !readl(txq->bd.reg_desc_active)) 728 writel(0, txq->bd.reg_desc_active); 729 730 return 0; 731 732 err_release: 733 /* TODO: Release all used data descriptors for TSO */ 734 return ret; 735 } 736 737 static netdev_tx_t 738 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 739 { 740 struct fec_enet_private *fep = netdev_priv(ndev); 741 int entries_free; 742 unsigned short queue; 743 struct fec_enet_priv_tx_q *txq; 744 struct netdev_queue *nq; 745 int ret; 746 747 queue = skb_get_queue_mapping(skb); 748 txq = fep->tx_queue[queue]; 749 nq = netdev_get_tx_queue(ndev, queue); 750 751 if (skb_is_gso(skb)) 752 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 753 else 754 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 755 if (ret) 756 return ret; 757 758 entries_free = fec_enet_get_free_txdesc_num(txq); 759 if (entries_free <= 
txq->tx_stop_threshold) 760 netif_tx_stop_queue(nq); 761 762 return NETDEV_TX_OK; 763 } 764 765 /* Init RX & TX buffer descriptors 766 */ 767 static void fec_enet_bd_init(struct net_device *dev) 768 { 769 struct fec_enet_private *fep = netdev_priv(dev); 770 struct fec_enet_priv_tx_q *txq; 771 struct fec_enet_priv_rx_q *rxq; 772 struct bufdesc *bdp; 773 unsigned int i; 774 unsigned int q; 775 776 for (q = 0; q < fep->num_rx_queues; q++) { 777 /* Initialize the receive buffer descriptors. */ 778 rxq = fep->rx_queue[q]; 779 bdp = rxq->bd.base; 780 781 for (i = 0; i < rxq->bd.ring_size; i++) { 782 783 /* Initialize the BD for every fragment in the page. */ 784 if (bdp->cbd_bufaddr) 785 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 786 else 787 bdp->cbd_sc = cpu_to_fec16(0); 788 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 789 } 790 791 /* Set the last buffer to wrap */ 792 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 793 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 794 795 rxq->bd.cur = rxq->bd.base; 796 } 797 798 for (q = 0; q < fep->num_tx_queues; q++) { 799 /* ...and the same for transmit */ 800 txq = fep->tx_queue[q]; 801 bdp = txq->bd.base; 802 txq->bd.cur = bdp; 803 804 for (i = 0; i < txq->bd.ring_size; i++) { 805 /* Initialize the BD for every fragment in the page. */ 806 bdp->cbd_sc = cpu_to_fec16(0); 807 if (txq->tx_skbuff[i]) { 808 dev_kfree_skb_any(txq->tx_skbuff[i]); 809 txq->tx_skbuff[i] = NULL; 810 } 811 bdp->cbd_bufaddr = cpu_to_fec32(0); 812 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 813 } 814 815 /* Set the last buffer to wrap */ 816 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 817 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 818 txq->dirty_tx = bdp; 819 } 820 } 821 822 static void fec_enet_active_rxring(struct net_device *ndev) 823 { 824 struct fec_enet_private *fep = netdev_priv(ndev); 825 int i; 826 827 for (i = 0; i < fep->num_rx_queues; i++) 828 writel(0, fep->rx_queue[i]->bd.reg_desc_active); 829 } 830 831 static void fec_enet_enable_ring(struct net_device *ndev) 832 { 833 struct fec_enet_private *fep = netdev_priv(ndev); 834 struct fec_enet_priv_tx_q *txq; 835 struct fec_enet_priv_rx_q *rxq; 836 int i; 837 838 for (i = 0; i < fep->num_rx_queues; i++) { 839 rxq = fep->rx_queue[i]; 840 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); 841 writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 842 843 /* enable DMA1/2 */ 844 if (i) 845 writel(RCMR_MATCHEN | RCMR_CMP(i), 846 fep->hwp + FEC_RCMR(i)); 847 } 848 849 for (i = 0; i < fep->num_tx_queues; i++) { 850 txq = fep->tx_queue[i]; 851 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); 852 853 /* enable DMA1/2 */ 854 if (i) 855 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 856 fep->hwp + FEC_DMA_CFG(i)); 857 } 858 } 859 860 static void fec_enet_reset_skb(struct net_device *ndev) 861 { 862 struct fec_enet_private *fep = netdev_priv(ndev); 863 struct fec_enet_priv_tx_q *txq; 864 int i, j; 865 866 for (i = 0; i < fep->num_tx_queues; i++) { 867 txq = fep->tx_queue[i]; 868 869 for (j = 0; j < txq->bd.ring_size; j++) { 870 if (txq->tx_skbuff[j]) { 871 dev_kfree_skb_any(txq->tx_skbuff[j]); 872 txq->tx_skbuff[j] = NULL; 873 } 874 } 875 } 876 } 877 878 /* 879 * This function is called to start or restart the FEC during a link 880 * change, transmit timeout, or to reconfigure the FEC. The network 881 * packet processing for this device must be stopped before this call. 
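 * Callers such as fec_enet_timeout_work() and fec_enet_adjust_link() below
 * do that by disabling NAPI and holding the TX queue lock around the restart.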
882 */ 883 static void 884 fec_restart(struct net_device *ndev) 885 { 886 struct fec_enet_private *fep = netdev_priv(ndev); 887 u32 val; 888 u32 temp_mac[2]; 889 u32 rcntl = OPT_FRAME_SIZE | 0x04; 890 u32 ecntl = 0x2; /* ETHEREN */ 891 892 /* Whack a reset. We should wait for this. 893 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 894 * instead of reset MAC itself. 895 */ 896 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 897 writel(0, fep->hwp + FEC_ECNTRL); 898 } else { 899 writel(1, fep->hwp + FEC_ECNTRL); 900 udelay(10); 901 } 902 903 /* 904 * enet-mac reset will reset mac address registers too, 905 * so need to reconfigure it. 906 */ 907 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 908 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); 909 writel((__force u32)cpu_to_be32(temp_mac[0]), 910 fep->hwp + FEC_ADDR_LOW); 911 writel((__force u32)cpu_to_be32(temp_mac[1]), 912 fep->hwp + FEC_ADDR_HIGH); 913 } 914 915 /* Clear any outstanding interrupt. */ 916 writel(0xffffffff, fep->hwp + FEC_IEVENT); 917 918 fec_enet_bd_init(ndev); 919 920 fec_enet_enable_ring(ndev); 921 922 /* Reset tx SKB buffers. */ 923 fec_enet_reset_skb(ndev); 924 925 /* Enable MII mode */ 926 if (fep->full_duplex == DUPLEX_FULL) { 927 /* FD enable */ 928 writel(0x04, fep->hwp + FEC_X_CNTRL); 929 } else { 930 /* No Rcv on Xmit */ 931 rcntl |= 0x02; 932 writel(0x0, fep->hwp + FEC_X_CNTRL); 933 } 934 935 /* Set MII speed */ 936 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 937 938 #if !defined(CONFIG_M5272) 939 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 940 /* set RX checksum */ 941 val = readl(fep->hwp + FEC_RACC); 942 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 943 val |= FEC_RACC_OPTIONS; 944 else 945 val &= ~FEC_RACC_OPTIONS; 946 writel(val, fep->hwp + FEC_RACC); 947 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); 948 } 949 #endif 950 951 /* 952 * The phy interface and speed need to get configured 953 * differently on enet-mac. 954 */ 955 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 956 /* Enable flow control and length check */ 957 rcntl |= 0x40000000 | 0x00000020; 958 959 /* RGMII, RMII or MII */ 960 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || 961 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 962 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 963 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 964 rcntl |= (1 << 6); 965 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 966 rcntl |= (1 << 8); 967 else 968 rcntl &= ~(1 << 8); 969 970 /* 1G, 100M or 10M */ 971 if (ndev->phydev) { 972 if (ndev->phydev->speed == SPEED_1000) 973 ecntl |= (1 << 5); 974 else if (ndev->phydev->speed == SPEED_100) 975 rcntl &= ~(1 << 9); 976 else 977 rcntl |= (1 << 9); 978 } 979 } else { 980 #ifdef FEC_MIIGSK_ENR 981 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 982 u32 cfgr; 983 /* disable the gasket and wait */ 984 writel(0, fep->hwp + FEC_MIIGSK_ENR); 985 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 986 udelay(1); 987 988 /* 989 * configure the gasket: 990 * RMII, 50 MHz, no loopback, no echo 991 * MII, 25 MHz, no loopback, no echo 992 */ 993 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 994 ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 995 if (ndev->phydev && ndev->phydev->speed == SPEED_10) 996 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 997 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 998 999 /* re-enable the gasket */ 1000 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1001 } 1002 #endif 1003 } 1004 1005 #if !defined(CONFIG_M5272) 1006 /* enable pause frame*/ 1007 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1008 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1009 ndev->phydev && ndev->phydev->pause)) { 1010 rcntl |= FEC_ENET_FCE; 1011 1012 /* set FIFO threshold parameter to reduce overrun */ 1013 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1014 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1015 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1016 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1017 1018 /* OPD */ 1019 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1020 } else { 1021 rcntl &= ~FEC_ENET_FCE; 1022 } 1023 #endif /* !defined(CONFIG_M5272) */ 1024 1025 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1026 1027 /* Setup multicast filter. */ 1028 set_multicast_list(ndev); 1029 #ifndef CONFIG_M5272 1030 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1031 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1032 #endif 1033 1034 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1035 /* enable ENET endian swap */ 1036 ecntl |= (1 << 8); 1037 /* enable ENET store and forward mode */ 1038 writel(1 << 8, fep->hwp + FEC_X_WMRK); 1039 } 1040 1041 if (fep->bufdesc_ex) 1042 ecntl |= (1 << 4); 1043 1044 #ifndef CONFIG_M5272 1045 /* Enable the MIB statistic event counters */ 1046 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1047 #endif 1048 1049 /* And last, enable the transmit and receive processing */ 1050 writel(ecntl, fep->hwp + FEC_ECNTRL); 1051 fec_enet_active_rxring(ndev); 1052 1053 if (fep->bufdesc_ex) 1054 fec_ptp_start_cyclecounter(ndev); 1055 1056 /* Enable interrupts we wish to service */ 1057 if (fep->link) 1058 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1059 else 1060 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); 1061 1062 /* Init the interrupt coalescing */ 1063 fec_enet_itr_coal_init(ndev); 1064 1065 } 1066 1067 static void 1068 fec_stop(struct net_device *ndev) 1069 { 1070 struct fec_enet_private *fep = netdev_priv(ndev); 1071 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1072 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); 1073 u32 val; 1074 1075 /* We cannot expect a graceful transmit stop without link !!! */ 1076 if (fep->link) { 1077 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1078 udelay(10); 1079 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 1080 netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1081 } 1082 1083 /* Whack a reset. We should wait for this. 1084 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1085 * instead of reset MAC itself. 
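 * When Wake-on-LAN sleep mode is requested (FEC_WOL_FLAG_SLEEP_ON) the reset
 * is skipped entirely; instead MAGICEN/SLEEP are set in ECR and the platform
 * sleep callback, if provided, is invoked further below.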
1086 */ 1087 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1088 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 1089 writel(0, fep->hwp + FEC_ECNTRL); 1090 } else { 1091 writel(1, fep->hwp + FEC_ECNTRL); 1092 udelay(10); 1093 } 1094 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1095 } else { 1096 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 1097 val = readl(fep->hwp + FEC_ECNTRL); 1098 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1099 writel(val, fep->hwp + FEC_ECNTRL); 1100 1101 if (pdata && pdata->sleep_mode_enable) 1102 pdata->sleep_mode_enable(true); 1103 } 1104 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1105 1106 /* We have to keep ENET enabled to have MII interrupt stay working */ 1107 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1108 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1109 writel(2, fep->hwp + FEC_ECNTRL); 1110 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1111 } 1112 } 1113 1114 1115 static void 1116 fec_timeout(struct net_device *ndev) 1117 { 1118 struct fec_enet_private *fep = netdev_priv(ndev); 1119 1120 fec_dump(ndev); 1121 1122 ndev->stats.tx_errors++; 1123 1124 schedule_work(&fep->tx_timeout_work); 1125 } 1126 1127 static void fec_enet_timeout_work(struct work_struct *work) 1128 { 1129 struct fec_enet_private *fep = 1130 container_of(work, struct fec_enet_private, tx_timeout_work); 1131 struct net_device *ndev = fep->netdev; 1132 1133 rtnl_lock(); 1134 if (netif_device_present(ndev) || netif_running(ndev)) { 1135 napi_disable(&fep->napi); 1136 netif_tx_lock_bh(ndev); 1137 fec_restart(ndev); 1138 netif_wake_queue(ndev); 1139 netif_tx_unlock_bh(ndev); 1140 napi_enable(&fep->napi); 1141 } 1142 rtnl_unlock(); 1143 } 1144 1145 static void 1146 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1147 struct skb_shared_hwtstamps *hwtstamps) 1148 { 1149 unsigned long flags; 1150 u64 ns; 1151 1152 spin_lock_irqsave(&fep->tmreg_lock, flags); 1153 ns = timecounter_cyc2time(&fep->tc, ts); 1154 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1155 1156 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1157 hwtstamps->hwtstamp = ns_to_ktime(ns); 1158 } 1159 1160 static void 1161 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1162 { 1163 struct fec_enet_private *fep; 1164 struct bufdesc *bdp; 1165 unsigned short status; 1166 struct sk_buff *skb; 1167 struct fec_enet_priv_tx_q *txq; 1168 struct netdev_queue *nq; 1169 int index = 0; 1170 int entries_free; 1171 1172 fep = netdev_priv(ndev); 1173 1174 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1175 1176 txq = fep->tx_queue[queue_id]; 1177 /* get next bdp of dirty_tx */ 1178 nq = netdev_get_tx_queue(ndev, queue_id); 1179 bdp = txq->dirty_tx; 1180 1181 /* get next bdp of dirty_tx */ 1182 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1183 1184 while (bdp != READ_ONCE(txq->bd.cur)) { 1185 /* Order the load of bd.cur and cbd_sc */ 1186 rmb(); 1187 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1188 if (status & BD_ENET_TX_READY) 1189 break; 1190 1191 index = fec_enet_get_bd_index(bdp, &txq->bd); 1192 1193 skb = txq->tx_skbuff[index]; 1194 txq->tx_skbuff[index] = NULL; 1195 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1196 dma_unmap_single(&fep->pdev->dev, 1197 fec32_to_cpu(bdp->cbd_bufaddr), 1198 fec16_to_cpu(bdp->cbd_datlen), 1199 DMA_TO_DEVICE); 1200 bdp->cbd_bufaddr = cpu_to_fec32(0); 1201 if (!skb) 1202 goto skb_done; 1203 1204 /* Check for errors. 
*/ 1205 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1206 BD_ENET_TX_RL | BD_ENET_TX_UN | 1207 BD_ENET_TX_CSL)) { 1208 ndev->stats.tx_errors++; 1209 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1210 ndev->stats.tx_heartbeat_errors++; 1211 if (status & BD_ENET_TX_LC) /* Late collision */ 1212 ndev->stats.tx_window_errors++; 1213 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1214 ndev->stats.tx_aborted_errors++; 1215 if (status & BD_ENET_TX_UN) /* Underrun */ 1216 ndev->stats.tx_fifo_errors++; 1217 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1218 ndev->stats.tx_carrier_errors++; 1219 } else { 1220 ndev->stats.tx_packets++; 1221 ndev->stats.tx_bytes += skb->len; 1222 } 1223 1224 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1225 fep->bufdesc_ex) { 1226 struct skb_shared_hwtstamps shhwtstamps; 1227 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1228 1229 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 1230 skb_tstamp_tx(skb, &shhwtstamps); 1231 } 1232 1233 /* Deferred means some collisions occurred during transmit, 1234 * but we eventually sent the packet OK. 1235 */ 1236 if (status & BD_ENET_TX_DEF) 1237 ndev->stats.collisions++; 1238 1239 /* Free the sk buffer associated with this last transmit */ 1240 dev_kfree_skb_any(skb); 1241 skb_done: 1242 /* Make sure the update to bdp and tx_skbuff are performed 1243 * before dirty_tx 1244 */ 1245 wmb(); 1246 txq->dirty_tx = bdp; 1247 1248 /* Update pointer to next buffer descriptor to be transmitted */ 1249 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1250 1251 /* Since we have freed up a buffer, the ring is no longer full 1252 */ 1253 if (netif_queue_stopped(ndev)) { 1254 entries_free = fec_enet_get_free_txdesc_num(txq); 1255 if (entries_free >= txq->tx_wake_threshold) 1256 netif_tx_wake_queue(nq); 1257 } 1258 } 1259 1260 /* ERR006538: Keep the transmitter going */ 1261 if (bdp != txq->bd.cur && 1262 readl(txq->bd.reg_desc_active) == 0) 1263 writel(0, txq->bd.reg_desc_active); 1264 } 1265 1266 static void 1267 fec_enet_tx(struct net_device *ndev) 1268 { 1269 struct fec_enet_private *fep = netdev_priv(ndev); 1270 u16 queue_id; 1271 /* First process class A queue, then Class B and Best Effort queue */ 1272 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { 1273 clear_bit(queue_id, &fep->work_tx); 1274 fec_enet_tx_queue(ndev, queue_id); 1275 } 1276 return; 1277 } 1278 1279 static int 1280 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) 1281 { 1282 struct fec_enet_private *fep = netdev_priv(ndev); 1283 int off; 1284 1285 off = ((unsigned long)skb->data) & fep->rx_align; 1286 if (off) 1287 skb_reserve(skb, fep->rx_align + 1 - off); 1288 1289 bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); 1290 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { 1291 if (net_ratelimit()) 1292 netdev_err(ndev, "Rx DMA memory map failed\n"); 1293 return -ENOMEM; 1294 } 1295 1296 return 0; 1297 } 1298 1299 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, 1300 struct bufdesc *bdp, u32 length, bool swap) 1301 { 1302 struct fec_enet_private *fep = netdev_priv(ndev); 1303 struct sk_buff *new_skb; 1304 1305 if (length > fep->rx_copybreak) 1306 return false; 1307 1308 new_skb = netdev_alloc_skb(ndev, length); 1309 if (!new_skb) 1310 return false; 1311 1312 dma_sync_single_for_cpu(&fep->pdev->dev, 1313 fec32_to_cpu(bdp->cbd_bufaddr), 1314 FEC_ENET_RX_FRSIZE 
- fep->rx_align, 1315 DMA_FROM_DEVICE); 1316 if (!swap) 1317 memcpy(new_skb->data, (*skb)->data, length); 1318 else 1319 swap_buffer2(new_skb->data, (*skb)->data, length); 1320 *skb = new_skb; 1321 1322 return true; 1323 } 1324 1325 /* During a receive, the bd_rx.cur points to the current incoming buffer. 1326 * When we update through the ring, if the next incoming buffer has 1327 * not been given to the system, we just set the empty indicator, 1328 * effectively tossing the packet. 1329 */ 1330 static int 1331 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) 1332 { 1333 struct fec_enet_private *fep = netdev_priv(ndev); 1334 struct fec_enet_priv_rx_q *rxq; 1335 struct bufdesc *bdp; 1336 unsigned short status; 1337 struct sk_buff *skb_new = NULL; 1338 struct sk_buff *skb; 1339 ushort pkt_len; 1340 __u8 *data; 1341 int pkt_received = 0; 1342 struct bufdesc_ex *ebdp = NULL; 1343 bool vlan_packet_rcvd = false; 1344 u16 vlan_tag; 1345 int index = 0; 1346 bool is_copybreak; 1347 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 1348 1349 #ifdef CONFIG_M532x 1350 flush_cache_all(); 1351 #endif 1352 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1353 rxq = fep->rx_queue[queue_id]; 1354 1355 /* First, grab all of the stats for the incoming packet. 1356 * These get messed up if we get called due to a busy condition. 1357 */ 1358 bdp = rxq->bd.cur; 1359 1360 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1361 1362 if (pkt_received >= budget) 1363 break; 1364 pkt_received++; 1365 1366 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); 1367 1368 /* Check for errors. */ 1369 status ^= BD_ENET_RX_LAST; 1370 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1371 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1372 BD_ENET_RX_CL)) { 1373 ndev->stats.rx_errors++; 1374 if (status & BD_ENET_RX_OV) { 1375 /* FIFO overrun */ 1376 ndev->stats.rx_fifo_errors++; 1377 goto rx_processing_done; 1378 } 1379 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1380 | BD_ENET_RX_LAST)) { 1381 /* Frame too long or too short. */ 1382 ndev->stats.rx_length_errors++; 1383 if (status & BD_ENET_RX_LAST) 1384 netdev_err(ndev, "rcv is not +last\n"); 1385 } 1386 if (status & BD_ENET_RX_CR) /* CRC Error */ 1387 ndev->stats.rx_crc_errors++; 1388 /* Report late collisions as a frame error. */ 1389 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1390 ndev->stats.rx_frame_errors++; 1391 goto rx_processing_done; 1392 } 1393 1394 /* Process the incoming frame. */ 1395 ndev->stats.rx_packets++; 1396 pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1397 ndev->stats.rx_bytes += pkt_len; 1398 1399 index = fec_enet_get_bd_index(bdp, &rxq->bd); 1400 skb = rxq->rx_skbuff[index]; 1401 1402 /* The packet length includes FCS, but we don't want to 1403 * include that when passing upstream as it messes up 1404 * bridging applications. 
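 * Hence the "pkt_len - 4" used below: the 4-byte FCS is dropped before the
 * skb is handed up the stack.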
1405 */ 1406 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, 1407 need_swap); 1408 if (!is_copybreak) { 1409 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 1410 if (unlikely(!skb_new)) { 1411 ndev->stats.rx_dropped++; 1412 goto rx_processing_done; 1413 } 1414 dma_unmap_single(&fep->pdev->dev, 1415 fec32_to_cpu(bdp->cbd_bufaddr), 1416 FEC_ENET_RX_FRSIZE - fep->rx_align, 1417 DMA_FROM_DEVICE); 1418 } 1419 1420 prefetch(skb->data - NET_IP_ALIGN); 1421 skb_put(skb, pkt_len - 4); 1422 data = skb->data; 1423 if (!is_copybreak && need_swap) 1424 swap_buffer(data, pkt_len); 1425 1426 /* Extract the enhanced buffer descriptor */ 1427 ebdp = NULL; 1428 if (fep->bufdesc_ex) 1429 ebdp = (struct bufdesc_ex *)bdp; 1430 1431 /* If this is a VLAN packet remove the VLAN Tag */ 1432 vlan_packet_rcvd = false; 1433 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1434 fep->bufdesc_ex && 1435 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { 1436 /* Push and remove the vlan tag */ 1437 struct vlan_hdr *vlan_header = 1438 (struct vlan_hdr *) (data + ETH_HLEN); 1439 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1440 1441 vlan_packet_rcvd = true; 1442 1443 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); 1444 skb_pull(skb, VLAN_HLEN); 1445 } 1446 1447 skb->protocol = eth_type_trans(skb, ndev); 1448 1449 /* Get receive timestamp from the skb */ 1450 if (fep->hwts_rx_en && fep->bufdesc_ex) 1451 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1452 skb_hwtstamps(skb)); 1453 1454 if (fep->bufdesc_ex && 1455 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1456 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 1457 /* don't check it */ 1458 skb->ip_summed = CHECKSUM_UNNECESSARY; 1459 } else { 1460 skb_checksum_none_assert(skb); 1461 } 1462 } 1463 1464 /* Handle received VLAN packets */ 1465 if (vlan_packet_rcvd) 1466 __vlan_hwaccel_put_tag(skb, 1467 htons(ETH_P_8021Q), 1468 vlan_tag); 1469 1470 napi_gro_receive(&fep->napi, skb); 1471 1472 if (is_copybreak) { 1473 dma_sync_single_for_device(&fep->pdev->dev, 1474 fec32_to_cpu(bdp->cbd_bufaddr), 1475 FEC_ENET_RX_FRSIZE - fep->rx_align, 1476 DMA_FROM_DEVICE); 1477 } else { 1478 rxq->rx_skbuff[index] = skb_new; 1479 fec_enet_new_rxbdp(ndev, bdp, skb_new); 1480 } 1481 1482 rx_processing_done: 1483 /* Clear the status flags for this buffer */ 1484 status &= ~BD_ENET_RX_STATS; 1485 1486 /* Mark the buffer empty */ 1487 status |= BD_ENET_RX_EMPTY; 1488 1489 if (fep->bufdesc_ex) { 1490 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1491 1492 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1493 ebdp->cbd_prot = 0; 1494 ebdp->cbd_bdu = 0; 1495 } 1496 /* Make sure the updates to rest of the descriptor are 1497 * performed before transferring ownership. 1498 */ 1499 wmb(); 1500 bdp->cbd_sc = cpu_to_fec16(status); 1501 1502 /* Update BD pointer to next entry */ 1503 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1504 1505 /* Doing this here will keep the FEC running while we process 1506 * incoming frames. On a heavily loaded network, we should be 1507 * able to keep up at the expense of system resources. 
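 * The write below re-arms the receive descriptor ring (the R_DES_ACTIVE/RDAR
 * register), so the controller keeps fetching empty descriptors as we hand
 * them back.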
1508 */ 1509 writel(0, rxq->bd.reg_desc_active); 1510 } 1511 rxq->bd.cur = bdp; 1512 return pkt_received; 1513 } 1514 1515 static int 1516 fec_enet_rx(struct net_device *ndev, int budget) 1517 { 1518 int pkt_received = 0; 1519 u16 queue_id; 1520 struct fec_enet_private *fep = netdev_priv(ndev); 1521 1522 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1523 int ret; 1524 1525 ret = fec_enet_rx_queue(ndev, 1526 budget - pkt_received, queue_id); 1527 1528 if (ret < budget - pkt_received) 1529 clear_bit(queue_id, &fep->work_rx); 1530 1531 pkt_received += ret; 1532 } 1533 return pkt_received; 1534 } 1535 1536 static bool 1537 fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) 1538 { 1539 if (int_events == 0) 1540 return false; 1541 1542 if (int_events & FEC_ENET_RXF) 1543 fep->work_rx |= (1 << 2); 1544 if (int_events & FEC_ENET_RXF_1) 1545 fep->work_rx |= (1 << 0); 1546 if (int_events & FEC_ENET_RXF_2) 1547 fep->work_rx |= (1 << 1); 1548 1549 if (int_events & FEC_ENET_TXF) 1550 fep->work_tx |= (1 << 2); 1551 if (int_events & FEC_ENET_TXF_1) 1552 fep->work_tx |= (1 << 0); 1553 if (int_events & FEC_ENET_TXF_2) 1554 fep->work_tx |= (1 << 1); 1555 1556 return true; 1557 } 1558 1559 static irqreturn_t 1560 fec_enet_interrupt(int irq, void *dev_id) 1561 { 1562 struct net_device *ndev = dev_id; 1563 struct fec_enet_private *fep = netdev_priv(ndev); 1564 uint int_events; 1565 irqreturn_t ret = IRQ_NONE; 1566 1567 int_events = readl(fep->hwp + FEC_IEVENT); 1568 writel(int_events, fep->hwp + FEC_IEVENT); 1569 fec_enet_collect_events(fep, int_events); 1570 1571 if ((fep->work_tx || fep->work_rx) && fep->link) { 1572 ret = IRQ_HANDLED; 1573 1574 if (napi_schedule_prep(&fep->napi)) { 1575 /* Disable the NAPI interrupts */ 1576 writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); 1577 __napi_schedule(&fep->napi); 1578 } 1579 } 1580 1581 if (int_events & FEC_ENET_MII) { 1582 ret = IRQ_HANDLED; 1583 complete(&fep->mdio_done); 1584 } 1585 1586 if (fep->ptp_clock) 1587 fec_ptp_check_pps_event(fep); 1588 1589 return ret; 1590 } 1591 1592 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1593 { 1594 struct net_device *ndev = napi->dev; 1595 struct fec_enet_private *fep = netdev_priv(ndev); 1596 int pkts; 1597 1598 pkts = fec_enet_rx(ndev, budget); 1599 1600 fec_enet_tx(ndev); 1601 1602 if (pkts < budget) { 1603 napi_complete(napi); 1604 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1605 } 1606 return pkts; 1607 } 1608 1609 /* ------------------------------------------------------------------------- */ 1610 static void fec_get_mac(struct net_device *ndev) 1611 { 1612 struct fec_enet_private *fep = netdev_priv(ndev); 1613 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 1614 unsigned char *iap, tmpaddr[ETH_ALEN]; 1615 1616 /* 1617 * try to get mac address in following order: 1618 * 1619 * 1) module parameter via kernel command line in form 1620 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 1621 */ 1622 iap = macaddr; 1623 1624 /* 1625 * 2) from device tree data 1626 */ 1627 if (!is_valid_ether_addr(iap)) { 1628 struct device_node *np = fep->pdev->dev.of_node; 1629 if (np) { 1630 const char *mac = of_get_mac_address(np); 1631 if (mac) 1632 iap = (unsigned char *) mac; 1633 } 1634 } 1635 1636 /* 1637 * 3) from flash or fuse (via platform data) 1638 */ 1639 if (!is_valid_ether_addr(iap)) { 1640 #ifdef CONFIG_M5272 1641 if (FEC_FLASHMAC) 1642 iap = (unsigned char *)FEC_FLASHMAC; 1643 #else 1644 if (pdata) 1645 iap = (unsigned char *)&pdata->mac; 1646 
#endif 1647 } 1648 1649 /* 1650 * 4) FEC mac registers set by bootloader 1651 */ 1652 if (!is_valid_ether_addr(iap)) { 1653 *((__be32 *) &tmpaddr[0]) = 1654 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 1655 *((__be16 *) &tmpaddr[4]) = 1656 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1657 iap = &tmpaddr[0]; 1658 } 1659 1660 /* 1661 * 5) random mac address 1662 */ 1663 if (!is_valid_ether_addr(iap)) { 1664 /* Report it and use a random ethernet address instead */ 1665 netdev_err(ndev, "Invalid MAC address: %pM\n", iap); 1666 eth_hw_addr_random(ndev); 1667 netdev_info(ndev, "Using random MAC address: %pM\n", 1668 ndev->dev_addr); 1669 return; 1670 } 1671 1672 memcpy(ndev->dev_addr, iap, ETH_ALEN); 1673 1674 /* Adjust MAC if using macaddr */ 1675 if (iap == macaddr) 1676 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; 1677 } 1678 1679 /* ------------------------------------------------------------------------- */ 1680 1681 /* 1682 * Phy section 1683 */ 1684 static void fec_enet_adjust_link(struct net_device *ndev) 1685 { 1686 struct fec_enet_private *fep = netdev_priv(ndev); 1687 struct phy_device *phy_dev = ndev->phydev; 1688 int status_change = 0; 1689 1690 /* Prevent a state halted on mii error */ 1691 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { 1692 phy_dev->state = PHY_RESUMING; 1693 return; 1694 } 1695 1696 /* 1697 * If the netdev is down, or is going down, we're not interested 1698 * in link state events, so just mark our idea of the link as down 1699 * and ignore the event. 1700 */ 1701 if (!netif_running(ndev) || !netif_device_present(ndev)) { 1702 fep->link = 0; 1703 } else if (phy_dev->link) { 1704 if (!fep->link) { 1705 fep->link = phy_dev->link; 1706 status_change = 1; 1707 } 1708 1709 if (fep->full_duplex != phy_dev->duplex) { 1710 fep->full_duplex = phy_dev->duplex; 1711 status_change = 1; 1712 } 1713 1714 if (phy_dev->speed != fep->speed) { 1715 fep->speed = phy_dev->speed; 1716 status_change = 1; 1717 } 1718 1719 /* if any of the above changed restart the FEC */ 1720 if (status_change) { 1721 napi_disable(&fep->napi); 1722 netif_tx_lock_bh(ndev); 1723 fec_restart(ndev); 1724 netif_wake_queue(ndev); 1725 netif_tx_unlock_bh(ndev); 1726 napi_enable(&fep->napi); 1727 } 1728 } else { 1729 if (fep->link) { 1730 napi_disable(&fep->napi); 1731 netif_tx_lock_bh(ndev); 1732 fec_stop(ndev); 1733 netif_tx_unlock_bh(ndev); 1734 napi_enable(&fep->napi); 1735 fep->link = phy_dev->link; 1736 status_change = 1; 1737 } 1738 } 1739 1740 if (status_change) 1741 phy_print_status(phy_dev); 1742 } 1743 1744 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1745 { 1746 struct fec_enet_private *fep = bus->priv; 1747 struct device *dev = &fep->pdev->dev; 1748 unsigned long time_left; 1749 int ret = 0; 1750 1751 ret = pm_runtime_get_sync(dev); 1752 if (ret < 0) 1753 return ret; 1754 1755 fep->mii_timeout = 0; 1756 reinit_completion(&fep->mdio_done); 1757 1758 /* start a read op */ 1759 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 1760 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1761 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 1762 1763 /* wait for end of transfer */ 1764 time_left = wait_for_completion_timeout(&fep->mdio_done, 1765 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1766 if (time_left == 0) { 1767 fep->mii_timeout = 1; 1768 netdev_err(fep->netdev, "MDIO read timeout\n"); 1769 ret = -ETIMEDOUT; 1770 goto out; 1771 } 1772 1773 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1774 1775 out: 1776 pm_runtime_mark_last_busy(dev); 1777 
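	/* Drop the runtime-PM reference asynchronously: mark_last_busy() above
	 * records the timestamp used by the autosuspend timer, so (assuming the
	 * runtime-PM suspend callback gates the MDIO clock) the clock stays on
	 * for further MDIO accesses until the autosuspend delay expires.
	 */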
pm_runtime_put_autosuspend(dev); 1778 1779 return ret; 1780 } 1781 1782 static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1783 u16 value) 1784 { 1785 struct fec_enet_private *fep = bus->priv; 1786 struct device *dev = &fep->pdev->dev; 1787 unsigned long time_left; 1788 int ret; 1789 1790 ret = pm_runtime_get_sync(dev); 1791 if (ret < 0) 1792 return ret; 1793 else 1794 ret = 0; 1795 1796 fep->mii_timeout = 0; 1797 reinit_completion(&fep->mdio_done); 1798 1799 /* start a write op */ 1800 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | 1801 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1802 FEC_MMFR_TA | FEC_MMFR_DATA(value), 1803 fep->hwp + FEC_MII_DATA); 1804 1805 /* wait for end of transfer */ 1806 time_left = wait_for_completion_timeout(&fep->mdio_done, 1807 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1808 if (time_left == 0) { 1809 fep->mii_timeout = 1; 1810 netdev_err(fep->netdev, "MDIO write timeout\n"); 1811 ret = -ETIMEDOUT; 1812 } 1813 1814 pm_runtime_mark_last_busy(dev); 1815 pm_runtime_put_autosuspend(dev); 1816 1817 return ret; 1818 } 1819 1820 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1821 { 1822 struct fec_enet_private *fep = netdev_priv(ndev); 1823 int ret; 1824 1825 if (enable) { 1826 ret = clk_prepare_enable(fep->clk_ahb); 1827 if (ret) 1828 return ret; 1829 if (fep->clk_enet_out) { 1830 ret = clk_prepare_enable(fep->clk_enet_out); 1831 if (ret) 1832 goto failed_clk_enet_out; 1833 } 1834 if (fep->clk_ptp) { 1835 mutex_lock(&fep->ptp_clk_mutex); 1836 ret = clk_prepare_enable(fep->clk_ptp); 1837 if (ret) { 1838 mutex_unlock(&fep->ptp_clk_mutex); 1839 goto failed_clk_ptp; 1840 } else { 1841 fep->ptp_clk_on = true; 1842 } 1843 mutex_unlock(&fep->ptp_clk_mutex); 1844 } 1845 if (fep->clk_ref) { 1846 ret = clk_prepare_enable(fep->clk_ref); 1847 if (ret) 1848 goto failed_clk_ref; 1849 } 1850 } else { 1851 clk_disable_unprepare(fep->clk_ahb); 1852 if (fep->clk_enet_out) 1853 clk_disable_unprepare(fep->clk_enet_out); 1854 if (fep->clk_ptp) { 1855 mutex_lock(&fep->ptp_clk_mutex); 1856 clk_disable_unprepare(fep->clk_ptp); 1857 fep->ptp_clk_on = false; 1858 mutex_unlock(&fep->ptp_clk_mutex); 1859 } 1860 if (fep->clk_ref) 1861 clk_disable_unprepare(fep->clk_ref); 1862 } 1863 1864 return 0; 1865 1866 failed_clk_ref: 1867 if (fep->clk_ref) 1868 clk_disable_unprepare(fep->clk_ref); 1869 failed_clk_ptp: 1870 if (fep->clk_enet_out) 1871 clk_disable_unprepare(fep->clk_enet_out); 1872 failed_clk_enet_out: 1873 clk_disable_unprepare(fep->clk_ahb); 1874 1875 return ret; 1876 } 1877 1878 static int fec_enet_mii_probe(struct net_device *ndev) 1879 { 1880 struct fec_enet_private *fep = netdev_priv(ndev); 1881 struct phy_device *phy_dev = NULL; 1882 char mdio_bus_id[MII_BUS_ID_SIZE]; 1883 char phy_name[MII_BUS_ID_SIZE + 3]; 1884 int phy_id; 1885 int dev_id = fep->dev_id; 1886 1887 if (fep->phy_node) { 1888 phy_dev = of_phy_connect(ndev, fep->phy_node, 1889 &fec_enet_adjust_link, 0, 1890 fep->phy_interface); 1891 if (!phy_dev) 1892 return -ENODEV; 1893 } else { 1894 /* check for attached phy */ 1895 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 1896 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 1897 continue; 1898 if (dev_id--) 1899 continue; 1900 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 1901 break; 1902 } 1903 1904 if (phy_id >= PHY_MAX_ADDR) { 1905 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 1906 strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 1907 phy_id = 0; 1908 } 1909 1910 snprintf(phy_name, 
sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	} else {
		phy_dev->supported &= PHY_BASIC_FEATURES;
	}

	phy_dev->advertising = phy_dev->supported;

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 cannot work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, which is fixed in the
	 * i.MX6Q document.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
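 *
 * As a worked example (assuming a 66 MHz ipg clock, as on i.MX6):
 *   mii_speed = DIV_ROUND_UP(66000000, 5000000) - 1 = 13,
 *   giving MDC = 66 MHz / ((13 + 1) * 2) ~= 2.36 MHz (<= 2.5 MHz), and
 *   holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0,
 *   i.e. one clk cycle (~15 ns) of hold time, above the 10 ns minimum.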
2009 */ 2010 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2011 2012 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2013 2014 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2015 2016 fep->mii_bus = mdiobus_alloc(); 2017 if (fep->mii_bus == NULL) { 2018 err = -ENOMEM; 2019 goto err_out; 2020 } 2021 2022 fep->mii_bus->name = "fec_enet_mii_bus"; 2023 fep->mii_bus->read = fec_enet_mdio_read; 2024 fep->mii_bus->write = fec_enet_mdio_write; 2025 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2026 pdev->name, fep->dev_id + 1); 2027 fep->mii_bus->priv = fep; 2028 fep->mii_bus->parent = &pdev->dev; 2029 2030 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2031 if (node) { 2032 err = of_mdiobus_register(fep->mii_bus, node); 2033 of_node_put(node); 2034 } else { 2035 err = mdiobus_register(fep->mii_bus); 2036 } 2037 2038 if (err) 2039 goto err_out_free_mdiobus; 2040 2041 mii_cnt++; 2042 2043 /* save fec0 mii_bus */ 2044 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2045 fec0_mii_bus = fep->mii_bus; 2046 2047 return 0; 2048 2049 err_out_free_mdiobus: 2050 mdiobus_free(fep->mii_bus); 2051 err_out: 2052 return err; 2053 } 2054 2055 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2056 { 2057 if (--mii_cnt == 0) { 2058 mdiobus_unregister(fep->mii_bus); 2059 mdiobus_free(fep->mii_bus); 2060 } 2061 } 2062 2063 static void fec_enet_get_drvinfo(struct net_device *ndev, 2064 struct ethtool_drvinfo *info) 2065 { 2066 struct fec_enet_private *fep = netdev_priv(ndev); 2067 2068 strlcpy(info->driver, fep->pdev->dev.driver->name, 2069 sizeof(info->driver)); 2070 strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); 2071 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2072 } 2073 2074 static int fec_enet_get_regs_len(struct net_device *ndev) 2075 { 2076 struct fec_enet_private *fep = netdev_priv(ndev); 2077 struct resource *r; 2078 int s = 0; 2079 2080 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2081 if (r) 2082 s = resource_size(r); 2083 2084 return s; 2085 } 2086 2087 /* List of registers that can safely be read to dump them with ethtool */ 2088 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2089 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) 2090 static u32 fec_enet_register_offset[] = { 2091 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2092 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2093 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2094 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2095 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2096 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2097 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2098 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2099 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2100 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2101 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2102 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2103 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2104 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2105 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2106 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2107 RMON_T_P_GTE2048, RMON_T_OCTETS, 2108 IEEE_T_DROP, IEEE_T_FRAME_OK,
IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2109 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2110 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2111 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2112 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2113 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2114 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2115 RMON_R_P_GTE2048, RMON_R_OCTETS, 2116 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2117 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2118 }; 2119 #else 2120 static u32 fec_enet_register_offset[] = { 2121 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2122 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2123 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2124 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2125 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2126 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2127 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2128 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2129 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2130 }; 2131 #endif 2132 2133 static void fec_enet_get_regs(struct net_device *ndev, 2134 struct ethtool_regs *regs, void *regbuf) 2135 { 2136 struct fec_enet_private *fep = netdev_priv(ndev); 2137 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2138 u32 *buf = (u32 *)regbuf; 2139 u32 i, off; 2140 2141 memset(buf, 0, regs->len); 2142 2143 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2144 off = fec_enet_register_offset[i] / 4; 2145 buf[off] = readl(&theregs[off]); 2146 } 2147 } 2148 2149 static int fec_enet_get_ts_info(struct net_device *ndev, 2150 struct ethtool_ts_info *info) 2151 { 2152 struct fec_enet_private *fep = netdev_priv(ndev); 2153 2154 if (fep->bufdesc_ex) { 2155 2156 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2157 SOF_TIMESTAMPING_RX_SOFTWARE | 2158 SOF_TIMESTAMPING_SOFTWARE | 2159 SOF_TIMESTAMPING_TX_HARDWARE | 2160 SOF_TIMESTAMPING_RX_HARDWARE | 2161 SOF_TIMESTAMPING_RAW_HARDWARE; 2162 if (fep->ptp_clock) 2163 info->phc_index = ptp_clock_index(fep->ptp_clock); 2164 else 2165 info->phc_index = -1; 2166 2167 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2168 (1 << HWTSTAMP_TX_ON); 2169 2170 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2171 (1 << HWTSTAMP_FILTER_ALL); 2172 return 0; 2173 } else { 2174 return ethtool_op_get_ts_info(ndev, info); 2175 } 2176 } 2177 2178 #if !defined(CONFIG_M5272) 2179 2180 static void fec_enet_get_pauseparam(struct net_device *ndev, 2181 struct ethtool_pauseparam *pause) 2182 { 2183 struct fec_enet_private *fep = netdev_priv(ndev); 2184 2185 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2186 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2187 pause->rx_pause = pause->tx_pause; 2188 } 2189 2190 static int fec_enet_set_pauseparam(struct net_device *ndev, 2191 struct ethtool_pauseparam *pause) 2192 { 2193 struct fec_enet_private *fep = netdev_priv(ndev); 2194 2195 if (!ndev->phydev) 2196 return -ENODEV; 2197 2198 if (pause->tx_pause != pause->rx_pause) { 2199 netdev_info(ndev, 2200 "hardware only supports enabling/disabling both tx and rx"); 2201 return -EINVAL; 2202 } 2203 2204 fep->pause_flag = 0; 2205 2206 /* tx pause must be same as rx pause */ 2207 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2208 fep->pause_flag |= pause->autoneg ?
FEC_PAUSE_FLAG_AUTONEG : 0; 2209 2210 if (pause->rx_pause || pause->autoneg) { 2211 ndev->phydev->supported |= ADVERTISED_Pause; 2212 ndev->phydev->advertising |= ADVERTISED_Pause; 2213 } else { 2214 ndev->phydev->supported &= ~ADVERTISED_Pause; 2215 ndev->phydev->advertising &= ~ADVERTISED_Pause; 2216 } 2217 2218 if (pause->autoneg) { 2219 if (netif_running(ndev)) 2220 fec_stop(ndev); 2221 phy_start_aneg(ndev->phydev); 2222 } 2223 if (netif_running(ndev)) { 2224 napi_disable(&fep->napi); 2225 netif_tx_lock_bh(ndev); 2226 fec_restart(ndev); 2227 netif_wake_queue(ndev); 2228 netif_tx_unlock_bh(ndev); 2229 napi_enable(&fep->napi); 2230 } 2231 2232 return 0; 2233 } 2234 2235 static const struct fec_stat { 2236 char name[ETH_GSTRING_LEN]; 2237 u16 offset; 2238 } fec_stats[] = { 2239 /* RMON TX */ 2240 { "tx_dropped", RMON_T_DROP }, 2241 { "tx_packets", RMON_T_PACKETS }, 2242 { "tx_broadcast", RMON_T_BC_PKT }, 2243 { "tx_multicast", RMON_T_MC_PKT }, 2244 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2245 { "tx_undersize", RMON_T_UNDERSIZE }, 2246 { "tx_oversize", RMON_T_OVERSIZE }, 2247 { "tx_fragment", RMON_T_FRAG }, 2248 { "tx_jabber", RMON_T_JAB }, 2249 { "tx_collision", RMON_T_COL }, 2250 { "tx_64byte", RMON_T_P64 }, 2251 { "tx_65to127byte", RMON_T_P65TO127 }, 2252 { "tx_128to255byte", RMON_T_P128TO255 }, 2253 { "tx_256to511byte", RMON_T_P256TO511 }, 2254 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2255 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2256 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2257 { "tx_octets", RMON_T_OCTETS }, 2258 2259 /* IEEE TX */ 2260 { "IEEE_tx_drop", IEEE_T_DROP }, 2261 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2262 { "IEEE_tx_1col", IEEE_T_1COL }, 2263 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2264 { "IEEE_tx_def", IEEE_T_DEF }, 2265 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2266 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2267 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2268 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2269 { "IEEE_tx_sqe", IEEE_T_SQE }, 2270 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2271 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2272 2273 /* RMON RX */ 2274 { "rx_packets", RMON_R_PACKETS }, 2275 { "rx_broadcast", RMON_R_BC_PKT }, 2276 { "rx_multicast", RMON_R_MC_PKT }, 2277 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2278 { "rx_undersize", RMON_R_UNDERSIZE }, 2279 { "rx_oversize", RMON_R_OVERSIZE }, 2280 { "rx_fragment", RMON_R_FRAG }, 2281 { "rx_jabber", RMON_R_JAB }, 2282 { "rx_64byte", RMON_R_P64 }, 2283 { "rx_65to127byte", RMON_R_P65TO127 }, 2284 { "rx_128to255byte", RMON_R_P128TO255 }, 2285 { "rx_256to511byte", RMON_R_P256TO511 }, 2286 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2287 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2288 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2289 { "rx_octets", RMON_R_OCTETS }, 2290 2291 /* IEEE RX */ 2292 { "IEEE_rx_drop", IEEE_R_DROP }, 2293 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2294 { "IEEE_rx_crc", IEEE_R_CRC }, 2295 { "IEEE_rx_align", IEEE_R_ALIGN }, 2296 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2297 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2298 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2299 }; 2300 2301 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2302 struct ethtool_stats *stats, u64 *data) 2303 { 2304 struct fec_enet_private *fep = netdev_priv(dev); 2305 int i; 2306 2307 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2308 data[i] = readl(fep->hwp + fec_stats[i].offset); 2309 } 2310 2311 static void fec_enet_get_strings(struct net_device *netdev, 2312 u32 stringset, u8 *data) 2313 { 2314 int i; 2315 switch (stringset) { 2316 case ETH_SS_STATS: 2317 for (i = 0; 
i < ARRAY_SIZE(fec_stats); i++) 2318 memcpy(data + i * ETH_GSTRING_LEN, 2319 fec_stats[i].name, ETH_GSTRING_LEN); 2320 break; 2321 } 2322 } 2323 2324 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2325 { 2326 switch (sset) { 2327 case ETH_SS_STATS: 2328 return ARRAY_SIZE(fec_stats); 2329 default: 2330 return -EOPNOTSUPP; 2331 } 2332 } 2333 #endif /* !defined(CONFIG_M5272) */ 2334 2335 static int fec_enet_nway_reset(struct net_device *dev) 2336 { 2337 struct phy_device *phydev = dev->phydev; 2338 2339 if (!phydev) 2340 return -ENODEV; 2341 2342 return genphy_restart_aneg(phydev); 2343 } 2344 2345 /* ITR clock source is enet system clock (clk_ahb). 2346 * TCTT unit is cycle_ns * 64 cycle 2347 * So, the ICTT value = X us / (cycle_ns * 64) 2348 */ 2349 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2350 { 2351 struct fec_enet_private *fep = netdev_priv(ndev); 2352 2353 return us * (fep->itr_clk_rate / 64000) / 1000; 2354 } 2355 2356 /* Set threshold for interrupt coalescing */ 2357 static void fec_enet_itr_coal_set(struct net_device *ndev) 2358 { 2359 struct fec_enet_private *fep = netdev_priv(ndev); 2360 int rx_itr, tx_itr; 2361 2362 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2363 return; 2364 2365 /* Must be greater than zero to avoid unpredictable behavior */ 2366 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2367 !fep->tx_time_itr || !fep->tx_pkts_itr) 2368 return; 2369 2370 /* Select enet system clock as Interrupt Coalescing 2371 * timer Clock Source 2372 */ 2373 rx_itr = FEC_ITR_CLK_SEL; 2374 tx_itr = FEC_ITR_CLK_SEL; 2375 2376 /* set ICFT and ICTT */ 2377 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2378 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2379 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2380 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2381 2382 rx_itr |= FEC_ITR_EN; 2383 tx_itr |= FEC_ITR_EN; 2384 2385 writel(tx_itr, fep->hwp + FEC_TXIC0); 2386 writel(rx_itr, fep->hwp + FEC_RXIC0); 2387 writel(tx_itr, fep->hwp + FEC_TXIC1); 2388 writel(rx_itr, fep->hwp + FEC_RXIC1); 2389 writel(tx_itr, fep->hwp + FEC_TXIC2); 2390 writel(rx_itr, fep->hwp + FEC_RXIC2); 2391 } 2392 2393 static int 2394 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2395 { 2396 struct fec_enet_private *fep = netdev_priv(ndev); 2397 2398 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2399 return -EOPNOTSUPP; 2400 2401 ec->rx_coalesce_usecs = fep->rx_time_itr; 2402 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 2403 2404 ec->tx_coalesce_usecs = fep->tx_time_itr; 2405 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 2406 2407 return 0; 2408 } 2409 2410 static int 2411 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2412 { 2413 struct fec_enet_private *fep = netdev_priv(ndev); 2414 unsigned int cycle; 2415 2416 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 2417 return -EOPNOTSUPP; 2418 2419 if (ec->rx_max_coalesced_frames > 255) { 2420 pr_err("Rx coalesced frames exceed hardware limitation\n"); 2421 return -EINVAL; 2422 } 2423 2424 if (ec->tx_max_coalesced_frames > 255) { 2425 pr_err("Tx coalesced frames exceed hardware limitation\n"); 2426 return -EINVAL; 2427 } 2428 2429 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 2430 if (cycle > 0xFFFF) { 2431 pr_err("Rx coalesced usec exceed hardware limitation\n"); 2432 return -EINVAL; 2433 } 2434 2435 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 2436 if (cycle > 0xFFFF) { 2437 pr_err("Tx coalesced usec exceed
hardware limitation\n"); 2438 return -EINVAL; 2439 } 2440 2441 fep->rx_time_itr = ec->rx_coalesce_usecs; 2442 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 2443 2444 fep->tx_time_itr = ec->tx_coalesce_usecs; 2445 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 2446 2447 fec_enet_itr_coal_set(ndev); 2448 2449 return 0; 2450 } 2451 2452 static void fec_enet_itr_coal_init(struct net_device *ndev) 2453 { 2454 struct ethtool_coalesce ec; 2455 2456 ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2457 ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2458 2459 ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2460 ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2461 2462 fec_enet_set_coalesce(ndev, &ec); 2463 } 2464 2465 static int fec_enet_get_tunable(struct net_device *netdev, 2466 const struct ethtool_tunable *tuna, 2467 void *data) 2468 { 2469 struct fec_enet_private *fep = netdev_priv(netdev); 2470 int ret = 0; 2471 2472 switch (tuna->id) { 2473 case ETHTOOL_RX_COPYBREAK: 2474 *(u32 *)data = fep->rx_copybreak; 2475 break; 2476 default: 2477 ret = -EINVAL; 2478 break; 2479 } 2480 2481 return ret; 2482 } 2483 2484 static int fec_enet_set_tunable(struct net_device *netdev, 2485 const struct ethtool_tunable *tuna, 2486 const void *data) 2487 { 2488 struct fec_enet_private *fep = netdev_priv(netdev); 2489 int ret = 0; 2490 2491 switch (tuna->id) { 2492 case ETHTOOL_RX_COPYBREAK: 2493 fep->rx_copybreak = *(u32 *)data; 2494 break; 2495 default: 2496 ret = -EINVAL; 2497 break; 2498 } 2499 2500 return ret; 2501 } 2502 2503 static void 2504 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2505 { 2506 struct fec_enet_private *fep = netdev_priv(ndev); 2507 2508 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 2509 wol->supported = WAKE_MAGIC; 2510 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? 
WAKE_MAGIC : 0; 2511 } else { 2512 wol->supported = wol->wolopts = 0; 2513 } 2514 } 2515 2516 static int 2517 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2518 { 2519 struct fec_enet_private *fep = netdev_priv(ndev); 2520 2521 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 2522 return -EINVAL; 2523 2524 if (wol->wolopts & ~WAKE_MAGIC) 2525 return -EINVAL; 2526 2527 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 2528 if (device_may_wakeup(&ndev->dev)) { 2529 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 2530 if (fep->irq[0] > 0) 2531 enable_irq_wake(fep->irq[0]); 2532 } else { 2533 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 2534 if (fep->irq[0] > 0) 2535 disable_irq_wake(fep->irq[0]); 2536 } 2537 2538 return 0; 2539 } 2540 2541 static const struct ethtool_ops fec_enet_ethtool_ops = { 2542 .get_drvinfo = fec_enet_get_drvinfo, 2543 .get_regs_len = fec_enet_get_regs_len, 2544 .get_regs = fec_enet_get_regs, 2545 .nway_reset = fec_enet_nway_reset, 2546 .get_link = ethtool_op_get_link, 2547 .get_coalesce = fec_enet_get_coalesce, 2548 .set_coalesce = fec_enet_set_coalesce, 2549 #ifndef CONFIG_M5272 2550 .get_pauseparam = fec_enet_get_pauseparam, 2551 .set_pauseparam = fec_enet_set_pauseparam, 2552 .get_strings = fec_enet_get_strings, 2553 .get_ethtool_stats = fec_enet_get_ethtool_stats, 2554 .get_sset_count = fec_enet_get_sset_count, 2555 #endif 2556 .get_ts_info = fec_enet_get_ts_info, 2557 .get_tunable = fec_enet_get_tunable, 2558 .set_tunable = fec_enet_set_tunable, 2559 .get_wol = fec_enet_get_wol, 2560 .set_wol = fec_enet_set_wol, 2561 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2562 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2563 }; 2564 2565 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2566 { 2567 struct fec_enet_private *fep = netdev_priv(ndev); 2568 struct phy_device *phydev = ndev->phydev; 2569 2570 if (!netif_running(ndev)) 2571 return -EINVAL; 2572 2573 if (!phydev) 2574 return -ENODEV; 2575 2576 if (fep->bufdesc_ex) { 2577 if (cmd == SIOCSHWTSTAMP) 2578 return fec_ptp_set(ndev, rq); 2579 if (cmd == SIOCGHWTSTAMP) 2580 return fec_ptp_get(ndev, rq); 2581 } 2582 2583 return phy_mii_ioctl(phydev, rq, cmd); 2584 } 2585 2586 static void fec_enet_free_buffers(struct net_device *ndev) 2587 { 2588 struct fec_enet_private *fep = netdev_priv(ndev); 2589 unsigned int i; 2590 struct sk_buff *skb; 2591 struct bufdesc *bdp; 2592 struct fec_enet_priv_tx_q *txq; 2593 struct fec_enet_priv_rx_q *rxq; 2594 unsigned int q; 2595 2596 for (q = 0; q < fep->num_rx_queues; q++) { 2597 rxq = fep->rx_queue[q]; 2598 bdp = rxq->bd.base; 2599 for (i = 0; i < rxq->bd.ring_size; i++) { 2600 skb = rxq->rx_skbuff[i]; 2601 rxq->rx_skbuff[i] = NULL; 2602 if (skb) { 2603 dma_unmap_single(&fep->pdev->dev, 2604 fec32_to_cpu(bdp->cbd_bufaddr), 2605 FEC_ENET_RX_FRSIZE - fep->rx_align, 2606 DMA_FROM_DEVICE); 2607 dev_kfree_skb(skb); 2608 } 2609 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2610 } 2611 } 2612 2613 for (q = 0; q < fep->num_tx_queues; q++) { 2614 txq = fep->tx_queue[q]; 2615 bdp = txq->bd.base; 2616 for (i = 0; i < txq->bd.ring_size; i++) { 2617 kfree(txq->tx_bounce[i]); 2618 txq->tx_bounce[i] = NULL; 2619 skb = txq->tx_skbuff[i]; 2620 txq->tx_skbuff[i] = NULL; 2621 dev_kfree_skb(skb); 2622 } 2623 } 2624 } 2625 2626 static void fec_enet_free_queue(struct net_device *ndev) 2627 { 2628 struct fec_enet_private *fep = netdev_priv(ndev); 2629 int i; 2630 struct fec_enet_priv_tx_q *txq; 2631 2632 for (i = 0; i < fep->num_tx_queues; 
i++) 2633 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 2634 txq = fep->tx_queue[i]; 2635 dma_free_coherent(NULL, 2636 txq->bd.ring_size * TSO_HEADER_SIZE, 2637 txq->tso_hdrs, 2638 txq->tso_hdrs_dma); 2639 } 2640 2641 for (i = 0; i < fep->num_rx_queues; i++) 2642 kfree(fep->rx_queue[i]); 2643 for (i = 0; i < fep->num_tx_queues; i++) 2644 kfree(fep->tx_queue[i]); 2645 } 2646 2647 static int fec_enet_alloc_queue(struct net_device *ndev) 2648 { 2649 struct fec_enet_private *fep = netdev_priv(ndev); 2650 int i; 2651 int ret = 0; 2652 struct fec_enet_priv_tx_q *txq; 2653 2654 for (i = 0; i < fep->num_tx_queues; i++) { 2655 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 2656 if (!txq) { 2657 ret = -ENOMEM; 2658 goto alloc_failed; 2659 } 2660 2661 fep->tx_queue[i] = txq; 2662 txq->bd.ring_size = TX_RING_SIZE; 2663 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 2664 2665 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 2666 txq->tx_wake_threshold = 2667 (txq->bd.ring_size - txq->tx_stop_threshold) / 2; 2668 2669 txq->tso_hdrs = dma_alloc_coherent(NULL, 2670 txq->bd.ring_size * TSO_HEADER_SIZE, 2671 &txq->tso_hdrs_dma, 2672 GFP_KERNEL); 2673 if (!txq->tso_hdrs) { 2674 ret = -ENOMEM; 2675 goto alloc_failed; 2676 } 2677 } 2678 2679 for (i = 0; i < fep->num_rx_queues; i++) { 2680 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 2681 GFP_KERNEL); 2682 if (!fep->rx_queue[i]) { 2683 ret = -ENOMEM; 2684 goto alloc_failed; 2685 } 2686 2687 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 2688 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 2689 } 2690 return ret; 2691 2692 alloc_failed: 2693 fec_enet_free_queue(ndev); 2694 return ret; 2695 } 2696 2697 static int 2698 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 2699 { 2700 struct fec_enet_private *fep = netdev_priv(ndev); 2701 unsigned int i; 2702 struct sk_buff *skb; 2703 struct bufdesc *bdp; 2704 struct fec_enet_priv_rx_q *rxq; 2705 2706 rxq = fep->rx_queue[queue]; 2707 bdp = rxq->bd.base; 2708 for (i = 0; i < rxq->bd.ring_size; i++) { 2709 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 2710 if (!skb) 2711 goto err_alloc; 2712 2713 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { 2714 dev_kfree_skb(skb); 2715 goto err_alloc; 2716 } 2717 2718 rxq->rx_skbuff[i] = skb; 2719 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 2720 2721 if (fep->bufdesc_ex) { 2722 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2723 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 2724 } 2725 2726 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2727 } 2728 2729 /* Set the last buffer to wrap. 
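 * (BD_SC_WRAP in the final descriptor tells the controller that the ring
 * ends here, so it wraps back to rxq->bd.base after consuming this entry.)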
*/ 2730 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 2731 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2732 return 0; 2733 2734 err_alloc: 2735 fec_enet_free_buffers(ndev); 2736 return -ENOMEM; 2737 } 2738 2739 static int 2740 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 2741 { 2742 struct fec_enet_private *fep = netdev_priv(ndev); 2743 unsigned int i; 2744 struct bufdesc *bdp; 2745 struct fec_enet_priv_tx_q *txq; 2746 2747 txq = fep->tx_queue[queue]; 2748 bdp = txq->bd.base; 2749 for (i = 0; i < txq->bd.ring_size; i++) { 2750 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 2751 if (!txq->tx_bounce[i]) 2752 goto err_alloc; 2753 2754 bdp->cbd_sc = cpu_to_fec16(0); 2755 bdp->cbd_bufaddr = cpu_to_fec32(0); 2756 2757 if (fep->bufdesc_ex) { 2758 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2759 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 2760 } 2761 2762 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 2763 } 2764 2765 /* Set the last buffer to wrap. */ 2766 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 2767 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2768 2769 return 0; 2770 2771 err_alloc: 2772 fec_enet_free_buffers(ndev); 2773 return -ENOMEM; 2774 } 2775 2776 static int fec_enet_alloc_buffers(struct net_device *ndev) 2777 { 2778 struct fec_enet_private *fep = netdev_priv(ndev); 2779 unsigned int i; 2780 2781 for (i = 0; i < fep->num_rx_queues; i++) 2782 if (fec_enet_alloc_rxq_buffers(ndev, i)) 2783 return -ENOMEM; 2784 2785 for (i = 0; i < fep->num_tx_queues; i++) 2786 if (fec_enet_alloc_txq_buffers(ndev, i)) 2787 return -ENOMEM; 2788 return 0; 2789 } 2790 2791 static int 2792 fec_enet_open(struct net_device *ndev) 2793 { 2794 struct fec_enet_private *fep = netdev_priv(ndev); 2795 int ret; 2796 2797 ret = pm_runtime_get_sync(&fep->pdev->dev); 2798 if (ret < 0) 2799 return ret; 2800 2801 pinctrl_pm_select_default_state(&fep->pdev->dev); 2802 ret = fec_enet_clk_enable(ndev, true); 2803 if (ret) 2804 goto clk_enable; 2805 2806 /* I should reset the ring buffers here, but I don't yet know 2807 * a simple way to do that. 
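 * (In this driver the descriptor rings are in fact re-initialised by the
 * fec_restart() call below, so the interface does start from a clean state.)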
2808 */ 2809 2810 ret = fec_enet_alloc_buffers(ndev); 2811 if (ret) 2812 goto err_enet_alloc; 2813 2814 /* Init MAC prior to mii bus probe */ 2815 fec_restart(ndev); 2816 2817 /* Probe and connect to PHY when open the interface */ 2818 ret = fec_enet_mii_probe(ndev); 2819 if (ret) 2820 goto err_enet_mii_probe; 2821 2822 if (fep->quirks & FEC_QUIRK_ERR006687) 2823 imx6q_cpuidle_fec_irqs_used(); 2824 2825 napi_enable(&fep->napi); 2826 phy_start(ndev->phydev); 2827 netif_tx_start_all_queues(ndev); 2828 2829 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 2830 FEC_WOL_FLAG_ENABLE); 2831 2832 return 0; 2833 2834 err_enet_mii_probe: 2835 fec_enet_free_buffers(ndev); 2836 err_enet_alloc: 2837 fec_enet_clk_enable(ndev, false); 2838 clk_enable: 2839 pm_runtime_mark_last_busy(&fep->pdev->dev); 2840 pm_runtime_put_autosuspend(&fep->pdev->dev); 2841 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2842 return ret; 2843 } 2844 2845 static int 2846 fec_enet_close(struct net_device *ndev) 2847 { 2848 struct fec_enet_private *fep = netdev_priv(ndev); 2849 2850 phy_stop(ndev->phydev); 2851 2852 if (netif_device_present(ndev)) { 2853 napi_disable(&fep->napi); 2854 netif_tx_disable(ndev); 2855 fec_stop(ndev); 2856 } 2857 2858 phy_disconnect(ndev->phydev); 2859 2860 if (fep->quirks & FEC_QUIRK_ERR006687) 2861 imx6q_cpuidle_fec_irqs_unused(); 2862 2863 fec_enet_clk_enable(ndev, false); 2864 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2865 pm_runtime_mark_last_busy(&fep->pdev->dev); 2866 pm_runtime_put_autosuspend(&fep->pdev->dev); 2867 2868 fec_enet_free_buffers(ndev); 2869 2870 return 0; 2871 } 2872 2873 /* Set or clear the multicast filter for this adaptor. 2874 * Skeleton taken from sunlance driver. 2875 * The CPM Ethernet implementation allows Multicast as well as individual 2876 * MAC address filtering. Some of the drivers check to make sure it is 2877 * a group multicast address, and discard those that are not. I guess I 2878 * will do the same for now, but just remove the test if you want 2879 * individual filtering as well (do the upper net layers want or support 2880 * this kind of feature?). 2881 */ 2882 2883 #define HASH_BITS 6 /* #bits in hash */ 2884 #define CRC32_POLY 0xEDB88320 2885 2886 static void set_multicast_list(struct net_device *ndev) 2887 { 2888 struct fec_enet_private *fep = netdev_priv(ndev); 2889 struct netdev_hw_addr *ha; 2890 unsigned int i, bit, data, crc, tmp; 2891 unsigned char hash; 2892 2893 if (ndev->flags & IFF_PROMISC) { 2894 tmp = readl(fep->hwp + FEC_R_CNTRL); 2895 tmp |= 0x8; 2896 writel(tmp, fep->hwp + FEC_R_CNTRL); 2897 return; 2898 } 2899 2900 tmp = readl(fep->hwp + FEC_R_CNTRL); 2901 tmp &= ~0x8; 2902 writel(tmp, fep->hwp + FEC_R_CNTRL); 2903 2904 if (ndev->flags & IFF_ALLMULTI) { 2905 /* Catch all multicast addresses, so set the 2906 * filter to all 1's 2907 */ 2908 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2909 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2910 2911 return; 2912 } 2913 2914 /* Clear filter and add the addresses in hash register 2915 */ 2916 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2917 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2918 2919 netdev_for_each_mc_addr(ha, ndev) { 2920 /* calculate crc32 value of mac address */ 2921 crc = 0xffffffff; 2922 2923 for (i = 0; i < ndev->addr_len; i++) { 2924 data = ha->addr[i]; 2925 for (bit = 0; bit < 8; bit++, data >>= 1) { 2926 crc = (crc >> 1) ^ 2927 (((crc ^ data) & 1) ? 
CRC32_POLY : 0); 2928 } 2929 } 2930 2931 /* only upper 6 bits (HASH_BITS) are used 2932 * which point to a specific bit in the hash registers 2933 */ 2934 hash = (crc >> (32 - HASH_BITS)) & 0x3f; 2935 2936 if (hash > 31) { 2937 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2938 tmp |= 1 << (hash - 32); 2939 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2940 } else { 2941 tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2942 tmp |= 1 << hash; 2943 writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2944 } 2945 } 2946 } 2947 2948 /* Set a MAC change in hardware. */ 2949 static int 2950 fec_set_mac_address(struct net_device *ndev, void *p) 2951 { 2952 struct fec_enet_private *fep = netdev_priv(ndev); 2953 struct sockaddr *addr = p; 2954 2955 if (addr) { 2956 if (!is_valid_ether_addr(addr->sa_data)) 2957 return -EADDRNOTAVAIL; 2958 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 2959 } 2960 2961 /* Add netif status check here to avoid system hang in below case: 2962 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 2963 * After ethx down, all FEC clocks are gated off and then register 2964 * access causes system hang. 2965 */ 2966 if (!netif_running(ndev)) 2967 return 0; 2968 2969 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 2970 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 2971 fep->hwp + FEC_ADDR_LOW); 2972 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 2973 fep->hwp + FEC_ADDR_HIGH); 2974 return 0; 2975 } 2976 2977 #ifdef CONFIG_NET_POLL_CONTROLLER 2978 /** 2979 * fec_poll_controller - FEC Poll controller function 2980 * @dev: The FEC network adapter 2981 * 2982 * Polled functionality used by netconsole and others in non-interrupt mode 2983 * 2984 */ 2985 static void fec_poll_controller(struct net_device *dev) 2986 { 2987 int i; 2988 struct fec_enet_private *fep = netdev_priv(dev); 2989 2990 for (i = 0; i < FEC_IRQ_NUM; i++) { 2991 if (fep->irq[i] > 0) { 2992 disable_irq(fep->irq[i]); 2993 fec_enet_interrupt(fep->irq[i], dev); 2994 enable_irq(fep->irq[i]); 2995 } 2996 } 2997 } 2998 #endif 2999 3000 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3001 netdev_features_t features) 3002 { 3003 struct fec_enet_private *fep = netdev_priv(netdev); 3004 netdev_features_t changed = features ^ netdev->features; 3005 3006 netdev->features = features; 3007 3008 /* Receive checksum has been changed */ 3009 if (changed & NETIF_F_RXCSUM) { 3010 if (features & NETIF_F_RXCSUM) 3011 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3012 else 3013 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3014 } 3015 } 3016 3017 static int fec_set_features(struct net_device *netdev, 3018 netdev_features_t features) 3019 { 3020 struct fec_enet_private *fep = netdev_priv(netdev); 3021 netdev_features_t changed = features ^ netdev->features; 3022 3023 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3024 napi_disable(&fep->napi); 3025 netif_tx_lock_bh(netdev); 3026 fec_stop(netdev); 3027 fec_enet_set_netdev_features(netdev, features); 3028 fec_restart(netdev); 3029 netif_tx_wake_all_queues(netdev); 3030 netif_tx_unlock_bh(netdev); 3031 napi_enable(&fep->napi); 3032 } else { 3033 fec_enet_set_netdev_features(netdev, features); 3034 } 3035 3036 return 0; 3037 } 3038 3039 static const struct net_device_ops fec_netdev_ops = { 3040 .ndo_open = fec_enet_open, 3041 .ndo_stop = fec_enet_close, 3042 .ndo_start_xmit = fec_enet_start_xmit, 3043 .ndo_set_rx_mode = set_multicast_list, 3044 .ndo_change_mtu = eth_change_mtu, 3045 .ndo_validate_addr =
eth_validate_addr, 3046 .ndo_tx_timeout = fec_timeout, 3047 .ndo_set_mac_address = fec_set_mac_address, 3048 .ndo_do_ioctl = fec_enet_ioctl, 3049 #ifdef CONFIG_NET_POLL_CONTROLLER 3050 .ndo_poll_controller = fec_poll_controller, 3051 #endif 3052 .ndo_set_features = fec_set_features, 3053 }; 3054 3055 static const unsigned short offset_des_active_rxq[] = { 3056 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 3057 }; 3058 3059 static const unsigned short offset_des_active_txq[] = { 3060 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 3061 }; 3062 3063 /* 3064 * XXX: We need to clean up on failure exits here. 3065 * 3066 */ 3067 static int fec_enet_init(struct net_device *ndev) 3068 { 3069 struct fec_enet_private *fep = netdev_priv(ndev); 3070 struct bufdesc *cbd_base; 3071 dma_addr_t bd_dma; 3072 int bd_size; 3073 unsigned int i; 3074 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 3075 sizeof(struct bufdesc); 3076 unsigned dsize_log2 = __fls(dsize); 3077 3078 WARN_ON(dsize != (1 << dsize_log2)); 3079 #if defined(CONFIG_ARM) 3080 fep->rx_align = 0xf; 3081 fep->tx_align = 0xf; 3082 #else 3083 fep->rx_align = 0x3; 3084 fep->tx_align = 0x3; 3085 #endif 3086 3087 fec_enet_alloc_queue(ndev); 3088 3089 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 3090 3091 /* Allocate memory for buffer descriptors. */ 3092 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, 3093 GFP_KERNEL); 3094 if (!cbd_base) { 3095 return -ENOMEM; 3096 } 3097 3098 memset(cbd_base, 0, bd_size); 3099 3100 /* Get the Ethernet address */ 3101 fec_get_mac(ndev); 3102 /* make sure MAC we just acquired is programmed into the hw */ 3103 fec_set_mac_address(ndev, NULL); 3104 3105 /* Set receive and transmit descriptor base. 
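 * The single coherent allocation made above is carved up sequentially:
 * each RX queue takes ring_size * dsize bytes, followed by each TX queue.
 * bd.base/bd.dma record the CPU and DMA addresses of each slice, and
 * bd.last is left pointing at the final descriptor of that ring.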
*/ 3106 for (i = 0; i < fep->num_rx_queues; i++) { 3107 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 3108 unsigned size = dsize * rxq->bd.ring_size; 3109 3110 rxq->bd.qid = i; 3111 rxq->bd.base = cbd_base; 3112 rxq->bd.cur = cbd_base; 3113 rxq->bd.dma = bd_dma; 3114 rxq->bd.dsize = dsize; 3115 rxq->bd.dsize_log2 = dsize_log2; 3116 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 3117 bd_dma += size; 3118 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3119 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3120 } 3121 3122 for (i = 0; i < fep->num_tx_queues; i++) { 3123 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 3124 unsigned size = dsize * txq->bd.ring_size; 3125 3126 txq->bd.qid = i; 3127 txq->bd.base = cbd_base; 3128 txq->bd.cur = cbd_base; 3129 txq->bd.dma = bd_dma; 3130 txq->bd.dsize = dsize; 3131 txq->bd.dsize_log2 = dsize_log2; 3132 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 3133 bd_dma += size; 3134 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3135 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3136 } 3137 3138 3139 /* The FEC Ethernet specific entries in the device structure */ 3140 ndev->watchdog_timeo = TX_TIMEOUT; 3141 ndev->netdev_ops = &fec_netdev_ops; 3142 ndev->ethtool_ops = &fec_enet_ethtool_ops; 3143 3144 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 3145 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 3146 3147 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 3148 /* enable hw VLAN support */ 3149 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3150 3151 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 3152 ndev->gso_max_segs = FEC_MAX_TSO_SEGS; 3153 3154 /* enable hw accelerator */ 3155 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3156 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 3157 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3158 } 3159 3160 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 3161 fep->tx_align = 0; 3162 fep->rx_align = 0x3f; 3163 } 3164 3165 ndev->hw_features = ndev->features; 3166 3167 fec_restart(ndev); 3168 3169 return 0; 3170 } 3171 3172 #ifdef CONFIG_OF 3173 static void fec_reset_phy(struct platform_device *pdev) 3174 { 3175 int err, phy_reset; 3176 bool active_high = false; 3177 int msec = 1; 3178 struct device_node *np = pdev->dev.of_node; 3179 3180 if (!np) 3181 return; 3182 3183 of_property_read_u32(np, "phy-reset-duration", &msec); 3184 /* A sane reset duration should not be longer than 1s */ 3185 if (msec > 1000) 3186 msec = 1; 3187 3188 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 3189 if (!gpio_is_valid(phy_reset)) 3190 return; 3191 3192 active_high = of_property_read_bool(np, "phy-reset-active-high"); 3193 3194 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3195 active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, 3196 "phy-reset"); 3197 if (err) { 3198 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3199 return; 3200 } 3201 msleep(msec); 3202 gpio_set_value_cansleep(phy_reset, !active_high); 3203 } 3204 #else /* CONFIG_OF */ 3205 static void fec_reset_phy(struct platform_device *pdev) 3206 { 3207 /* 3208 * In case of platform probe, the reset has been done 3209 * by machine code. 
3210 */ 3211 } 3212 #endif /* CONFIG_OF */ 3213 3214 static void 3215 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 3216 { 3217 struct device_node *np = pdev->dev.of_node; 3218 3219 *num_tx = *num_rx = 1; 3220 3221 if (!np || !of_device_is_available(np)) 3222 return; 3223 3224 /* parse the num of tx and rx queues */ 3225 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 3226 3227 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 3228 3229 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 3230 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 3231 *num_tx); 3232 *num_tx = 1; 3233 return; 3234 } 3235 3236 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 3237 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 3238 *num_rx); 3239 *num_rx = 1; 3240 return; 3241 } 3242 3243 } 3244 3245 static int 3246 fec_probe(struct platform_device *pdev) 3247 { 3248 struct fec_enet_private *fep; 3249 struct fec_platform_data *pdata; 3250 struct net_device *ndev; 3251 int i, irq, ret = 0; 3252 struct resource *r; 3253 const struct of_device_id *of_id; 3254 static int dev_id; 3255 struct device_node *np = pdev->dev.of_node, *phy_node; 3256 int num_tx_qs; 3257 int num_rx_qs; 3258 3259 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3260 3261 /* Init network device */ 3262 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private), 3263 num_tx_qs, num_rx_qs); 3264 if (!ndev) 3265 return -ENOMEM; 3266 3267 SET_NETDEV_DEV(ndev, &pdev->dev); 3268 3269 /* setup board info structure */ 3270 fep = netdev_priv(ndev); 3271 3272 of_id = of_match_device(fec_dt_ids, &pdev->dev); 3273 if (of_id) 3274 pdev->id_entry = of_id->data; 3275 fep->quirks = pdev->id_entry->driver_data; 3276 3277 fep->netdev = ndev; 3278 fep->num_rx_queues = num_rx_qs; 3279 fep->num_tx_queues = num_tx_qs; 3280 3281 #if !defined(CONFIG_M5272) 3282 /* default enable pause frame auto negotiation */ 3283 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 3284 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 3285 #endif 3286 3287 /* Select default pin state */ 3288 pinctrl_pm_select_default_state(&pdev->dev); 3289 3290 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3291 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 3292 if (IS_ERR(fep->hwp)) { 3293 ret = PTR_ERR(fep->hwp); 3294 goto failed_ioremap; 3295 } 3296 3297 fep->pdev = pdev; 3298 fep->dev_id = dev_id++; 3299 3300 platform_set_drvdata(pdev, ndev); 3301 3302 if ((of_machine_is_compatible("fsl,imx6q") || 3303 of_machine_is_compatible("fsl,imx6dl")) && 3304 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 3305 fep->quirks |= FEC_QUIRK_ERR006687; 3306 3307 if (of_get_property(np, "fsl,magic-packet", NULL)) 3308 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 3309 3310 phy_node = of_parse_phandle(np, "phy-handle", 0); 3311 if (!phy_node && of_phy_is_fixed_link(np)) { 3312 ret = of_phy_register_fixed_link(np); 3313 if (ret < 0) { 3314 dev_err(&pdev->dev, 3315 "broken fixed-link specification\n"); 3316 goto failed_phy; 3317 } 3318 phy_node = of_node_get(np); 3319 } 3320 fep->phy_node = phy_node; 3321 3322 ret = of_get_phy_mode(pdev->dev.of_node); 3323 if (ret < 0) { 3324 pdata = dev_get_platdata(&pdev->dev); 3325 if (pdata) 3326 fep->phy_interface = pdata->phy; 3327 else 3328 fep->phy_interface = PHY_INTERFACE_MODE_MII; 3329 } else { 3330 fep->phy_interface = ret; 3331 } 3332 3333 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 3334 if (IS_ERR(fep->clk_ipg)) { 3335 ret = PTR_ERR(fep->clk_ipg); 3336 goto failed_clk; 3337 } 3338 3339 
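/* Clock usage from here on: ahb is mandatory (its absence fails the probe),
 * enet_out and enet_clk_ref are board-dependent and therefore optional, and
 * ptp is optional as well - when it is missing, the extended (1588) buffer
 * descriptors are simply disabled.
 */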
fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 3340 if (IS_ERR(fep->clk_ahb)) { 3341 ret = PTR_ERR(fep->clk_ahb); 3342 goto failed_clk; 3343 } 3344 3345 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 3346 3347 /* enet_out is optional, depends on board */ 3348 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); 3349 if (IS_ERR(fep->clk_enet_out)) 3350 fep->clk_enet_out = NULL; 3351 3352 fep->ptp_clk_on = false; 3353 mutex_init(&fep->ptp_clk_mutex); 3354 3355 /* clk_ref is optional, depends on board */ 3356 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3357 if (IS_ERR(fep->clk_ref)) 3358 fep->clk_ref = NULL; 3359 3360 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 3361 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 3362 if (IS_ERR(fep->clk_ptp)) { 3363 fep->clk_ptp = NULL; 3364 fep->bufdesc_ex = false; 3365 } 3366 3367 ret = fec_enet_clk_enable(ndev, true); 3368 if (ret) 3369 goto failed_clk; 3370 3371 ret = clk_prepare_enable(fep->clk_ipg); 3372 if (ret) 3373 goto failed_clk_ipg; 3374 3375 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3376 if (!IS_ERR(fep->reg_phy)) { 3377 ret = regulator_enable(fep->reg_phy); 3378 if (ret) { 3379 dev_err(&pdev->dev, 3380 "Failed to enable phy regulator: %d\n", ret); 3381 goto failed_regulator; 3382 } 3383 } else { 3384 fep->reg_phy = NULL; 3385 } 3386 3387 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3388 pm_runtime_use_autosuspend(&pdev->dev); 3389 pm_runtime_get_noresume(&pdev->dev); 3390 pm_runtime_set_active(&pdev->dev); 3391 pm_runtime_enable(&pdev->dev); 3392 3393 fec_reset_phy(pdev); 3394 3395 if (fep->bufdesc_ex) 3396 fec_ptp_init(pdev); 3397 3398 ret = fec_enet_init(ndev); 3399 if (ret) 3400 goto failed_init; 3401 3402 for (i = 0; i < FEC_IRQ_NUM; i++) { 3403 irq = platform_get_irq(pdev, i); 3404 if (irq < 0) { 3405 if (i) 3406 break; 3407 ret = irq; 3408 goto failed_irq; 3409 } 3410 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 3411 0, pdev->name, ndev); 3412 if (ret) 3413 goto failed_irq; 3414 3415 fep->irq[i] = irq; 3416 } 3417 3418 init_completion(&fep->mdio_done); 3419 ret = fec_enet_mii_init(pdev); 3420 if (ret) 3421 goto failed_mii_init; 3422 3423 /* Carrier starts down, phylib will bring it up */ 3424 netif_carrier_off(ndev); 3425 fec_enet_clk_enable(ndev, false); 3426 pinctrl_pm_select_sleep_state(&pdev->dev); 3427 3428 ret = register_netdev(ndev); 3429 if (ret) 3430 goto failed_register; 3431 3432 device_init_wakeup(&ndev->dev, fep->wol_flag & 3433 FEC_WOL_HAS_MAGIC_PACKET); 3434 3435 if (fep->bufdesc_ex && fep->ptp_clock) 3436 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3437 3438 fep->rx_copybreak = COPYBREAK_DEFAULT; 3439 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3440 3441 pm_runtime_mark_last_busy(&pdev->dev); 3442 pm_runtime_put_autosuspend(&pdev->dev); 3443 3444 return 0; 3445 3446 failed_register: 3447 fec_enet_mii_remove(fep); 3448 failed_mii_init: 3449 failed_irq: 3450 failed_init: 3451 fec_ptp_stop(pdev); 3452 if (fep->reg_phy) 3453 regulator_disable(fep->reg_phy); 3454 failed_regulator: 3455 clk_disable_unprepare(fep->clk_ipg); 3456 failed_clk_ipg: 3457 fec_enet_clk_enable(ndev, false); 3458 failed_clk: 3459 failed_phy: 3460 of_node_put(phy_node); 3461 failed_ioremap: 3462 free_netdev(ndev); 3463 3464 return ret; 3465 } 3466 3467 static int 3468 fec_drv_remove(struct platform_device *pdev) 3469 { 3470 struct net_device *ndev = platform_get_drvdata(pdev); 3471 struct fec_enet_private *fep = netdev_priv(ndev); 3472 3473 
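/* Teardown mirrors probe in reverse: stop the deferred TX timeout work and
 * PTP support first, then unregister the netdev and the MDIO bus, and
 * finally release the PHY regulator and the phy_node reference.
 */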
cancel_work_sync(&fep->tx_timeout_work); 3474 fec_ptp_stop(pdev); 3475 unregister_netdev(ndev); 3476 fec_enet_mii_remove(fep); 3477 if (fep->reg_phy) 3478 regulator_disable(fep->reg_phy); 3479 of_node_put(fep->phy_node); 3480 free_netdev(ndev); 3481 3482 return 0; 3483 } 3484 3485 static int __maybe_unused fec_suspend(struct device *dev) 3486 { 3487 struct net_device *ndev = dev_get_drvdata(dev); 3488 struct fec_enet_private *fep = netdev_priv(ndev); 3489 3490 rtnl_lock(); 3491 if (netif_running(ndev)) { 3492 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 3493 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 3494 phy_stop(ndev->phydev); 3495 napi_disable(&fep->napi); 3496 netif_tx_lock_bh(ndev); 3497 netif_device_detach(ndev); 3498 netif_tx_unlock_bh(ndev); 3499 fec_stop(ndev); 3500 fec_enet_clk_enable(ndev, false); 3501 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3502 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3503 } 3504 rtnl_unlock(); 3505 3506 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3507 regulator_disable(fep->reg_phy); 3508 3509 /* SOC supply clock to phy, when clock is disabled, phy link down 3510 * SOC control phy regulator, when regulator is disabled, phy link down 3511 */ 3512 if (fep->clk_enet_out || fep->reg_phy) 3513 fep->link = 0; 3514 3515 return 0; 3516 } 3517 3518 static int __maybe_unused fec_resume(struct device *dev) 3519 { 3520 struct net_device *ndev = dev_get_drvdata(dev); 3521 struct fec_enet_private *fep = netdev_priv(ndev); 3522 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 3523 int ret; 3524 int val; 3525 3526 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 3527 ret = regulator_enable(fep->reg_phy); 3528 if (ret) 3529 return ret; 3530 } 3531 3532 rtnl_lock(); 3533 if (netif_running(ndev)) { 3534 ret = fec_enet_clk_enable(ndev, true); 3535 if (ret) { 3536 rtnl_unlock(); 3537 goto failed_clk; 3538 } 3539 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 3540 if (pdata && pdata->sleep_mode_enable) 3541 pdata->sleep_mode_enable(false); 3542 val = readl(fep->hwp + FEC_ECNTRL); 3543 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 3544 writel(val, fep->hwp + FEC_ECNTRL); 3545 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 3546 } else { 3547 pinctrl_pm_select_default_state(&fep->pdev->dev); 3548 } 3549 fec_restart(ndev); 3550 netif_tx_lock_bh(ndev); 3551 netif_device_attach(ndev); 3552 netif_tx_unlock_bh(ndev); 3553 napi_enable(&fep->napi); 3554 phy_start(ndev->phydev); 3555 } 3556 rtnl_unlock(); 3557 3558 return 0; 3559 3560 failed_clk: 3561 if (fep->reg_phy) 3562 regulator_disable(fep->reg_phy); 3563 return ret; 3564 } 3565 3566 static int __maybe_unused fec_runtime_suspend(struct device *dev) 3567 { 3568 struct net_device *ndev = dev_get_drvdata(dev); 3569 struct fec_enet_private *fep = netdev_priv(ndev); 3570 3571 clk_disable_unprepare(fep->clk_ipg); 3572 3573 return 0; 3574 } 3575 3576 static int __maybe_unused fec_runtime_resume(struct device *dev) 3577 { 3578 struct net_device *ndev = dev_get_drvdata(dev); 3579 struct fec_enet_private *fep = netdev_priv(ndev); 3580 3581 return clk_prepare_enable(fep->clk_ipg); 3582 } 3583 3584 static const struct dev_pm_ops fec_pm_ops = { 3585 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3586 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3587 }; 3588 3589 static struct platform_driver fec_driver = { 3590 .driver = { 3591 .name = DRIVER_NAME, 3592 .pm = &fec_pm_ops, 3593 .of_match_table = fec_dt_ids, 3594 }, 3595 .id_table = fec_devtype, 3596 .probe = fec_probe, 3597 .remove = 
fec_drv_remove, 3598 }; 3599 3600 module_platform_driver(fec_driver); 3601 3602 MODULE_ALIAS("platform:"DRIVER_NAME); 3603 MODULE_LICENSE("GPL"); 3604