/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */

static struct platform_device_id fec_devtype[] = {
	{
		/* keep it for coldfire */
		.name = DRIVER_NAME,
		.driver_data = 0,
	}, {
		.name = "imx25-fec",
		.driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR,
	}, {
		.name = "imx27-fec",
		.driver_data = FEC_QUIRK_MIB_CLEAR,
	}, {
		.name = "imx28-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
				FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6q-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
				FEC_QUIRK_HAS_RACC,
	}, {
		.name = "mvf600-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
	}, {
		.name = "imx6sx-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
				FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
				FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
	}, {
		.name = "imx6ul-fec",
		.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
				FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
				FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
				FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
				FEC_QUIRK_HAS_COALESCE,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, fec_devtype);

enum imx_fec_type {
	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
	IMX27_FEC,	/* runs on i.mx27/35/51 */
	IMX28_FEC,
	IMX6Q_FEC,
	MVF600_FEC,
	IMX6SX_FEC,
	IMX6UL_FEC,
};

static const struct of_device_id fec_dt_ids[] = {
	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);

static unsigned char macaddr[ETH_ALEN];
module_param_array(macaddr, byte, NULL, 0);
MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");

#if defined(CONFIG_M5272)
/*
 * Some hardware gets its MAC address out of local flash memory.
 * If this is non-zero then assume it is the address to get the MAC from.
 */
#if defined(CONFIG_NETtel)
#define	FEC_FLASHMAC	0xf0006006
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
#define	FEC_FLASHMAC	0xf0006000
#elif defined(CONFIG_CANCam)
#define	FEC_FLASHMAC	0xf0020000
#elif defined (CONFIG_M5272C3)
#define	FEC_FLASHMAC	(0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
#define	FEC_FLASHMAC	0xffc0406b
#else
#define	FEC_FLASHMAC	0
#endif
#endif /* CONFIG_M5272 */

/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants.
Worst case is 64, so round down by 64. 179 */ 180 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) 181 #define PKT_MINBUF_SIZE 64 182 183 /* FEC receive acceleration */ 184 #define FEC_RACC_IPDIS (1 << 1) 185 #define FEC_RACC_PRODIS (1 << 2) 186 #define FEC_RACC_SHIFT16 BIT(7) 187 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 188 189 /* MIB Control Register */ 190 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) 191 192 /* 193 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 194 * size bits. Other FEC hardware does not, so we need to take that into 195 * account when setting it. 196 */ 197 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 198 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 199 defined(CONFIG_ARM64) 200 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 201 #else 202 #define OPT_FRAME_SIZE 0 203 #endif 204 205 /* FEC MII MMFR bits definition */ 206 #define FEC_MMFR_ST (1 << 30) 207 #define FEC_MMFR_OP_READ (2 << 28) 208 #define FEC_MMFR_OP_WRITE (1 << 28) 209 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 210 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 211 #define FEC_MMFR_TA (2 << 16) 212 #define FEC_MMFR_DATA(v) (v & 0xffff) 213 /* FEC ECR bits definition */ 214 #define FEC_ECR_MAGICEN (1 << 2) 215 #define FEC_ECR_SLEEP (1 << 3) 216 217 #define FEC_MII_TIMEOUT 30000 /* us */ 218 219 /* Transmitter timeout */ 220 #define TX_TIMEOUT (2 * HZ) 221 222 #define FEC_PAUSE_FLAG_AUTONEG 0x1 223 #define FEC_PAUSE_FLAG_ENABLE 0x2 224 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 225 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 226 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 227 228 #define COPYBREAK_DEFAULT 256 229 230 /* Max number of allowed TCP segments for software TSO */ 231 #define FEC_MAX_TSO_SEGS 100 232 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 233 234 #define IS_TSO_HEADER(txq, addr) \ 235 ((addr >= txq->tso_hdrs_dma) && \ 236 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 237 238 static int mii_cnt; 239 240 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 241 struct bufdesc_prop *bd) 242 { 243 return (bdp >= bd->last) ? bd->base 244 : (struct bufdesc *)(((void *)bdp) + bd->dsize); 245 } 246 247 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 248 struct bufdesc_prop *bd) 249 { 250 return (bdp <= bd->base) ? bd->last 251 : (struct bufdesc *)(((void *)bdp) - bd->dsize); 252 } 253 254 static int fec_enet_get_bd_index(struct bufdesc *bdp, 255 struct bufdesc_prop *bd) 256 { 257 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; 258 } 259 260 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) 261 { 262 int entries; 263 264 entries = (((const char *)txq->dirty_tx - 265 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; 266 267 return entries >= 0 ? 
entries : entries + txq->bd.ring_size; 268 } 269 270 static void swap_buffer(void *bufaddr, int len) 271 { 272 int i; 273 unsigned int *buf = bufaddr; 274 275 for (i = 0; i < len; i += 4, buf++) 276 swab32s(buf); 277 } 278 279 static void swap_buffer2(void *dst_buf, void *src_buf, int len) 280 { 281 int i; 282 unsigned int *src = src_buf; 283 unsigned int *dst = dst_buf; 284 285 for (i = 0; i < len; i += 4, src++, dst++) 286 *dst = swab32p(src); 287 } 288 289 static void fec_dump(struct net_device *ndev) 290 { 291 struct fec_enet_private *fep = netdev_priv(ndev); 292 struct bufdesc *bdp; 293 struct fec_enet_priv_tx_q *txq; 294 int index = 0; 295 296 netdev_info(ndev, "TX ring dump\n"); 297 pr_info("Nr SC addr len SKB\n"); 298 299 txq = fep->tx_queue[0]; 300 bdp = txq->bd.base; 301 302 do { 303 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", 304 index, 305 bdp == txq->bd.cur ? 'S' : ' ', 306 bdp == txq->dirty_tx ? 'H' : ' ', 307 fec16_to_cpu(bdp->cbd_sc), 308 fec32_to_cpu(bdp->cbd_bufaddr), 309 fec16_to_cpu(bdp->cbd_datlen), 310 txq->tx_skbuff[index]); 311 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 312 index++; 313 } while (bdp != txq->bd.base); 314 } 315 316 static inline bool is_ipv4_pkt(struct sk_buff *skb) 317 { 318 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 319 } 320 321 static int 322 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 323 { 324 /* Only run for packets requiring a checksum. */ 325 if (skb->ip_summed != CHECKSUM_PARTIAL) 326 return 0; 327 328 if (unlikely(skb_cow_head(skb, 0))) 329 return -1; 330 331 if (is_ipv4_pkt(skb)) 332 ip_hdr(skb)->check = 0; 333 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 334 335 return 0; 336 } 337 338 static struct bufdesc * 339 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 340 struct sk_buff *skb, 341 struct net_device *ndev) 342 { 343 struct fec_enet_private *fep = netdev_priv(ndev); 344 struct bufdesc *bdp = txq->bd.cur; 345 struct bufdesc_ex *ebdp; 346 int nr_frags = skb_shinfo(skb)->nr_frags; 347 int frag, frag_len; 348 unsigned short status; 349 unsigned int estatus = 0; 350 skb_frag_t *this_frag; 351 unsigned int index; 352 void *bufaddr; 353 dma_addr_t addr; 354 int i; 355 356 for (frag = 0; frag < nr_frags; frag++) { 357 this_frag = &skb_shinfo(skb)->frags[frag]; 358 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 359 ebdp = (struct bufdesc_ex *)bdp; 360 361 status = fec16_to_cpu(bdp->cbd_sc); 362 status &= ~BD_ENET_TX_STATS; 363 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 364 frag_len = skb_shinfo(skb)->frags[frag].size; 365 366 /* Handle the last BD specially */ 367 if (frag == nr_frags - 1) { 368 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 369 if (fep->bufdesc_ex) { 370 estatus |= BD_ENET_TX_INT; 371 if (unlikely(skb_shinfo(skb)->tx_flags & 372 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 373 estatus |= BD_ENET_TX_TS; 374 } 375 } 376 377 if (fep->bufdesc_ex) { 378 if (fep->quirks & FEC_QUIRK_HAS_AVB) 379 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 380 if (skb->ip_summed == CHECKSUM_PARTIAL) 381 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 382 ebdp->cbd_bdu = 0; 383 ebdp->cbd_esc = cpu_to_fec32(estatus); 384 } 385 386 bufaddr = page_address(this_frag->page.p) + this_frag->page_offset; 387 388 index = fec_enet_get_bd_index(bdp, &txq->bd); 389 if (((unsigned long) bufaddr) & fep->tx_align || 390 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 391 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 392 bufaddr = txq->tx_bounce[index]; 393 394 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 
395 swap_buffer(bufaddr, frag_len); 396 } 397 398 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 399 DMA_TO_DEVICE); 400 if (dma_mapping_error(&fep->pdev->dev, addr)) { 401 if (net_ratelimit()) 402 netdev_err(ndev, "Tx DMA memory map failed\n"); 403 goto dma_mapping_error; 404 } 405 406 bdp->cbd_bufaddr = cpu_to_fec32(addr); 407 bdp->cbd_datlen = cpu_to_fec16(frag_len); 408 /* Make sure the updates to rest of the descriptor are 409 * performed before transferring ownership. 410 */ 411 wmb(); 412 bdp->cbd_sc = cpu_to_fec16(status); 413 } 414 415 return bdp; 416 dma_mapping_error: 417 bdp = txq->bd.cur; 418 for (i = 0; i < frag; i++) { 419 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 420 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), 421 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); 422 } 423 return ERR_PTR(-ENOMEM); 424 } 425 426 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 427 struct sk_buff *skb, struct net_device *ndev) 428 { 429 struct fec_enet_private *fep = netdev_priv(ndev); 430 int nr_frags = skb_shinfo(skb)->nr_frags; 431 struct bufdesc *bdp, *last_bdp; 432 void *bufaddr; 433 dma_addr_t addr; 434 unsigned short status; 435 unsigned short buflen; 436 unsigned int estatus = 0; 437 unsigned int index; 438 int entries_free; 439 440 entries_free = fec_enet_get_free_txdesc_num(txq); 441 if (entries_free < MAX_SKB_FRAGS + 1) { 442 dev_kfree_skb_any(skb); 443 if (net_ratelimit()) 444 netdev_err(ndev, "NOT enough BD for SG!\n"); 445 return NETDEV_TX_OK; 446 } 447 448 /* Protocol checksum off-load for TCP and UDP. */ 449 if (fec_enet_clear_csum(skb, ndev)) { 450 dev_kfree_skb_any(skb); 451 return NETDEV_TX_OK; 452 } 453 454 /* Fill in a Tx ring entry */ 455 bdp = txq->bd.cur; 456 last_bdp = bdp; 457 status = fec16_to_cpu(bdp->cbd_sc); 458 status &= ~BD_ENET_TX_STATS; 459 460 /* Set buffer length and buffer pointer */ 461 bufaddr = skb->data; 462 buflen = skb_headlen(skb); 463 464 index = fec_enet_get_bd_index(bdp, &txq->bd); 465 if (((unsigned long) bufaddr) & fep->tx_align || 466 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 467 memcpy(txq->tx_bounce[index], skb->data, buflen); 468 bufaddr = txq->tx_bounce[index]; 469 470 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 471 swap_buffer(bufaddr, buflen); 472 } 473 474 /* Push the data cache so the CPM does not get stale memory data. 
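	 * (On non-cache-coherent SoCs the dma_map_single(..., DMA_TO_DEVICE)
	 * call below performs that flush as part of creating the mapping.)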
*/ 475 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 476 if (dma_mapping_error(&fep->pdev->dev, addr)) { 477 dev_kfree_skb_any(skb); 478 if (net_ratelimit()) 479 netdev_err(ndev, "Tx DMA memory map failed\n"); 480 return NETDEV_TX_OK; 481 } 482 483 if (nr_frags) { 484 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 485 if (IS_ERR(last_bdp)) { 486 dma_unmap_single(&fep->pdev->dev, addr, 487 buflen, DMA_TO_DEVICE); 488 dev_kfree_skb_any(skb); 489 return NETDEV_TX_OK; 490 } 491 } else { 492 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 493 if (fep->bufdesc_ex) { 494 estatus = BD_ENET_TX_INT; 495 if (unlikely(skb_shinfo(skb)->tx_flags & 496 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 497 estatus |= BD_ENET_TX_TS; 498 } 499 } 500 bdp->cbd_bufaddr = cpu_to_fec32(addr); 501 bdp->cbd_datlen = cpu_to_fec16(buflen); 502 503 if (fep->bufdesc_ex) { 504 505 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 506 507 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 508 fep->hwts_tx_en)) 509 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 510 511 if (fep->quirks & FEC_QUIRK_HAS_AVB) 512 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 513 514 if (skb->ip_summed == CHECKSUM_PARTIAL) 515 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 516 517 ebdp->cbd_bdu = 0; 518 ebdp->cbd_esc = cpu_to_fec32(estatus); 519 } 520 521 index = fec_enet_get_bd_index(last_bdp, &txq->bd); 522 /* Save skb pointer */ 523 txq->tx_skbuff[index] = skb; 524 525 /* Make sure the updates to rest of the descriptor are performed before 526 * transferring ownership. 527 */ 528 wmb(); 529 530 /* Send it on its way. Tell FEC it's ready, interrupt when done, 531 * it's the last BD of the frame, and to put the CRC on the end. 532 */ 533 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 534 bdp->cbd_sc = cpu_to_fec16(status); 535 536 /* If this was the last BD in the ring, start at the beginning again. */ 537 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); 538 539 skb_tx_timestamp(skb); 540 541 /* Make sure the update to bdp and tx_skbuff are performed before 542 * txq->bd.cur. 
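	 * (The wmb() below pairs with the rmb() issued after the
	 * READ_ONCE(txq->bd.cur) load in fec_enet_tx_queue().)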
543 */ 544 wmb(); 545 txq->bd.cur = bdp; 546 547 /* Trigger transmission start */ 548 writel(0, txq->bd.reg_desc_active); 549 550 return 0; 551 } 552 553 static int 554 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 555 struct net_device *ndev, 556 struct bufdesc *bdp, int index, char *data, 557 int size, bool last_tcp, bool is_last) 558 { 559 struct fec_enet_private *fep = netdev_priv(ndev); 560 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 561 unsigned short status; 562 unsigned int estatus = 0; 563 dma_addr_t addr; 564 565 status = fec16_to_cpu(bdp->cbd_sc); 566 status &= ~BD_ENET_TX_STATS; 567 568 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 569 570 if (((unsigned long) data) & fep->tx_align || 571 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 572 memcpy(txq->tx_bounce[index], data, size); 573 data = txq->tx_bounce[index]; 574 575 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 576 swap_buffer(data, size); 577 } 578 579 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 580 if (dma_mapping_error(&fep->pdev->dev, addr)) { 581 dev_kfree_skb_any(skb); 582 if (net_ratelimit()) 583 netdev_err(ndev, "Tx DMA memory map failed\n"); 584 return NETDEV_TX_BUSY; 585 } 586 587 bdp->cbd_datlen = cpu_to_fec16(size); 588 bdp->cbd_bufaddr = cpu_to_fec32(addr); 589 590 if (fep->bufdesc_ex) { 591 if (fep->quirks & FEC_QUIRK_HAS_AVB) 592 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 593 if (skb->ip_summed == CHECKSUM_PARTIAL) 594 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 595 ebdp->cbd_bdu = 0; 596 ebdp->cbd_esc = cpu_to_fec32(estatus); 597 } 598 599 /* Handle the last BD specially */ 600 if (last_tcp) 601 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 602 if (is_last) { 603 status |= BD_ENET_TX_INTR; 604 if (fep->bufdesc_ex) 605 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); 606 } 607 608 bdp->cbd_sc = cpu_to_fec16(status); 609 610 return 0; 611 } 612 613 static int 614 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 615 struct sk_buff *skb, struct net_device *ndev, 616 struct bufdesc *bdp, int index) 617 { 618 struct fec_enet_private *fep = netdev_priv(ndev); 619 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 620 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 621 void *bufaddr; 622 unsigned long dmabuf; 623 unsigned short status; 624 unsigned int estatus = 0; 625 626 status = fec16_to_cpu(bdp->cbd_sc); 627 status &= ~BD_ENET_TX_STATS; 628 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 629 630 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 631 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 632 if (((unsigned long)bufaddr) & fep->tx_align || 633 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 634 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 635 bufaddr = txq->tx_bounce[index]; 636 637 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 638 swap_buffer(bufaddr, hdr_len); 639 640 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 641 hdr_len, DMA_TO_DEVICE); 642 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 643 dev_kfree_skb_any(skb); 644 if (net_ratelimit()) 645 netdev_err(ndev, "Tx DMA memory map failed\n"); 646 return NETDEV_TX_BUSY; 647 } 648 } 649 650 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); 651 bdp->cbd_datlen = cpu_to_fec16(hdr_len); 652 653 if (fep->bufdesc_ex) { 654 if (fep->quirks & FEC_QUIRK_HAS_AVB) 655 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 656 if (skb->ip_summed == CHECKSUM_PARTIAL) 657 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 658 ebdp->cbd_bdu = 0; 659 ebdp->cbd_esc = cpu_to_fec32(estatus); 
660 } 661 662 bdp->cbd_sc = cpu_to_fec16(status); 663 664 return 0; 665 } 666 667 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 668 struct sk_buff *skb, 669 struct net_device *ndev) 670 { 671 struct fec_enet_private *fep = netdev_priv(ndev); 672 int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 673 int total_len, data_left; 674 struct bufdesc *bdp = txq->bd.cur; 675 struct tso_t tso; 676 unsigned int index = 0; 677 int ret; 678 679 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { 680 dev_kfree_skb_any(skb); 681 if (net_ratelimit()) 682 netdev_err(ndev, "NOT enough BD for TSO!\n"); 683 return NETDEV_TX_OK; 684 } 685 686 /* Protocol checksum off-load for TCP and UDP. */ 687 if (fec_enet_clear_csum(skb, ndev)) { 688 dev_kfree_skb_any(skb); 689 return NETDEV_TX_OK; 690 } 691 692 /* Initialize the TSO handler, and prepare the first payload */ 693 tso_start(skb, &tso); 694 695 total_len = skb->len - hdr_len; 696 while (total_len > 0) { 697 char *hdr; 698 699 index = fec_enet_get_bd_index(bdp, &txq->bd); 700 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 701 total_len -= data_left; 702 703 /* prepare packet headers: MAC + IP + TCP */ 704 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 705 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 706 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 707 if (ret) 708 goto err_release; 709 710 while (data_left > 0) { 711 int size; 712 713 size = min_t(int, tso.size, data_left); 714 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 715 index = fec_enet_get_bd_index(bdp, &txq->bd); 716 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 717 bdp, index, 718 tso.data, size, 719 size == data_left, 720 total_len == 0); 721 if (ret) 722 goto err_release; 723 724 data_left -= size; 725 tso_build_data(skb, &tso, size); 726 } 727 728 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 729 } 730 731 /* Save skb pointer */ 732 txq->tx_skbuff[index] = skb; 733 734 skb_tx_timestamp(skb); 735 txq->bd.cur = bdp; 736 737 /* Trigger transmission start */ 738 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 739 !readl(txq->bd.reg_desc_active) || 740 !readl(txq->bd.reg_desc_active) || 741 !readl(txq->bd.reg_desc_active) || 742 !readl(txq->bd.reg_desc_active)) 743 writel(0, txq->bd.reg_desc_active); 744 745 return 0; 746 747 err_release: 748 /* TODO: Release all used data descriptors for TSO */ 749 return ret; 750 } 751 752 static netdev_tx_t 753 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 754 { 755 struct fec_enet_private *fep = netdev_priv(ndev); 756 int entries_free; 757 unsigned short queue; 758 struct fec_enet_priv_tx_q *txq; 759 struct netdev_queue *nq; 760 int ret; 761 762 queue = skb_get_queue_mapping(skb); 763 txq = fep->tx_queue[queue]; 764 nq = netdev_get_tx_queue(ndev, queue); 765 766 if (skb_is_gso(skb)) 767 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 768 else 769 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 770 if (ret) 771 return ret; 772 773 entries_free = fec_enet_get_free_txdesc_num(txq); 774 if (entries_free <= txq->tx_stop_threshold) 775 netif_tx_stop_queue(nq); 776 777 return NETDEV_TX_OK; 778 } 779 780 /* Init RX & TX buffer descriptors 781 */ 782 static void fec_enet_bd_init(struct net_device *dev) 783 { 784 struct fec_enet_private *fep = netdev_priv(dev); 785 struct fec_enet_priv_tx_q *txq; 786 struct fec_enet_priv_rx_q *rxq; 787 struct bufdesc *bdp; 788 unsigned int i; 789 unsigned int q; 790 791 for (q = 0; q < fep->num_rx_queues; q++) { 792 /* Initialize the receive 
buffer descriptors. */ 793 rxq = fep->rx_queue[q]; 794 bdp = rxq->bd.base; 795 796 for (i = 0; i < rxq->bd.ring_size; i++) { 797 798 /* Initialize the BD for every fragment in the page. */ 799 if (bdp->cbd_bufaddr) 800 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 801 else 802 bdp->cbd_sc = cpu_to_fec16(0); 803 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 804 } 805 806 /* Set the last buffer to wrap */ 807 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 808 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 809 810 rxq->bd.cur = rxq->bd.base; 811 } 812 813 for (q = 0; q < fep->num_tx_queues; q++) { 814 /* ...and the same for transmit */ 815 txq = fep->tx_queue[q]; 816 bdp = txq->bd.base; 817 txq->bd.cur = bdp; 818 819 for (i = 0; i < txq->bd.ring_size; i++) { 820 /* Initialize the BD for every fragment in the page. */ 821 bdp->cbd_sc = cpu_to_fec16(0); 822 if (bdp->cbd_bufaddr && 823 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 824 dma_unmap_single(&fep->pdev->dev, 825 fec32_to_cpu(bdp->cbd_bufaddr), 826 fec16_to_cpu(bdp->cbd_datlen), 827 DMA_TO_DEVICE); 828 if (txq->tx_skbuff[i]) { 829 dev_kfree_skb_any(txq->tx_skbuff[i]); 830 txq->tx_skbuff[i] = NULL; 831 } 832 bdp->cbd_bufaddr = cpu_to_fec32(0); 833 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 834 } 835 836 /* Set the last buffer to wrap */ 837 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 838 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 839 txq->dirty_tx = bdp; 840 } 841 } 842 843 static void fec_enet_active_rxring(struct net_device *ndev) 844 { 845 struct fec_enet_private *fep = netdev_priv(ndev); 846 int i; 847 848 for (i = 0; i < fep->num_rx_queues; i++) 849 writel(0, fep->rx_queue[i]->bd.reg_desc_active); 850 } 851 852 static void fec_enet_enable_ring(struct net_device *ndev) 853 { 854 struct fec_enet_private *fep = netdev_priv(ndev); 855 struct fec_enet_priv_tx_q *txq; 856 struct fec_enet_priv_rx_q *rxq; 857 int i; 858 859 for (i = 0; i < fep->num_rx_queues; i++) { 860 rxq = fep->rx_queue[i]; 861 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); 862 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 863 864 /* enable DMA1/2 */ 865 if (i) 866 writel(RCMR_MATCHEN | RCMR_CMP(i), 867 fep->hwp + FEC_RCMR(i)); 868 } 869 870 for (i = 0; i < fep->num_tx_queues; i++) { 871 txq = fep->tx_queue[i]; 872 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); 873 874 /* enable DMA1/2 */ 875 if (i) 876 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 877 fep->hwp + FEC_DMA_CFG(i)); 878 } 879 } 880 881 static void fec_enet_reset_skb(struct net_device *ndev) 882 { 883 struct fec_enet_private *fep = netdev_priv(ndev); 884 struct fec_enet_priv_tx_q *txq; 885 int i, j; 886 887 for (i = 0; i < fep->num_tx_queues; i++) { 888 txq = fep->tx_queue[i]; 889 890 for (j = 0; j < txq->bd.ring_size; j++) { 891 if (txq->tx_skbuff[j]) { 892 dev_kfree_skb_any(txq->tx_skbuff[j]); 893 txq->tx_skbuff[j] = NULL; 894 } 895 } 896 } 897 } 898 899 /* 900 * This function is called to start or restart the FEC during a link 901 * change, transmit timeout, or to reconfigure the FEC. The network 902 * packet processing for this device must be stopped before this call. 903 */ 904 static void 905 fec_restart(struct net_device *ndev) 906 { 907 struct fec_enet_private *fep = netdev_priv(ndev); 908 u32 val; 909 u32 temp_mac[2]; 910 u32 rcntl = OPT_FRAME_SIZE | 0x04; 911 u32 ecntl = 0x2; /* ETHEREN */ 912 913 /* Whack a reset. We should wait for this. 914 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 915 * instead of reset MAC itself. 
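	 * (Those SoCs set FEC_QUIRK_HAS_AVB, so the branch below clears
	 * FEC_ECNTRL instead of writing the reset bit.)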
916 */ 917 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 918 writel(0, fep->hwp + FEC_ECNTRL); 919 } else { 920 writel(1, fep->hwp + FEC_ECNTRL); 921 udelay(10); 922 } 923 924 /* 925 * enet-mac reset will reset mac address registers too, 926 * so need to reconfigure it. 927 */ 928 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); 929 writel((__force u32)cpu_to_be32(temp_mac[0]), 930 fep->hwp + FEC_ADDR_LOW); 931 writel((__force u32)cpu_to_be32(temp_mac[1]), 932 fep->hwp + FEC_ADDR_HIGH); 933 934 /* Clear any outstanding interrupt. */ 935 writel(0xffffffff, fep->hwp + FEC_IEVENT); 936 937 fec_enet_bd_init(ndev); 938 939 fec_enet_enable_ring(ndev); 940 941 /* Reset tx SKB buffers. */ 942 fec_enet_reset_skb(ndev); 943 944 /* Enable MII mode */ 945 if (fep->full_duplex == DUPLEX_FULL) { 946 /* FD enable */ 947 writel(0x04, fep->hwp + FEC_X_CNTRL); 948 } else { 949 /* No Rcv on Xmit */ 950 rcntl |= 0x02; 951 writel(0x0, fep->hwp + FEC_X_CNTRL); 952 } 953 954 /* Set MII speed */ 955 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 956 957 #if !defined(CONFIG_M5272) 958 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 959 val = readl(fep->hwp + FEC_RACC); 960 /* align IP header */ 961 val |= FEC_RACC_SHIFT16; 962 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 963 /* set RX checksum */ 964 val |= FEC_RACC_OPTIONS; 965 else 966 val &= ~FEC_RACC_OPTIONS; 967 writel(val, fep->hwp + FEC_RACC); 968 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); 969 } 970 #endif 971 972 /* 973 * The phy interface and speed need to get configured 974 * differently on enet-mac. 975 */ 976 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 977 /* Enable flow control and length check */ 978 rcntl |= 0x40000000 | 0x00000020; 979 980 /* RGMII, RMII or MII */ 981 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || 982 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 983 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 984 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 985 rcntl |= (1 << 6); 986 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 987 rcntl |= (1 << 8); 988 else 989 rcntl &= ~(1 << 8); 990 991 /* 1G, 100M or 10M */ 992 if (ndev->phydev) { 993 if (ndev->phydev->speed == SPEED_1000) 994 ecntl |= (1 << 5); 995 else if (ndev->phydev->speed == SPEED_100) 996 rcntl &= ~(1 << 9); 997 else 998 rcntl |= (1 << 9); 999 } 1000 } else { 1001 #ifdef FEC_MIIGSK_ENR 1002 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1003 u32 cfgr; 1004 /* disable the gasket and wait */ 1005 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1006 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1007 udelay(1); 1008 1009 /* 1010 * configure the gasket: 1011 * RMII, 50 MHz, no loopback, no echo 1012 * MII, 25 MHz, no loopback, no echo 1013 */ 1014 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1015 ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 1016 if (ndev->phydev && ndev->phydev->speed == SPEED_10) 1017 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1018 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1019 1020 /* re-enable the gasket */ 1021 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1022 } 1023 #endif 1024 } 1025 1026 #if !defined(CONFIG_M5272) 1027 /* enable pause frame*/ 1028 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1029 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1030 ndev->phydev && ndev->phydev->pause)) { 1031 rcntl |= FEC_ENET_FCE; 1032 1033 /* set FIFO threshold parameter to reduce overrun */ 1034 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1035 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1036 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1037 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1038 1039 /* OPD */ 1040 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1041 } else { 1042 rcntl &= ~FEC_ENET_FCE; 1043 } 1044 #endif /* !defined(CONFIG_M5272) */ 1045 1046 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1047 1048 /* Setup multicast filter. */ 1049 set_multicast_list(ndev); 1050 #ifndef CONFIG_M5272 1051 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1052 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1053 #endif 1054 1055 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1056 /* enable ENET endian swap */ 1057 ecntl |= (1 << 8); 1058 /* enable ENET store and forward mode */ 1059 writel(1 << 8, fep->hwp + FEC_X_WMRK); 1060 } 1061 1062 if (fep->bufdesc_ex) 1063 ecntl |= (1 << 4); 1064 1065 #ifndef CONFIG_M5272 1066 /* Enable the MIB statistic event counters */ 1067 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1068 #endif 1069 1070 /* And last, enable the transmit and receive processing */ 1071 writel(ecntl, fep->hwp + FEC_ECNTRL); 1072 fec_enet_active_rxring(ndev); 1073 1074 if (fep->bufdesc_ex) 1075 fec_ptp_start_cyclecounter(ndev); 1076 1077 /* Enable interrupts we wish to service */ 1078 if (fep->link) 1079 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1080 else 1081 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); 1082 1083 /* Init the interrupt coalescing */ 1084 fec_enet_itr_coal_init(ndev); 1085 1086 } 1087 1088 static void 1089 fec_stop(struct net_device *ndev) 1090 { 1091 struct fec_enet_private *fep = netdev_priv(ndev); 1092 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 1093 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); 1094 u32 val; 1095 1096 /* We cannot expect a graceful transmit stop without link !!! */ 1097 if (fep->link) { 1098 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */ 1099 udelay(10); 1100 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA)) 1101 netdev_err(ndev, "Graceful transmit stop did not complete!\n"); 1102 } 1103 1104 /* Whack a reset. We should wait for this. 1105 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1106 * instead of reset MAC itself. 
1107 */ 1108 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1109 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 1110 writel(0, fep->hwp + FEC_ECNTRL); 1111 } else { 1112 writel(1, fep->hwp + FEC_ECNTRL); 1113 udelay(10); 1114 } 1115 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1116 } else { 1117 writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK); 1118 val = readl(fep->hwp + FEC_ECNTRL); 1119 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1120 writel(val, fep->hwp + FEC_ECNTRL); 1121 1122 if (pdata && pdata->sleep_mode_enable) 1123 pdata->sleep_mode_enable(true); 1124 } 1125 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1126 1127 /* We have to keep ENET enabled to have MII interrupt stay working */ 1128 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1129 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1130 writel(2, fep->hwp + FEC_ECNTRL); 1131 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1132 } 1133 } 1134 1135 1136 static void 1137 fec_timeout(struct net_device *ndev) 1138 { 1139 struct fec_enet_private *fep = netdev_priv(ndev); 1140 1141 fec_dump(ndev); 1142 1143 ndev->stats.tx_errors++; 1144 1145 schedule_work(&fep->tx_timeout_work); 1146 } 1147 1148 static void fec_enet_timeout_work(struct work_struct *work) 1149 { 1150 struct fec_enet_private *fep = 1151 container_of(work, struct fec_enet_private, tx_timeout_work); 1152 struct net_device *ndev = fep->netdev; 1153 1154 rtnl_lock(); 1155 if (netif_device_present(ndev) || netif_running(ndev)) { 1156 napi_disable(&fep->napi); 1157 netif_tx_lock_bh(ndev); 1158 fec_restart(ndev); 1159 netif_wake_queue(ndev); 1160 netif_tx_unlock_bh(ndev); 1161 napi_enable(&fep->napi); 1162 } 1163 rtnl_unlock(); 1164 } 1165 1166 static void 1167 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1168 struct skb_shared_hwtstamps *hwtstamps) 1169 { 1170 unsigned long flags; 1171 u64 ns; 1172 1173 spin_lock_irqsave(&fep->tmreg_lock, flags); 1174 ns = timecounter_cyc2time(&fep->tc, ts); 1175 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1176 1177 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1178 hwtstamps->hwtstamp = ns_to_ktime(ns); 1179 } 1180 1181 static void 1182 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id) 1183 { 1184 struct fec_enet_private *fep; 1185 struct bufdesc *bdp; 1186 unsigned short status; 1187 struct sk_buff *skb; 1188 struct fec_enet_priv_tx_q *txq; 1189 struct netdev_queue *nq; 1190 int index = 0; 1191 int entries_free; 1192 1193 fep = netdev_priv(ndev); 1194 1195 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1196 1197 txq = fep->tx_queue[queue_id]; 1198 /* get next bdp of dirty_tx */ 1199 nq = netdev_get_tx_queue(ndev, queue_id); 1200 bdp = txq->dirty_tx; 1201 1202 /* get next bdp of dirty_tx */ 1203 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1204 1205 while (bdp != READ_ONCE(txq->bd.cur)) { 1206 /* Order the load of bd.cur and cbd_sc */ 1207 rmb(); 1208 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1209 if (status & BD_ENET_TX_READY) 1210 break; 1211 1212 index = fec_enet_get_bd_index(bdp, &txq->bd); 1213 1214 skb = txq->tx_skbuff[index]; 1215 txq->tx_skbuff[index] = NULL; 1216 if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1217 dma_unmap_single(&fep->pdev->dev, 1218 fec32_to_cpu(bdp->cbd_bufaddr), 1219 fec16_to_cpu(bdp->cbd_datlen), 1220 DMA_TO_DEVICE); 1221 bdp->cbd_bufaddr = cpu_to_fec32(0); 1222 if (!skb) 1223 goto skb_done; 1224 1225 /* Check for errors. 
*/ 1226 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1227 BD_ENET_TX_RL | BD_ENET_TX_UN | 1228 BD_ENET_TX_CSL)) { 1229 ndev->stats.tx_errors++; 1230 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1231 ndev->stats.tx_heartbeat_errors++; 1232 if (status & BD_ENET_TX_LC) /* Late collision */ 1233 ndev->stats.tx_window_errors++; 1234 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1235 ndev->stats.tx_aborted_errors++; 1236 if (status & BD_ENET_TX_UN) /* Underrun */ 1237 ndev->stats.tx_fifo_errors++; 1238 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1239 ndev->stats.tx_carrier_errors++; 1240 } else { 1241 ndev->stats.tx_packets++; 1242 ndev->stats.tx_bytes += skb->len; 1243 } 1244 1245 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && 1246 fep->bufdesc_ex) { 1247 struct skb_shared_hwtstamps shhwtstamps; 1248 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1249 1250 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 1251 skb_tstamp_tx(skb, &shhwtstamps); 1252 } 1253 1254 /* Deferred means some collisions occurred during transmit, 1255 * but we eventually sent the packet OK. 1256 */ 1257 if (status & BD_ENET_TX_DEF) 1258 ndev->stats.collisions++; 1259 1260 /* Free the sk buffer associated with this last transmit */ 1261 dev_kfree_skb_any(skb); 1262 skb_done: 1263 /* Make sure the update to bdp and tx_skbuff are performed 1264 * before dirty_tx 1265 */ 1266 wmb(); 1267 txq->dirty_tx = bdp; 1268 1269 /* Update pointer to next buffer descriptor to be transmitted */ 1270 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1271 1272 /* Since we have freed up a buffer, the ring is no longer full 1273 */ 1274 if (netif_queue_stopped(ndev)) { 1275 entries_free = fec_enet_get_free_txdesc_num(txq); 1276 if (entries_free >= txq->tx_wake_threshold) 1277 netif_tx_wake_queue(nq); 1278 } 1279 } 1280 1281 /* ERR006358: Keep the transmitter going */ 1282 if (bdp != txq->bd.cur && 1283 readl(txq->bd.reg_desc_active) == 0) 1284 writel(0, txq->bd.reg_desc_active); 1285 } 1286 1287 static void 1288 fec_enet_tx(struct net_device *ndev) 1289 { 1290 struct fec_enet_private *fep = netdev_priv(ndev); 1291 u16 queue_id; 1292 /* First process class A queue, then Class B and Best Effort queue */ 1293 for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) { 1294 clear_bit(queue_id, &fep->work_tx); 1295 fec_enet_tx_queue(ndev, queue_id); 1296 } 1297 return; 1298 } 1299 1300 static int 1301 fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb) 1302 { 1303 struct fec_enet_private *fep = netdev_priv(ndev); 1304 int off; 1305 1306 off = ((unsigned long)skb->data) & fep->rx_align; 1307 if (off) 1308 skb_reserve(skb, fep->rx_align + 1 - off); 1309 1310 bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE)); 1311 if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) { 1312 if (net_ratelimit()) 1313 netdev_err(ndev, "Rx DMA memory map failed\n"); 1314 return -ENOMEM; 1315 } 1316 1317 return 0; 1318 } 1319 1320 static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, 1321 struct bufdesc *bdp, u32 length, bool swap) 1322 { 1323 struct fec_enet_private *fep = netdev_priv(ndev); 1324 struct sk_buff *new_skb; 1325 1326 if (length > fep->rx_copybreak) 1327 return false; 1328 1329 new_skb = netdev_alloc_skb(ndev, length); 1330 if (!new_skb) 1331 return false; 1332 1333 dma_sync_single_for_cpu(&fep->pdev->dev, 1334 fec32_to_cpu(bdp->cbd_bufaddr), 1335 FEC_ENET_RX_FRSIZE 
- fep->rx_align, 1336 DMA_FROM_DEVICE); 1337 if (!swap) 1338 memcpy(new_skb->data, (*skb)->data, length); 1339 else 1340 swap_buffer2(new_skb->data, (*skb)->data, length); 1341 *skb = new_skb; 1342 1343 return true; 1344 } 1345 1346 /* During a receive, the bd_rx.cur points to the current incoming buffer. 1347 * When we update through the ring, if the next incoming buffer has 1348 * not been given to the system, we just set the empty indicator, 1349 * effectively tossing the packet. 1350 */ 1351 static int 1352 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) 1353 { 1354 struct fec_enet_private *fep = netdev_priv(ndev); 1355 struct fec_enet_priv_rx_q *rxq; 1356 struct bufdesc *bdp; 1357 unsigned short status; 1358 struct sk_buff *skb_new = NULL; 1359 struct sk_buff *skb; 1360 ushort pkt_len; 1361 __u8 *data; 1362 int pkt_received = 0; 1363 struct bufdesc_ex *ebdp = NULL; 1364 bool vlan_packet_rcvd = false; 1365 u16 vlan_tag; 1366 int index = 0; 1367 bool is_copybreak; 1368 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME; 1369 1370 #ifdef CONFIG_M532x 1371 flush_cache_all(); 1372 #endif 1373 queue_id = FEC_ENET_GET_QUQUE(queue_id); 1374 rxq = fep->rx_queue[queue_id]; 1375 1376 /* First, grab all of the stats for the incoming packet. 1377 * These get messed up if we get called due to a busy condition. 1378 */ 1379 bdp = rxq->bd.cur; 1380 1381 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1382 1383 if (pkt_received >= budget) 1384 break; 1385 pkt_received++; 1386 1387 writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); 1388 1389 /* Check for errors. */ 1390 status ^= BD_ENET_RX_LAST; 1391 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1392 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1393 BD_ENET_RX_CL)) { 1394 ndev->stats.rx_errors++; 1395 if (status & BD_ENET_RX_OV) { 1396 /* FIFO overrun */ 1397 ndev->stats.rx_fifo_errors++; 1398 goto rx_processing_done; 1399 } 1400 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1401 | BD_ENET_RX_LAST)) { 1402 /* Frame too long or too short. */ 1403 ndev->stats.rx_length_errors++; 1404 if (status & BD_ENET_RX_LAST) 1405 netdev_err(ndev, "rcv is not +last\n"); 1406 } 1407 if (status & BD_ENET_RX_CR) /* CRC Error */ 1408 ndev->stats.rx_crc_errors++; 1409 /* Report late collisions as a frame error. */ 1410 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1411 ndev->stats.rx_frame_errors++; 1412 goto rx_processing_done; 1413 } 1414 1415 /* Process the incoming frame. */ 1416 ndev->stats.rx_packets++; 1417 pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1418 ndev->stats.rx_bytes += pkt_len; 1419 1420 index = fec_enet_get_bd_index(bdp, &rxq->bd); 1421 skb = rxq->rx_skbuff[index]; 1422 1423 /* The packet length includes FCS, but we don't want to 1424 * include that when passing upstream as it messes up 1425 * bridging applications. 
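		 * (The FCS is 4 bytes, hence the pkt_len - 4 passed below.)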
1426 */ 1427 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4, 1428 need_swap); 1429 if (!is_copybreak) { 1430 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 1431 if (unlikely(!skb_new)) { 1432 ndev->stats.rx_dropped++; 1433 goto rx_processing_done; 1434 } 1435 dma_unmap_single(&fep->pdev->dev, 1436 fec32_to_cpu(bdp->cbd_bufaddr), 1437 FEC_ENET_RX_FRSIZE - fep->rx_align, 1438 DMA_FROM_DEVICE); 1439 } 1440 1441 prefetch(skb->data - NET_IP_ALIGN); 1442 skb_put(skb, pkt_len - 4); 1443 data = skb->data; 1444 1445 if (!is_copybreak && need_swap) 1446 swap_buffer(data, pkt_len); 1447 1448 #if !defined(CONFIG_M5272) 1449 if (fep->quirks & FEC_QUIRK_HAS_RACC) 1450 data = skb_pull_inline(skb, 2); 1451 #endif 1452 1453 /* Extract the enhanced buffer descriptor */ 1454 ebdp = NULL; 1455 if (fep->bufdesc_ex) 1456 ebdp = (struct bufdesc_ex *)bdp; 1457 1458 /* If this is a VLAN packet remove the VLAN Tag */ 1459 vlan_packet_rcvd = false; 1460 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1461 fep->bufdesc_ex && 1462 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { 1463 /* Push and remove the vlan tag */ 1464 struct vlan_hdr *vlan_header = 1465 (struct vlan_hdr *) (data + ETH_HLEN); 1466 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1467 1468 vlan_packet_rcvd = true; 1469 1470 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); 1471 skb_pull(skb, VLAN_HLEN); 1472 } 1473 1474 skb->protocol = eth_type_trans(skb, ndev); 1475 1476 /* Get receive timestamp from the skb */ 1477 if (fep->hwts_rx_en && fep->bufdesc_ex) 1478 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1479 skb_hwtstamps(skb)); 1480 1481 if (fep->bufdesc_ex && 1482 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1483 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 1484 /* don't check it */ 1485 skb->ip_summed = CHECKSUM_UNNECESSARY; 1486 } else { 1487 skb_checksum_none_assert(skb); 1488 } 1489 } 1490 1491 /* Handle received VLAN packets */ 1492 if (vlan_packet_rcvd) 1493 __vlan_hwaccel_put_tag(skb, 1494 htons(ETH_P_8021Q), 1495 vlan_tag); 1496 1497 napi_gro_receive(&fep->napi, skb); 1498 1499 if (is_copybreak) { 1500 dma_sync_single_for_device(&fep->pdev->dev, 1501 fec32_to_cpu(bdp->cbd_bufaddr), 1502 FEC_ENET_RX_FRSIZE - fep->rx_align, 1503 DMA_FROM_DEVICE); 1504 } else { 1505 rxq->rx_skbuff[index] = skb_new; 1506 fec_enet_new_rxbdp(ndev, bdp, skb_new); 1507 } 1508 1509 rx_processing_done: 1510 /* Clear the status flags for this buffer */ 1511 status &= ~BD_ENET_RX_STATS; 1512 1513 /* Mark the buffer empty */ 1514 status |= BD_ENET_RX_EMPTY; 1515 1516 if (fep->bufdesc_ex) { 1517 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1518 1519 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1520 ebdp->cbd_prot = 0; 1521 ebdp->cbd_bdu = 0; 1522 } 1523 /* Make sure the updates to rest of the descriptor are 1524 * performed before transferring ownership. 1525 */ 1526 wmb(); 1527 bdp->cbd_sc = cpu_to_fec16(status); 1528 1529 /* Update BD pointer to next entry */ 1530 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1531 1532 /* Doing this here will keep the FEC running while we process 1533 * incoming frames. On a heavily loaded network, we should be 1534 * able to keep up at the expense of system resources. 
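		 * (Writing any value to reg_desc_active signals the FEC that
		 * empty receive descriptors are available again.)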
1535 */ 1536 writel(0, rxq->bd.reg_desc_active); 1537 } 1538 rxq->bd.cur = bdp; 1539 return pkt_received; 1540 } 1541 1542 static int 1543 fec_enet_rx(struct net_device *ndev, int budget) 1544 { 1545 int pkt_received = 0; 1546 u16 queue_id; 1547 struct fec_enet_private *fep = netdev_priv(ndev); 1548 1549 for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) { 1550 int ret; 1551 1552 ret = fec_enet_rx_queue(ndev, 1553 budget - pkt_received, queue_id); 1554 1555 if (ret < budget - pkt_received) 1556 clear_bit(queue_id, &fep->work_rx); 1557 1558 pkt_received += ret; 1559 } 1560 return pkt_received; 1561 } 1562 1563 static bool 1564 fec_enet_collect_events(struct fec_enet_private *fep, uint int_events) 1565 { 1566 if (int_events == 0) 1567 return false; 1568 1569 if (int_events & FEC_ENET_RXF_0) 1570 fep->work_rx |= (1 << 2); 1571 if (int_events & FEC_ENET_RXF_1) 1572 fep->work_rx |= (1 << 0); 1573 if (int_events & FEC_ENET_RXF_2) 1574 fep->work_rx |= (1 << 1); 1575 1576 if (int_events & FEC_ENET_TXF_0) 1577 fep->work_tx |= (1 << 2); 1578 if (int_events & FEC_ENET_TXF_1) 1579 fep->work_tx |= (1 << 0); 1580 if (int_events & FEC_ENET_TXF_2) 1581 fep->work_tx |= (1 << 1); 1582 1583 return true; 1584 } 1585 1586 static irqreturn_t 1587 fec_enet_interrupt(int irq, void *dev_id) 1588 { 1589 struct net_device *ndev = dev_id; 1590 struct fec_enet_private *fep = netdev_priv(ndev); 1591 uint int_events; 1592 irqreturn_t ret = IRQ_NONE; 1593 1594 int_events = readl(fep->hwp + FEC_IEVENT); 1595 writel(int_events, fep->hwp + FEC_IEVENT); 1596 fec_enet_collect_events(fep, int_events); 1597 1598 if ((fep->work_tx || fep->work_rx) && fep->link) { 1599 ret = IRQ_HANDLED; 1600 1601 if (napi_schedule_prep(&fep->napi)) { 1602 /* Disable the NAPI interrupts */ 1603 writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK); 1604 __napi_schedule(&fep->napi); 1605 } 1606 } 1607 1608 if (int_events & FEC_ENET_MII) { 1609 ret = IRQ_HANDLED; 1610 complete(&fep->mdio_done); 1611 } 1612 return ret; 1613 } 1614 1615 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1616 { 1617 struct net_device *ndev = napi->dev; 1618 struct fec_enet_private *fep = netdev_priv(ndev); 1619 int pkts; 1620 1621 pkts = fec_enet_rx(ndev, budget); 1622 1623 fec_enet_tx(ndev); 1624 1625 if (pkts < budget) { 1626 napi_complete_done(napi, pkts); 1627 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1628 } 1629 return pkts; 1630 } 1631 1632 /* ------------------------------------------------------------------------- */ 1633 static void fec_get_mac(struct net_device *ndev) 1634 { 1635 struct fec_enet_private *fep = netdev_priv(ndev); 1636 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 1637 unsigned char *iap, tmpaddr[ETH_ALEN]; 1638 1639 /* 1640 * try to get mac address in following order: 1641 * 1642 * 1) module parameter via kernel command line in form 1643 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 1644 */ 1645 iap = macaddr; 1646 1647 /* 1648 * 2) from device tree data 1649 */ 1650 if (!is_valid_ether_addr(iap)) { 1651 struct device_node *np = fep->pdev->dev.of_node; 1652 if (np) { 1653 const char *mac = of_get_mac_address(np); 1654 if (mac) 1655 iap = (unsigned char *) mac; 1656 } 1657 } 1658 1659 /* 1660 * 3) from flash or fuse (via platform data) 1661 */ 1662 if (!is_valid_ether_addr(iap)) { 1663 #ifdef CONFIG_M5272 1664 if (FEC_FLASHMAC) 1665 iap = (unsigned char *)FEC_FLASHMAC; 1666 #else 1667 if (pdata) 1668 iap = (unsigned char *)&pdata->mac; 1669 #endif 1670 } 1671 1672 /* 1673 * 4) FEC mac registers set 
by bootloader 1674 */ 1675 if (!is_valid_ether_addr(iap)) { 1676 *((__be32 *) &tmpaddr[0]) = 1677 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 1678 *((__be16 *) &tmpaddr[4]) = 1679 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1680 iap = &tmpaddr[0]; 1681 } 1682 1683 /* 1684 * 5) random mac address 1685 */ 1686 if (!is_valid_ether_addr(iap)) { 1687 /* Report it and use a random ethernet address instead */ 1688 netdev_err(ndev, "Invalid MAC address: %pM\n", iap); 1689 eth_hw_addr_random(ndev); 1690 netdev_info(ndev, "Using random MAC address: %pM\n", 1691 ndev->dev_addr); 1692 return; 1693 } 1694 1695 memcpy(ndev->dev_addr, iap, ETH_ALEN); 1696 1697 /* Adjust MAC if using macaddr */ 1698 if (iap == macaddr) 1699 ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id; 1700 } 1701 1702 /* ------------------------------------------------------------------------- */ 1703 1704 /* 1705 * Phy section 1706 */ 1707 static void fec_enet_adjust_link(struct net_device *ndev) 1708 { 1709 struct fec_enet_private *fep = netdev_priv(ndev); 1710 struct phy_device *phy_dev = ndev->phydev; 1711 int status_change = 0; 1712 1713 /* Prevent a state halted on mii error */ 1714 if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { 1715 phy_dev->state = PHY_RESUMING; 1716 return; 1717 } 1718 1719 /* 1720 * If the netdev is down, or is going down, we're not interested 1721 * in link state events, so just mark our idea of the link as down 1722 * and ignore the event. 1723 */ 1724 if (!netif_running(ndev) || !netif_device_present(ndev)) { 1725 fep->link = 0; 1726 } else if (phy_dev->link) { 1727 if (!fep->link) { 1728 fep->link = phy_dev->link; 1729 status_change = 1; 1730 } 1731 1732 if (fep->full_duplex != phy_dev->duplex) { 1733 fep->full_duplex = phy_dev->duplex; 1734 status_change = 1; 1735 } 1736 1737 if (phy_dev->speed != fep->speed) { 1738 fep->speed = phy_dev->speed; 1739 status_change = 1; 1740 } 1741 1742 /* if any of the above changed restart the FEC */ 1743 if (status_change) { 1744 napi_disable(&fep->napi); 1745 netif_tx_lock_bh(ndev); 1746 fec_restart(ndev); 1747 netif_wake_queue(ndev); 1748 netif_tx_unlock_bh(ndev); 1749 napi_enable(&fep->napi); 1750 } 1751 } else { 1752 if (fep->link) { 1753 napi_disable(&fep->napi); 1754 netif_tx_lock_bh(ndev); 1755 fec_stop(ndev); 1756 netif_tx_unlock_bh(ndev); 1757 napi_enable(&fep->napi); 1758 fep->link = phy_dev->link; 1759 status_change = 1; 1760 } 1761 } 1762 1763 if (status_change) 1764 phy_print_status(phy_dev); 1765 } 1766 1767 static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 1768 { 1769 struct fec_enet_private *fep = bus->priv; 1770 struct device *dev = &fep->pdev->dev; 1771 unsigned long time_left; 1772 int ret = 0; 1773 1774 ret = pm_runtime_get_sync(dev); 1775 if (ret < 0) 1776 return ret; 1777 1778 fep->mii_timeout = 0; 1779 reinit_completion(&fep->mdio_done); 1780 1781 /* start a read op */ 1782 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 1783 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1784 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 1785 1786 /* wait for end of transfer */ 1787 time_left = wait_for_completion_timeout(&fep->mdio_done, 1788 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1789 if (time_left == 0) { 1790 fep->mii_timeout = 1; 1791 netdev_err(fep->netdev, "MDIO read timeout\n"); 1792 ret = -ETIMEDOUT; 1793 goto out; 1794 } 1795 1796 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 1797 1798 out: 1799 pm_runtime_mark_last_busy(dev); 1800 pm_runtime_put_autosuspend(dev); 1801 1802 return ret; 1803 } 1804 1805 static 
int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, 1806 u16 value) 1807 { 1808 struct fec_enet_private *fep = bus->priv; 1809 struct device *dev = &fep->pdev->dev; 1810 unsigned long time_left; 1811 int ret; 1812 1813 ret = pm_runtime_get_sync(dev); 1814 if (ret < 0) 1815 return ret; 1816 else 1817 ret = 0; 1818 1819 fep->mii_timeout = 0; 1820 reinit_completion(&fep->mdio_done); 1821 1822 /* start a write op */ 1823 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | 1824 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 1825 FEC_MMFR_TA | FEC_MMFR_DATA(value), 1826 fep->hwp + FEC_MII_DATA); 1827 1828 /* wait for end of transfer */ 1829 time_left = wait_for_completion_timeout(&fep->mdio_done, 1830 usecs_to_jiffies(FEC_MII_TIMEOUT)); 1831 if (time_left == 0) { 1832 fep->mii_timeout = 1; 1833 netdev_err(fep->netdev, "MDIO write timeout\n"); 1834 ret = -ETIMEDOUT; 1835 } 1836 1837 pm_runtime_mark_last_busy(dev); 1838 pm_runtime_put_autosuspend(dev); 1839 1840 return ret; 1841 } 1842 1843 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 1844 { 1845 struct fec_enet_private *fep = netdev_priv(ndev); 1846 int ret; 1847 1848 if (enable) { 1849 ret = clk_prepare_enable(fep->clk_ahb); 1850 if (ret) 1851 return ret; 1852 1853 ret = clk_prepare_enable(fep->clk_enet_out); 1854 if (ret) 1855 goto failed_clk_enet_out; 1856 1857 if (fep->clk_ptp) { 1858 mutex_lock(&fep->ptp_clk_mutex); 1859 ret = clk_prepare_enable(fep->clk_ptp); 1860 if (ret) { 1861 mutex_unlock(&fep->ptp_clk_mutex); 1862 goto failed_clk_ptp; 1863 } else { 1864 fep->ptp_clk_on = true; 1865 } 1866 mutex_unlock(&fep->ptp_clk_mutex); 1867 } 1868 1869 ret = clk_prepare_enable(fep->clk_ref); 1870 if (ret) 1871 goto failed_clk_ref; 1872 1873 phy_reset_after_clk_enable(ndev->phydev); 1874 } else { 1875 clk_disable_unprepare(fep->clk_ahb); 1876 clk_disable_unprepare(fep->clk_enet_out); 1877 if (fep->clk_ptp) { 1878 mutex_lock(&fep->ptp_clk_mutex); 1879 clk_disable_unprepare(fep->clk_ptp); 1880 fep->ptp_clk_on = false; 1881 mutex_unlock(&fep->ptp_clk_mutex); 1882 } 1883 clk_disable_unprepare(fep->clk_ref); 1884 } 1885 1886 return 0; 1887 1888 failed_clk_ref: 1889 if (fep->clk_ref) 1890 clk_disable_unprepare(fep->clk_ref); 1891 failed_clk_ptp: 1892 if (fep->clk_enet_out) 1893 clk_disable_unprepare(fep->clk_enet_out); 1894 failed_clk_enet_out: 1895 clk_disable_unprepare(fep->clk_ahb); 1896 1897 return ret; 1898 } 1899 1900 static int fec_enet_mii_probe(struct net_device *ndev) 1901 { 1902 struct fec_enet_private *fep = netdev_priv(ndev); 1903 struct phy_device *phy_dev = NULL; 1904 char mdio_bus_id[MII_BUS_ID_SIZE]; 1905 char phy_name[MII_BUS_ID_SIZE + 3]; 1906 int phy_id; 1907 int dev_id = fep->dev_id; 1908 1909 if (fep->phy_node) { 1910 phy_dev = of_phy_connect(ndev, fep->phy_node, 1911 &fec_enet_adjust_link, 0, 1912 fep->phy_interface); 1913 if (!phy_dev) { 1914 netdev_err(ndev, "Unable to connect to phy\n"); 1915 return -ENODEV; 1916 } 1917 } else { 1918 /* check for attached phy */ 1919 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 1920 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 1921 continue; 1922 if (dev_id--) 1923 continue; 1924 strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 1925 break; 1926 } 1927 1928 if (phy_id >= PHY_MAX_ADDR) { 1929 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 1930 strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 1931 phy_id = 0; 1932 } 1933 1934 snprintf(phy_name, sizeof(phy_name), 1935 PHY_ID_FMT, mdio_bus_id, phy_id); 1936 phy_dev = 
phy_connect(ndev, phy_name, &fec_enet_adjust_link, 1937 fep->phy_interface); 1938 } 1939 1940 if (IS_ERR(phy_dev)) { 1941 netdev_err(ndev, "could not attach to PHY\n"); 1942 return PTR_ERR(phy_dev); 1943 } 1944 1945 /* mask with MAC supported features */ 1946 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 1947 phy_dev->supported &= PHY_GBIT_FEATURES; 1948 phy_dev->supported &= ~SUPPORTED_1000baseT_Half; 1949 #if !defined(CONFIG_M5272) 1950 phy_dev->supported |= SUPPORTED_Pause; 1951 #endif 1952 } 1953 else 1954 phy_dev->supported &= PHY_BASIC_FEATURES; 1955 1956 phy_dev->advertising = phy_dev->supported; 1957 1958 fep->link = 0; 1959 fep->full_duplex = 0; 1960 1961 phy_attached_info(phy_dev); 1962 1963 return 0; 1964 } 1965 1966 static int fec_enet_mii_init(struct platform_device *pdev) 1967 { 1968 static struct mii_bus *fec0_mii_bus; 1969 struct net_device *ndev = platform_get_drvdata(pdev); 1970 struct fec_enet_private *fep = netdev_priv(ndev); 1971 struct device_node *node; 1972 int err = -ENXIO; 1973 u32 mii_speed, holdtime; 1974 1975 /* 1976 * The i.MX28 dual fec interfaces are not equal. 1977 * Here are the differences: 1978 * 1979 * - fec0 supports MII & RMII modes while fec1 only supports RMII 1980 * - fec0 acts as the 1588 time master while fec1 is slave 1981 * - external phys can only be configured by fec0 1982 * 1983 * That is to say fec1 can not work independently. It only works 1984 * when fec0 is working. The reason behind this design is that the 1985 * second interface is added primarily for Switch mode. 1986 * 1987 * Because of the last point above, both phys are attached on fec0 1988 * mdio interface in board design, and need to be configured by 1989 * fec0 mii_bus. 1990 */ 1991 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 1992 /* fec1 uses fec0 mii_bus */ 1993 if (mii_cnt && fec0_mii_bus) { 1994 fep->mii_bus = fec0_mii_bus; 1995 mii_cnt++; 1996 return 0; 1997 } 1998 return -ENOENT; 1999 } 2000 2001 fep->mii_timeout = 0; 2002 2003 /* 2004 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) 2005 * 2006 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2007 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2008 * Reference Manual has an error on this, and gets fixed on i.MX6Q 2009 * document. 2010 */ 2011 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000); 2012 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2013 mii_speed--; 2014 if (mii_speed > 63) { 2015 dev_err(&pdev->dev, 2016 "fec clock (%lu) too fast to get right mii speed\n", 2017 clk_get_rate(fep->clk_ipg)); 2018 err = -EINVAL; 2019 goto err_out; 2020 } 2021 2022 /* 2023 * The i.MX28 and i.MX6 types have another filed in the MSCR (aka 2024 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2025 * versions are RAZ there, so just ignore the difference and write the 2026 * register always. 2027 * The minimal hold time according to IEE802.3 (clause 22) is 10 ns. 2028 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2029 * output. 2030 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2031 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2032 * holdtime cannot result in a value greater than 3. 
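	 *
	 * For example, assuming a typical 66 MHz ipg clock:
	 *   holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one
	 *   hold cycle of ~15 ns; a 266 MHz clock would give holdtime = 2,
	 *   i.e. three cycles of ~11 ns. Both satisfy the 10 ns minimum.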
2033 */ 2034 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2035 2036 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2037 2038 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2039 2040 fep->mii_bus = mdiobus_alloc(); 2041 if (fep->mii_bus == NULL) { 2042 err = -ENOMEM; 2043 goto err_out; 2044 } 2045 2046 fep->mii_bus->name = "fec_enet_mii_bus"; 2047 fep->mii_bus->read = fec_enet_mdio_read; 2048 fep->mii_bus->write = fec_enet_mdio_write; 2049 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2050 pdev->name, fep->dev_id + 1); 2051 fep->mii_bus->priv = fep; 2052 fep->mii_bus->parent = &pdev->dev; 2053 2054 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2055 err = of_mdiobus_register(fep->mii_bus, node); 2056 if (node) 2057 of_node_put(node); 2058 if (err) 2059 goto err_out_free_mdiobus; 2060 2061 mii_cnt++; 2062 2063 /* save fec0 mii_bus */ 2064 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2065 fec0_mii_bus = fep->mii_bus; 2066 2067 return 0; 2068 2069 err_out_free_mdiobus: 2070 mdiobus_free(fep->mii_bus); 2071 err_out: 2072 return err; 2073 } 2074 2075 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2076 { 2077 if (--mii_cnt == 0) { 2078 mdiobus_unregister(fep->mii_bus); 2079 mdiobus_free(fep->mii_bus); 2080 } 2081 } 2082 2083 static void fec_enet_get_drvinfo(struct net_device *ndev, 2084 struct ethtool_drvinfo *info) 2085 { 2086 struct fec_enet_private *fep = netdev_priv(ndev); 2087 2088 strlcpy(info->driver, fep->pdev->dev.driver->name, 2089 sizeof(info->driver)); 2090 strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); 2091 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2092 } 2093 2094 static int fec_enet_get_regs_len(struct net_device *ndev) 2095 { 2096 struct fec_enet_private *fep = netdev_priv(ndev); 2097 struct resource *r; 2098 int s = 0; 2099 2100 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2101 if (r) 2102 s = resource_size(r); 2103 2104 return s; 2105 } 2106 2107 /* List of registers that can be safety be read to dump them with ethtool */ 2108 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2109 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2110 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2111 static u32 fec_enet_register_offset[] = { 2112 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2113 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2114 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2115 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2116 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2117 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2118 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2119 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2120 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2121 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2122 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2123 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2124 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2125 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2126 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2127 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2128 RMON_T_P_GTE2048, RMON_T_OCTETS, 2129 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, 
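/* Worked example for the MII_SPEED/HOLDTIME programming above (illustrative
 * only; the 66 MHz ipg clock is a hypothetical figure, not a claim about any
 * particular SoC):
 *
 *	mii_speed = DIV_ROUND_UP(66000000, 5000000) = 14
 *	ENET-MAC variants subtract one        -> MII_SPEED = 13
 *	MDC = 66000000 / ((13 + 1) * 2)       ~= 2.36 MHz (<= 2.5 MHz target)
 *
 *	holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0
 *	hold = (holdtime + 1) ipg cycles      ~= 15 ns (>= 10 ns required)
 *
 * The same arithmetic as a helper, using the formulas from the comments
 * above:
 *
 *	static inline u32 fec_mscr(unsigned long ipg_hz, bool enet_mac)
 *	{
 *		u32 mii_speed = DIV_ROUND_UP(ipg_hz, 5000000);
 *		u32 holdtime = DIV_ROUND_UP(ipg_hz, 100000000) - 1;
 *
 *		if (enet_mac)
 *			mii_speed--;
 *		return mii_speed << 1 | holdtime << 8;
 *	}
 */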
IEEE_T_MCOL, IEEE_T_DEF, 2130 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2131 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2132 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2133 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2134 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2135 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2136 RMON_R_P_GTE2048, RMON_R_OCTETS, 2137 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2138 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2139 }; 2140 #else 2141 static u32 fec_enet_register_offset[] = { 2142 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2143 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2144 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2145 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2146 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2147 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2148 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2149 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2150 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2151 }; 2152 #endif 2153 2154 static void fec_enet_get_regs(struct net_device *ndev, 2155 struct ethtool_regs *regs, void *regbuf) 2156 { 2157 struct fec_enet_private *fep = netdev_priv(ndev); 2158 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2159 u32 *buf = (u32 *)regbuf; 2160 u32 i, off; 2161 2162 memset(buf, 0, regs->len); 2163 2164 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2165 off = fec_enet_register_offset[i] / 4; 2166 buf[off] = readl(&theregs[off]); 2167 } 2168 } 2169 2170 static int fec_enet_get_ts_info(struct net_device *ndev, 2171 struct ethtool_ts_info *info) 2172 { 2173 struct fec_enet_private *fep = netdev_priv(ndev); 2174 2175 if (fep->bufdesc_ex) { 2176 2177 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2178 SOF_TIMESTAMPING_RX_SOFTWARE | 2179 SOF_TIMESTAMPING_SOFTWARE | 2180 SOF_TIMESTAMPING_TX_HARDWARE | 2181 SOF_TIMESTAMPING_RX_HARDWARE | 2182 SOF_TIMESTAMPING_RAW_HARDWARE; 2183 if (fep->ptp_clock) 2184 info->phc_index = ptp_clock_index(fep->ptp_clock); 2185 else 2186 info->phc_index = -1; 2187 2188 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2189 (1 << HWTSTAMP_TX_ON); 2190 2191 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2192 (1 << HWTSTAMP_FILTER_ALL); 2193 return 0; 2194 } else { 2195 return ethtool_op_get_ts_info(ndev, info); 2196 } 2197 } 2198 2199 #if !defined(CONFIG_M5272) 2200 2201 static void fec_enet_get_pauseparam(struct net_device *ndev, 2202 struct ethtool_pauseparam *pause) 2203 { 2204 struct fec_enet_private *fep = netdev_priv(ndev); 2205 2206 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2207 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2208 pause->rx_pause = pause->tx_pause; 2209 } 2210 2211 static int fec_enet_set_pauseparam(struct net_device *ndev, 2212 struct ethtool_pauseparam *pause) 2213 { 2214 struct fec_enet_private *fep = netdev_priv(ndev); 2215 2216 if (!ndev->phydev) 2217 return -ENODEV; 2218 2219 if (pause->tx_pause != pause->rx_pause) { 2220 netdev_info(ndev, 2221 "hardware only support enable/disable both tx and rx"); 2222 return -EINVAL; 2223 } 2224 2225 fep->pause_flag = 0; 2226 2227 /* tx pause must be same as rx pause */ 2228 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2229 fep->pause_flag |= pause->autoneg ? 
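/* Note on fec_enet_get_regs() above (illustrative): the blob returned to
 * ethtool mirrors the register file byte-for-byte.  Each offset listed in
 * fec_enet_register_offset[] is divided by four to index the u32 buffer, so
 * a register at byte offset 0x84 lands in buf[0x21]; offsets that are not in
 * the table stay zero from the initial memset.
 *
 *	static inline u32 fec_regs_blob_index(u32 reg_offset)
 *	{
 *		return reg_offset / 4;	// one u32 slot per register
 *	}
 */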
FEC_PAUSE_FLAG_AUTONEG : 0; 2230 2231 if (pause->rx_pause || pause->autoneg) { 2232 ndev->phydev->supported |= ADVERTISED_Pause; 2233 ndev->phydev->advertising |= ADVERTISED_Pause; 2234 } else { 2235 ndev->phydev->supported &= ~ADVERTISED_Pause; 2236 ndev->phydev->advertising &= ~ADVERTISED_Pause; 2237 } 2238 2239 if (pause->autoneg) { 2240 if (netif_running(ndev)) 2241 fec_stop(ndev); 2242 phy_start_aneg(ndev->phydev); 2243 } 2244 if (netif_running(ndev)) { 2245 napi_disable(&fep->napi); 2246 netif_tx_lock_bh(ndev); 2247 fec_restart(ndev); 2248 netif_wake_queue(ndev); 2249 netif_tx_unlock_bh(ndev); 2250 napi_enable(&fep->napi); 2251 } 2252 2253 return 0; 2254 } 2255 2256 static const struct fec_stat { 2257 char name[ETH_GSTRING_LEN]; 2258 u16 offset; 2259 } fec_stats[] = { 2260 /* RMON TX */ 2261 { "tx_dropped", RMON_T_DROP }, 2262 { "tx_packets", RMON_T_PACKETS }, 2263 { "tx_broadcast", RMON_T_BC_PKT }, 2264 { "tx_multicast", RMON_T_MC_PKT }, 2265 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2266 { "tx_undersize", RMON_T_UNDERSIZE }, 2267 { "tx_oversize", RMON_T_OVERSIZE }, 2268 { "tx_fragment", RMON_T_FRAG }, 2269 { "tx_jabber", RMON_T_JAB }, 2270 { "tx_collision", RMON_T_COL }, 2271 { "tx_64byte", RMON_T_P64 }, 2272 { "tx_65to127byte", RMON_T_P65TO127 }, 2273 { "tx_128to255byte", RMON_T_P128TO255 }, 2274 { "tx_256to511byte", RMON_T_P256TO511 }, 2275 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2276 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2277 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2278 { "tx_octets", RMON_T_OCTETS }, 2279 2280 /* IEEE TX */ 2281 { "IEEE_tx_drop", IEEE_T_DROP }, 2282 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2283 { "IEEE_tx_1col", IEEE_T_1COL }, 2284 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2285 { "IEEE_tx_def", IEEE_T_DEF }, 2286 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2287 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2288 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2289 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2290 { "IEEE_tx_sqe", IEEE_T_SQE }, 2291 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2292 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2293 2294 /* RMON RX */ 2295 { "rx_packets", RMON_R_PACKETS }, 2296 { "rx_broadcast", RMON_R_BC_PKT }, 2297 { "rx_multicast", RMON_R_MC_PKT }, 2298 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2299 { "rx_undersize", RMON_R_UNDERSIZE }, 2300 { "rx_oversize", RMON_R_OVERSIZE }, 2301 { "rx_fragment", RMON_R_FRAG }, 2302 { "rx_jabber", RMON_R_JAB }, 2303 { "rx_64byte", RMON_R_P64 }, 2304 { "rx_65to127byte", RMON_R_P65TO127 }, 2305 { "rx_128to255byte", RMON_R_P128TO255 }, 2306 { "rx_256to511byte", RMON_R_P256TO511 }, 2307 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2308 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2309 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2310 { "rx_octets", RMON_R_OCTETS }, 2311 2312 /* IEEE RX */ 2313 { "IEEE_rx_drop", IEEE_R_DROP }, 2314 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2315 { "IEEE_rx_crc", IEEE_R_CRC }, 2316 { "IEEE_rx_align", IEEE_R_ALIGN }, 2317 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2318 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2319 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2320 }; 2321 2322 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2323 2324 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2325 { 2326 struct fec_enet_private *fep = netdev_priv(dev); 2327 int i; 2328 2329 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2330 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2331 } 2332 2333 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2334 struct ethtool_stats *stats, u64 *data) 2335 { 2336 struct 
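/* Illustrative note (not part of the driver): fec_stats[] pairs each ethtool
 * statistics name with the offset of the matching MIB counter register, and
 * fec_enet_update_ethtool_stats() latches all of them into
 * fep->ethtool_stats[].  Reading one counter through the same table would
 * look like this (hypothetical helper):
 *
 *	static inline u64 fec_read_stat(struct fec_enet_private *fep, int i)
 *	{
 *		// i indexes fec_stats[]; the MIB counters are 32-bit registers
 *		return readl(fep->hwp + fec_stats[i].offset);
 *	}
 *
 * The snapshot is refreshed while the interface is running and once more on
 * close(), so `ethtool -S` keeps reporting the last values after the clocks
 * have been gated off.
 */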
fec_enet_private *fep = netdev_priv(dev); 2337 2338 if (netif_running(dev)) 2339 fec_enet_update_ethtool_stats(dev); 2340 2341 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 2342 } 2343 2344 static void fec_enet_get_strings(struct net_device *netdev, 2345 u32 stringset, u8 *data) 2346 { 2347 int i; 2348 switch (stringset) { 2349 case ETH_SS_STATS: 2350 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2351 memcpy(data + i * ETH_GSTRING_LEN, 2352 fec_stats[i].name, ETH_GSTRING_LEN); 2353 break; 2354 } 2355 } 2356 2357 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2358 { 2359 switch (sset) { 2360 case ETH_SS_STATS: 2361 return ARRAY_SIZE(fec_stats); 2362 default: 2363 return -EOPNOTSUPP; 2364 } 2365 } 2366 2367 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 2368 { 2369 struct fec_enet_private *fep = netdev_priv(dev); 2370 int i; 2371 2372 /* Disable MIB statistics counters */ 2373 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 2374 2375 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2376 writel(0, fep->hwp + fec_stats[i].offset); 2377 2378 /* Don't disable MIB statistics counters */ 2379 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 2380 } 2381 2382 #else /* !defined(CONFIG_M5272) */ 2383 #define FEC_STATS_SIZE 0 2384 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 2385 { 2386 } 2387 2388 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 2389 { 2390 } 2391 #endif /* !defined(CONFIG_M5272) */ 2392 2393 /* ITR clock source is enet system clock (clk_ahb). 2394 * TCTT unit is cycle_ns * 64 cycle 2395 * So, the ICTT value = X us / (cycle_ns * 64) 2396 */ 2397 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2398 { 2399 struct fec_enet_private *fep = netdev_priv(ndev); 2400 2401 return us * (fep->itr_clk_rate / 64000) / 1000; 2402 } 2403 2404 /* Set threshold for interrupt coalescing */ 2405 static void fec_enet_itr_coal_set(struct net_device *ndev) 2406 { 2407 struct fec_enet_private *fep = netdev_priv(ndev); 2408 int rx_itr, tx_itr; 2409 2410 /* Must be greater than zero to avoid unpredictable behavior */ 2411 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2412 !fep->tx_time_itr || !fep->tx_pkts_itr) 2413 return; 2414 2415 /* Select enet system clock as Interrupt Coalescing 2416 * timer Clock Source 2417 */ 2418 rx_itr = FEC_ITR_CLK_SEL; 2419 tx_itr = FEC_ITR_CLK_SEL; 2420 2421 /* set ICFT and ICTT */ 2422 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2423 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2424 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2425 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2426 2427 rx_itr |= FEC_ITR_EN; 2428 tx_itr |= FEC_ITR_EN; 2429 2430 writel(tx_itr, fep->hwp + FEC_TXIC0); 2431 writel(rx_itr, fep->hwp + FEC_RXIC0); 2432 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 2433 writel(tx_itr, fep->hwp + FEC_TXIC1); 2434 writel(rx_itr, fep->hwp + FEC_RXIC1); 2435 writel(tx_itr, fep->hwp + FEC_TXIC2); 2436 writel(rx_itr, fep->hwp + FEC_RXIC2); 2437 } 2438 } 2439 2440 static int 2441 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2442 { 2443 struct fec_enet_private *fep = netdev_priv(ndev); 2444 2445 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 2446 return -EOPNOTSUPP; 2447 2448 ec->rx_coalesce_usecs = fep->rx_time_itr; 2449 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 2450 2451 ec->tx_coalesce_usecs = fep->tx_time_itr; 2452 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 2453 2454 return 0; 
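/* Worked example for fec_enet_us_to_itr_clock() above (illustrative; the
 * 66 MHz AHB clock is a hypothetical figure):
 *
 *	one ITR tick = 64 / 66000000 s                ~= 0.97 us
 *	ticks(us)    = us * (66000000 / 64000) / 1000 = us * 1031 / 1000
 *
 * so a coalescing time of 1000 us programs roughly 1031 ticks into the ICTT
 * field.  The conversion as a standalone helper:
 *
 *	static inline int fec_us_to_itr_ticks(unsigned long itr_clk_rate, int us)
 *	{
 *		return us * (itr_clk_rate / 64000) / 1000;
 *	}
 */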
2455 } 2456 2457 static int 2458 fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2459 { 2460 struct fec_enet_private *fep = netdev_priv(ndev); 2461 unsigned int cycle; 2462 2463 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 2464 return -EOPNOTSUPP; 2465 2466 if (ec->rx_max_coalesced_frames > 255) { 2467 pr_err("Rx coalesced frames exceed hardware limitation\n"); 2468 return -EINVAL; 2469 } 2470 2471 if (ec->tx_max_coalesced_frames > 255) { 2472 pr_err("Tx coalesced frames exceed hardware limitation\n"); 2473 return -EINVAL; 2474 } 2475 2476 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 2477 if (cycle > 0xFFFF) { 2478 pr_err("Rx coalesced usec exceed hardware limitation\n"); 2479 return -EINVAL; 2480 } 2481 2482 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 2483 if (cycle > 0xFFFF) { 2484 pr_err("Tx coalesced usec exceed hardware limitation\n"); 2485 return -EINVAL; 2486 } 2487 2488 fep->rx_time_itr = ec->rx_coalesce_usecs; 2489 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 2490 2491 fep->tx_time_itr = ec->tx_coalesce_usecs; 2492 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 2493 2494 fec_enet_itr_coal_set(ndev); 2495 2496 return 0; 2497 } 2498 2499 static void fec_enet_itr_coal_init(struct net_device *ndev) 2500 { 2501 struct ethtool_coalesce ec; 2502 2503 ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2504 ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2505 2506 ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; 2507 ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; 2508 2509 fec_enet_set_coalesce(ndev, &ec); 2510 } 2511 2512 static int fec_enet_get_tunable(struct net_device *netdev, 2513 const struct ethtool_tunable *tuna, 2514 void *data) 2515 { 2516 struct fec_enet_private *fep = netdev_priv(netdev); 2517 int ret = 0; 2518 2519 switch (tuna->id) { 2520 case ETHTOOL_RX_COPYBREAK: 2521 *(u32 *)data = fep->rx_copybreak; 2522 break; 2523 default: 2524 ret = -EINVAL; 2525 break; 2526 } 2527 2528 return ret; 2529 } 2530 2531 static int fec_enet_set_tunable(struct net_device *netdev, 2532 const struct ethtool_tunable *tuna, 2533 const void *data) 2534 { 2535 struct fec_enet_private *fep = netdev_priv(netdev); 2536 int ret = 0; 2537 2538 switch (tuna->id) { 2539 case ETHTOOL_RX_COPYBREAK: 2540 fep->rx_copybreak = *(u32 *)data; 2541 break; 2542 default: 2543 ret = -EINVAL; 2544 break; 2545 } 2546 2547 return ret; 2548 } 2549 2550 static void 2551 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2552 { 2553 struct fec_enet_private *fep = netdev_priv(ndev); 2554 2555 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 2556 wol->supported = WAKE_MAGIC; 2557 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ?
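/* Worked limits for the checks above (illustrative; the 66 MHz AHB clock is a
 * hypothetical figure): ICFT is an 8-bit frame counter, hence the 255 cap,
 * and ICTT is a 16-bit tick counter, hence the 0xFFFF cap.  With one tick of
 * roughly 0.97 us this bounds the usable coalescing window at about
 *
 *	0xFFFF * 64 / 66000000 s ~= 63.5 ms
 *
 * per direction; larger `ethtool -C` requests are rejected with -EINVAL
 * before anything is written to the hardware.  A helper expressing the time
 * check:
 *
 *	static inline bool fec_itr_usecs_ok(unsigned long itr_clk_rate, int us)
 *	{
 *		return us * (itr_clk_rate / 64000) / 1000 <= 0xFFFF;
 *	}
 */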
WAKE_MAGIC : 0; 2558 } else { 2559 wol->supported = wol->wolopts = 0; 2560 } 2561 } 2562 2563 static int 2564 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2565 { 2566 struct fec_enet_private *fep = netdev_priv(ndev); 2567 2568 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 2569 return -EINVAL; 2570 2571 if (wol->wolopts & ~WAKE_MAGIC) 2572 return -EINVAL; 2573 2574 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 2575 if (device_may_wakeup(&ndev->dev)) { 2576 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 2577 if (fep->irq[0] > 0) 2578 enable_irq_wake(fep->irq[0]); 2579 } else { 2580 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 2581 if (fep->irq[0] > 0) 2582 disable_irq_wake(fep->irq[0]); 2583 } 2584 2585 return 0; 2586 } 2587 2588 static const struct ethtool_ops fec_enet_ethtool_ops = { 2589 .get_drvinfo = fec_enet_get_drvinfo, 2590 .get_regs_len = fec_enet_get_regs_len, 2591 .get_regs = fec_enet_get_regs, 2592 .nway_reset = phy_ethtool_nway_reset, 2593 .get_link = ethtool_op_get_link, 2594 .get_coalesce = fec_enet_get_coalesce, 2595 .set_coalesce = fec_enet_set_coalesce, 2596 #ifndef CONFIG_M5272 2597 .get_pauseparam = fec_enet_get_pauseparam, 2598 .set_pauseparam = fec_enet_set_pauseparam, 2599 .get_strings = fec_enet_get_strings, 2600 .get_ethtool_stats = fec_enet_get_ethtool_stats, 2601 .get_sset_count = fec_enet_get_sset_count, 2602 #endif 2603 .get_ts_info = fec_enet_get_ts_info, 2604 .get_tunable = fec_enet_get_tunable, 2605 .set_tunable = fec_enet_set_tunable, 2606 .get_wol = fec_enet_get_wol, 2607 .set_wol = fec_enet_set_wol, 2608 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2609 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2610 }; 2611 2612 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2613 { 2614 struct fec_enet_private *fep = netdev_priv(ndev); 2615 struct phy_device *phydev = ndev->phydev; 2616 2617 if (!netif_running(ndev)) 2618 return -EINVAL; 2619 2620 if (!phydev) 2621 return -ENODEV; 2622 2623 if (fep->bufdesc_ex) { 2624 if (cmd == SIOCSHWTSTAMP) 2625 return fec_ptp_set(ndev, rq); 2626 if (cmd == SIOCGHWTSTAMP) 2627 return fec_ptp_get(ndev, rq); 2628 } 2629 2630 return phy_mii_ioctl(phydev, rq, cmd); 2631 } 2632 2633 static void fec_enet_free_buffers(struct net_device *ndev) 2634 { 2635 struct fec_enet_private *fep = netdev_priv(ndev); 2636 unsigned int i; 2637 struct sk_buff *skb; 2638 struct bufdesc *bdp; 2639 struct fec_enet_priv_tx_q *txq; 2640 struct fec_enet_priv_rx_q *rxq; 2641 unsigned int q; 2642 2643 for (q = 0; q < fep->num_rx_queues; q++) { 2644 rxq = fep->rx_queue[q]; 2645 bdp = rxq->bd.base; 2646 for (i = 0; i < rxq->bd.ring_size; i++) { 2647 skb = rxq->rx_skbuff[i]; 2648 rxq->rx_skbuff[i] = NULL; 2649 if (skb) { 2650 dma_unmap_single(&fep->pdev->dev, 2651 fec32_to_cpu(bdp->cbd_bufaddr), 2652 FEC_ENET_RX_FRSIZE - fep->rx_align, 2653 DMA_FROM_DEVICE); 2654 dev_kfree_skb(skb); 2655 } 2656 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2657 } 2658 } 2659 2660 for (q = 0; q < fep->num_tx_queues; q++) { 2661 txq = fep->tx_queue[q]; 2662 bdp = txq->bd.base; 2663 for (i = 0; i < txq->bd.ring_size; i++) { 2664 kfree(txq->tx_bounce[i]); 2665 txq->tx_bounce[i] = NULL; 2666 skb = txq->tx_skbuff[i]; 2667 txq->tx_skbuff[i] = NULL; 2668 dev_kfree_skb(skb); 2669 } 2670 } 2671 } 2672 2673 static void fec_enet_free_queue(struct net_device *ndev) 2674 { 2675 struct fec_enet_private *fep = netdev_priv(ndev); 2676 int i; 2677 struct fec_enet_priv_tx_q *txq; 2678 2679 for (i = 0; i < 
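/* Note on the queue allocation below (illustrative figures only): each TX
 * queue keeps two watermarks that the transmit path, earlier in this file,
 * uses to stop the netdev queue when descriptors run low and to wake it once
 * enough have been reclaimed.  With a hypothetical 512-entry ring and a stop
 * threshold of 80 descriptors (the real values come from TX_RING_SIZE and
 * FEC_MAX_SKB_DESCS in fec.h):
 *
 *	tx_stop_threshold = 80
 *	tx_wake_threshold = (512 - 80) / 2 = 216
 *
 *	static inline unsigned int fec_tx_wake_threshold(unsigned int ring_size,
 *							 unsigned int stop)
 *	{
 *		return (ring_size - stop) / 2;
 *	}
 */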
fep->num_tx_queues; i++) 2680 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 2681 txq = fep->tx_queue[i]; 2682 dma_free_coherent(&fep->pdev->dev, 2683 txq->bd.ring_size * TSO_HEADER_SIZE, 2684 txq->tso_hdrs, 2685 txq->tso_hdrs_dma); 2686 } 2687 2688 for (i = 0; i < fep->num_rx_queues; i++) 2689 kfree(fep->rx_queue[i]); 2690 for (i = 0; i < fep->num_tx_queues; i++) 2691 kfree(fep->tx_queue[i]); 2692 } 2693 2694 static int fec_enet_alloc_queue(struct net_device *ndev) 2695 { 2696 struct fec_enet_private *fep = netdev_priv(ndev); 2697 int i; 2698 int ret = 0; 2699 struct fec_enet_priv_tx_q *txq; 2700 2701 for (i = 0; i < fep->num_tx_queues; i++) { 2702 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 2703 if (!txq) { 2704 ret = -ENOMEM; 2705 goto alloc_failed; 2706 } 2707 2708 fep->tx_queue[i] = txq; 2709 txq->bd.ring_size = TX_RING_SIZE; 2710 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 2711 2712 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 2713 txq->tx_wake_threshold = 2714 (txq->bd.ring_size - txq->tx_stop_threshold) / 2; 2715 2716 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, 2717 txq->bd.ring_size * TSO_HEADER_SIZE, 2718 &txq->tso_hdrs_dma, 2719 GFP_KERNEL); 2720 if (!txq->tso_hdrs) { 2721 ret = -ENOMEM; 2722 goto alloc_failed; 2723 } 2724 } 2725 2726 for (i = 0; i < fep->num_rx_queues; i++) { 2727 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 2728 GFP_KERNEL); 2729 if (!fep->rx_queue[i]) { 2730 ret = -ENOMEM; 2731 goto alloc_failed; 2732 } 2733 2734 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 2735 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 2736 } 2737 return ret; 2738 2739 alloc_failed: 2740 fec_enet_free_queue(ndev); 2741 return ret; 2742 } 2743 2744 static int 2745 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 2746 { 2747 struct fec_enet_private *fep = netdev_priv(ndev); 2748 unsigned int i; 2749 struct sk_buff *skb; 2750 struct bufdesc *bdp; 2751 struct fec_enet_priv_rx_q *rxq; 2752 2753 rxq = fep->rx_queue[queue]; 2754 bdp = rxq->bd.base; 2755 for (i = 0; i < rxq->bd.ring_size; i++) { 2756 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 2757 if (!skb) 2758 goto err_alloc; 2759 2760 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { 2761 dev_kfree_skb(skb); 2762 goto err_alloc; 2763 } 2764 2765 rxq->rx_skbuff[i] = skb; 2766 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 2767 2768 if (fep->bufdesc_ex) { 2769 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2770 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 2771 } 2772 2773 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2774 } 2775 2776 /* Set the last buffer to wrap. 
*/ 2777 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 2778 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2779 return 0; 2780 2781 err_alloc: 2782 fec_enet_free_buffers(ndev); 2783 return -ENOMEM; 2784 } 2785 2786 static int 2787 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 2788 { 2789 struct fec_enet_private *fep = netdev_priv(ndev); 2790 unsigned int i; 2791 struct bufdesc *bdp; 2792 struct fec_enet_priv_tx_q *txq; 2793 2794 txq = fep->tx_queue[queue]; 2795 bdp = txq->bd.base; 2796 for (i = 0; i < txq->bd.ring_size; i++) { 2797 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 2798 if (!txq->tx_bounce[i]) 2799 goto err_alloc; 2800 2801 bdp->cbd_sc = cpu_to_fec16(0); 2802 bdp->cbd_bufaddr = cpu_to_fec32(0); 2803 2804 if (fep->bufdesc_ex) { 2805 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2806 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 2807 } 2808 2809 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 2810 } 2811 2812 /* Set the last buffer to wrap. */ 2813 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 2814 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2815 2816 return 0; 2817 2818 err_alloc: 2819 fec_enet_free_buffers(ndev); 2820 return -ENOMEM; 2821 } 2822 2823 static int fec_enet_alloc_buffers(struct net_device *ndev) 2824 { 2825 struct fec_enet_private *fep = netdev_priv(ndev); 2826 unsigned int i; 2827 2828 for (i = 0; i < fep->num_rx_queues; i++) 2829 if (fec_enet_alloc_rxq_buffers(ndev, i)) 2830 return -ENOMEM; 2831 2832 for (i = 0; i < fep->num_tx_queues; i++) 2833 if (fec_enet_alloc_txq_buffers(ndev, i)) 2834 return -ENOMEM; 2835 return 0; 2836 } 2837 2838 static int 2839 fec_enet_open(struct net_device *ndev) 2840 { 2841 struct fec_enet_private *fep = netdev_priv(ndev); 2842 int ret; 2843 bool reset_again; 2844 2845 ret = pm_runtime_get_sync(&fep->pdev->dev); 2846 if (ret < 0) 2847 return ret; 2848 2849 pinctrl_pm_select_default_state(&fep->pdev->dev); 2850 ret = fec_enet_clk_enable(ndev, true); 2851 if (ret) 2852 goto clk_enable; 2853 2854 /* During the first fec_enet_open call the PHY isn't probed at this 2855 * point. Therefore the phy_reset_after_clk_enable() call within 2856 * fec_enet_clk_enable() fails. As we need this reset in order to be 2857 * sure the PHY is working correctly we check if we need to reset again 2858 * later when the PHY is probed 2859 */ 2860 if (ndev->phydev && ndev->phydev->drv) 2861 reset_again = false; 2862 else 2863 reset_again = true; 2864 2865 /* I should reset the ring buffers here, but I don't yet know 2866 * a simple way to do that. 2867 */ 2868 2869 ret = fec_enet_alloc_buffers(ndev); 2870 if (ret) 2871 goto err_enet_alloc; 2872 2873 /* Init MAC prior to mii bus probe */ 2874 fec_restart(ndev); 2875 2876 /* Probe and connect to PHY when open the interface */ 2877 ret = fec_enet_mii_probe(ndev); 2878 if (ret) 2879 goto err_enet_mii_probe; 2880 2881 /* Call phy_reset_after_clk_enable() again if it failed during 2882 * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
2883 */ 2884 if (reset_again) 2885 phy_reset_after_clk_enable(ndev->phydev); 2886 2887 if (fep->quirks & FEC_QUIRK_ERR006687) 2888 imx6q_cpuidle_fec_irqs_used(); 2889 2890 napi_enable(&fep->napi); 2891 phy_start(ndev->phydev); 2892 netif_tx_start_all_queues(ndev); 2893 2894 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 2895 FEC_WOL_FLAG_ENABLE); 2896 2897 return 0; 2898 2899 err_enet_mii_probe: 2900 fec_enet_free_buffers(ndev); 2901 err_enet_alloc: 2902 fec_enet_clk_enable(ndev, false); 2903 clk_enable: 2904 pm_runtime_mark_last_busy(&fep->pdev->dev); 2905 pm_runtime_put_autosuspend(&fep->pdev->dev); 2906 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2907 return ret; 2908 } 2909 2910 static int 2911 fec_enet_close(struct net_device *ndev) 2912 { 2913 struct fec_enet_private *fep = netdev_priv(ndev); 2914 2915 phy_stop(ndev->phydev); 2916 2917 if (netif_device_present(ndev)) { 2918 napi_disable(&fep->napi); 2919 netif_tx_disable(ndev); 2920 fec_stop(ndev); 2921 } 2922 2923 phy_disconnect(ndev->phydev); 2924 2925 if (fep->quirks & FEC_QUIRK_ERR006687) 2926 imx6q_cpuidle_fec_irqs_unused(); 2927 2928 fec_enet_update_ethtool_stats(ndev); 2929 2930 fec_enet_clk_enable(ndev, false); 2931 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2932 pm_runtime_mark_last_busy(&fep->pdev->dev); 2933 pm_runtime_put_autosuspend(&fep->pdev->dev); 2934 2935 fec_enet_free_buffers(ndev); 2936 2937 return 0; 2938 } 2939 2940 /* Set or clear the multicast filter for this adaptor. 2941 * Skeleton taken from sunlance driver. 2942 * The CPM Ethernet implementation allows Multicast as well as individual 2943 * MAC address filtering. Some of the drivers check to make sure it is 2944 * a group multicast address, and discard those that are not. I guess I 2945 * will do the same for now, but just remove the test if you want 2946 * individual filtering as well (do the upper net layers want or support 2947 * this kind of feature?). 2948 */ 2949 2950 #define FEC_HASH_BITS 6 /* #bits in hash */ 2951 #define CRC32_POLY 0xEDB88320 2952 2953 static void set_multicast_list(struct net_device *ndev) 2954 { 2955 struct fec_enet_private *fep = netdev_priv(ndev); 2956 struct netdev_hw_addr *ha; 2957 unsigned int i, bit, data, crc, tmp; 2958 unsigned char hash; 2959 unsigned int hash_high = 0, hash_low = 0; 2960 2961 if (ndev->flags & IFF_PROMISC) { 2962 tmp = readl(fep->hwp + FEC_R_CNTRL); 2963 tmp |= 0x8; 2964 writel(tmp, fep->hwp + FEC_R_CNTRL); 2965 return; 2966 } 2967 2968 tmp = readl(fep->hwp + FEC_R_CNTRL); 2969 tmp &= ~0x8; 2970 writel(tmp, fep->hwp + FEC_R_CNTRL); 2971 2972 if (ndev->flags & IFF_ALLMULTI) { 2973 /* Catch all multicast addresses, so set the 2974 * filter to all 1's 2975 */ 2976 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2977 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2978 2979 return; 2980 } 2981 2982 /* Add the addresses in hash register */ 2983 netdev_for_each_mc_addr(ha, ndev) { 2984 /* calculate crc32 value of mac address */ 2985 crc = 0xffffffff; 2986 2987 for (i = 0; i < ndev->addr_len; i++) { 2988 data = ha->addr[i]; 2989 for (bit = 0; bit < 8; bit++, data >>= 1) { 2990 crc = (crc >> 1) ^ 2991 (((crc ^ data) & 1) ? 
CRC32_POLY : 0); 2992 } 2993 } 2994 2995 /* only upper 6 bits (FEC_HASH_BITS) are used 2996 * which point to specific bit in the hash registers 2997 */ 2998 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 2999 3000 if (hash > 31) 3001 hash_high |= 1 << (hash - 32); 3002 else 3003 hash_low |= 1 << hash; 3004 } 3005 3006 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3007 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3008 } 3009 3010 /* Set a MAC change in hardware. */ 3011 static int 3012 fec_set_mac_address(struct net_device *ndev, void *p) 3013 { 3014 struct fec_enet_private *fep = netdev_priv(ndev); 3015 struct sockaddr *addr = p; 3016 3017 if (addr) { 3018 if (!is_valid_ether_addr(addr->sa_data)) 3019 return -EADDRNOTAVAIL; 3020 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3021 } 3022 3023 /* Add netif status check here to avoid system hang in below case: 3024 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3025 * After ethx down, fec all clocks are gated off and then register 3026 * access causes system hang. 3027 */ 3028 if (!netif_running(ndev)) 3029 return 0; 3030 3031 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3032 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3033 fep->hwp + FEC_ADDR_LOW); 3034 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3035 fep->hwp + FEC_ADDR_HIGH); 3036 return 0; 3037 } 3038 3039 #ifdef CONFIG_NET_POLL_CONTROLLER 3040 /** 3041 * fec_poll_controller - FEC Poll controller function 3042 * @dev: The FEC network adapter 3043 * 3044 * Polled functionality used by netconsole and others in non interrupt mode 3045 * 3046 */ 3047 static void fec_poll_controller(struct net_device *dev) 3048 { 3049 int i; 3050 struct fec_enet_private *fep = netdev_priv(dev); 3051 3052 for (i = 0; i < FEC_IRQ_NUM; i++) { 3053 if (fep->irq[i] > 0) { 3054 disable_irq(fep->irq[i]); 3055 fec_enet_interrupt(fep->irq[i], dev); 3056 enable_irq(fep->irq[i]); 3057 } 3058 } 3059 } 3060 #endif 3061 3062 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3063 netdev_features_t features) 3064 { 3065 struct fec_enet_private *fep = netdev_priv(netdev); 3066 netdev_features_t changed = features ^ netdev->features; 3067 3068 netdev->features = features; 3069 3070 /* Receive checksum has been changed */ 3071 if (changed & NETIF_F_RXCSUM) { 3072 if (features & NETIF_F_RXCSUM) 3073 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3074 else 3075 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3076 } 3077 } 3078 3079 static int fec_set_features(struct net_device *netdev, 3080 netdev_features_t features) 3081 { 3082 struct fec_enet_private *fep = netdev_priv(netdev); 3083 netdev_features_t changed = features ^ netdev->features; 3084 3085 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3086 napi_disable(&fep->napi); 3087 netif_tx_lock_bh(netdev); 3088 fec_stop(netdev); 3089 fec_enet_set_netdev_features(netdev, features); 3090 fec_restart(netdev); 3091 netif_tx_wake_all_queues(netdev); 3092 netif_tx_unlock_bh(netdev); 3093 napi_enable(&fep->napi); 3094 } else { 3095 fec_enet_set_netdev_features(netdev, features); 3096 } 3097 3098 return 0; 3099 } 3100 3101 static const struct net_device_ops fec_netdev_ops = { 3102 .ndo_open = fec_enet_open, 3103 .ndo_stop = fec_enet_close, 3104 .ndo_start_xmit = fec_enet_start_xmit, 3105 .ndo_set_rx_mode = set_multicast_list, 3106 .ndo_validate_addr = eth_validate_addr, 3107 .ndo_tx_timeout = fec_timeout, 3108 .ndo_set_mac_address = fec_set_mac_address, 3109 .ndo_do_ioctl = 
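/* Illustrative sketch (not part of the driver): the nested loop above is a
 * bitwise CRC-32 over the multicast address, using the reflected polynomial
 * CRC32_POLY.  Only the top FEC_HASH_BITS (6) bits of the result are kept,
 * selecting one of the 64 filter bits spread across GRP_HASH_TABLE_HIGH and
 * GRP_HASH_TABLE_LOW.  The same computation as a standalone helper:
 *
 *	static inline unsigned int fec_mc_hash(const unsigned char *mac, int len)
 *	{
 *		unsigned int crc = 0xffffffff, data, i, bit;
 *
 *		for (i = 0; i < len; i++) {
 *			data = mac[i];
 *			for (bit = 0; bit < 8; bit++, data >>= 1)
 *				crc = (crc >> 1) ^
 *				      (((crc ^ data) & 1) ? CRC32_POLY : 0);
 *		}
 *		return (crc >> (32 - FEC_HASH_BITS)) & 0x3f;	// 0..63
 *	}
 *
 * Hash values above 31 set a bit in the HIGH register, the rest in LOW.
 */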
fec_enet_ioctl, 3110 #ifdef CONFIG_NET_POLL_CONTROLLER 3111 .ndo_poll_controller = fec_poll_controller, 3112 #endif 3113 .ndo_set_features = fec_set_features, 3114 }; 3115 3116 static const unsigned short offset_des_active_rxq[] = { 3117 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 3118 }; 3119 3120 static const unsigned short offset_des_active_txq[] = { 3121 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 3122 }; 3123 3124 /* 3125 * XXX: We need to clean up on failure exits here. 3126 * 3127 */ 3128 static int fec_enet_init(struct net_device *ndev) 3129 { 3130 struct fec_enet_private *fep = netdev_priv(ndev); 3131 struct bufdesc *cbd_base; 3132 dma_addr_t bd_dma; 3133 int bd_size; 3134 unsigned int i; 3135 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 3136 sizeof(struct bufdesc); 3137 unsigned dsize_log2 = __fls(dsize); 3138 3139 WARN_ON(dsize != (1 << dsize_log2)); 3140 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 3141 fep->rx_align = 0xf; 3142 fep->tx_align = 0xf; 3143 #else 3144 fep->rx_align = 0x3; 3145 fep->tx_align = 0x3; 3146 #endif 3147 3148 fec_enet_alloc_queue(ndev); 3149 3150 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 3151 3152 /* Allocate memory for buffer descriptors. */ 3153 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, 3154 GFP_KERNEL); 3155 if (!cbd_base) { 3156 return -ENOMEM; 3157 } 3158 3159 memset(cbd_base, 0, bd_size); 3160 3161 /* Get the Ethernet address */ 3162 fec_get_mac(ndev); 3163 /* make sure MAC we just acquired is programmed into the hw */ 3164 fec_set_mac_address(ndev, NULL); 3165 3166 /* Set receive and transmit descriptor base. */ 3167 for (i = 0; i < fep->num_rx_queues; i++) { 3168 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 3169 unsigned size = dsize * rxq->bd.ring_size; 3170 3171 rxq->bd.qid = i; 3172 rxq->bd.base = cbd_base; 3173 rxq->bd.cur = cbd_base; 3174 rxq->bd.dma = bd_dma; 3175 rxq->bd.dsize = dsize; 3176 rxq->bd.dsize_log2 = dsize_log2; 3177 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 3178 bd_dma += size; 3179 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3180 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3181 } 3182 3183 for (i = 0; i < fep->num_tx_queues; i++) { 3184 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 3185 unsigned size = dsize * txq->bd.ring_size; 3186 3187 txq->bd.qid = i; 3188 txq->bd.base = cbd_base; 3189 txq->bd.cur = cbd_base; 3190 txq->bd.dma = bd_dma; 3191 txq->bd.dsize = dsize; 3192 txq->bd.dsize_log2 = dsize_log2; 3193 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 3194 bd_dma += size; 3195 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3196 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3197 } 3198 3199 3200 /* The FEC Ethernet specific entries in the device structure */ 3201 ndev->watchdog_timeo = TX_TIMEOUT; 3202 ndev->netdev_ops = &fec_netdev_ops; 3203 ndev->ethtool_ops = &fec_enet_ethtool_ops; 3204 3205 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 3206 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 3207 3208 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 3209 /* enable hw VLAN support */ 3210 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3211 3212 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 3213 ndev->gso_max_segs = FEC_MAX_TSO_SEGS; 3214 3215 /* enable hw accelerator */ 3216 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3217 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 3218 fep->csum_flags |= 
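/* Worked example for the descriptor layout set up above (illustrative ring
 * and queue counts): all RX rings are carved first, then all TX rings, out of
 * the single dmam_alloc_coherent() block, and each queue's bd.last ends up
 * pointing at its final descriptor.  With hypothetical values of 2 RX + 2 TX
 * queues, 512-entry rings and extended (32-byte) descriptors:
 *
 *	bd_size = (2 * 512 + 2 * 512) * 32 = 65536 bytes
 *	rxq[0]: base = cbd_base,       dma = bd_dma
 *	rxq[1]: base = cbd_base + 16K, dma = bd_dma + 16K
 *	txq[0]: base = cbd_base + 32K, ... 16K per ring
 *
 * dsize_log2 lets the hot path replace descriptor-pointer arithmetic with
 * shifts, which is why dsize must be a power of two (the WARN_ON above).
 */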
FLAG_RX_CSUM_ENABLED; 3219 } 3220 3221 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 3222 fep->tx_align = 0; 3223 fep->rx_align = 0x3f; 3224 } 3225 3226 ndev->hw_features = ndev->features; 3227 3228 fec_restart(ndev); 3229 3230 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 3231 fec_enet_clear_ethtool_stats(ndev); 3232 else 3233 fec_enet_update_ethtool_stats(ndev); 3234 3235 return 0; 3236 } 3237 3238 #ifdef CONFIG_OF 3239 static int fec_reset_phy(struct platform_device *pdev) 3240 { 3241 int err, phy_reset; 3242 bool active_high = false; 3243 int msec = 1, phy_post_delay = 0; 3244 struct device_node *np = pdev->dev.of_node; 3245 3246 if (!np) 3247 return 0; 3248 3249 err = of_property_read_u32(np, "phy-reset-duration", &msec); 3250 /* A sane reset duration should not be longer than 1s */ 3251 if (!err && msec > 1000) 3252 msec = 1; 3253 3254 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 3255 if (phy_reset == -EPROBE_DEFER) 3256 return phy_reset; 3257 else if (!gpio_is_valid(phy_reset)) 3258 return 0; 3259 3260 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 3261 /* valid reset duration should be less than 1s */ 3262 if (!err && phy_post_delay > 1000) 3263 return -EINVAL; 3264 3265 active_high = of_property_read_bool(np, "phy-reset-active-high"); 3266 3267 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3268 active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, 3269 "phy-reset"); 3270 if (err) { 3271 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3272 return err; 3273 } 3274 3275 if (msec > 20) 3276 msleep(msec); 3277 else 3278 usleep_range(msec * 1000, msec * 1000 + 1000); 3279 3280 gpio_set_value_cansleep(phy_reset, !active_high); 3281 3282 if (!phy_post_delay) 3283 return 0; 3284 3285 if (phy_post_delay > 20) 3286 msleep(phy_post_delay); 3287 else 3288 usleep_range(phy_post_delay * 1000, 3289 phy_post_delay * 1000 + 1000); 3290 3291 return 0; 3292 } 3293 #else /* CONFIG_OF */ 3294 static int fec_reset_phy(struct platform_device *pdev) 3295 { 3296 /* 3297 * In case of platform probe, the reset has been done 3298 * by machine code. 
3299 */ 3300 return 0; 3301 } 3302 #endif /* CONFIG_OF */ 3303 3304 static void 3305 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 3306 { 3307 struct device_node *np = pdev->dev.of_node; 3308 3309 *num_tx = *num_rx = 1; 3310 3311 if (!np || !of_device_is_available(np)) 3312 return; 3313 3314 /* parse the num of tx and rx queues */ 3315 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 3316 3317 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 3318 3319 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 3320 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 3321 *num_tx); 3322 *num_tx = 1; 3323 return; 3324 } 3325 3326 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 3327 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 3328 *num_rx); 3329 *num_rx = 1; 3330 return; 3331 } 3332 3333 } 3334 3335 static int fec_enet_get_irq_cnt(struct platform_device *pdev) 3336 { 3337 int irq_cnt = platform_irq_count(pdev); 3338 3339 if (irq_cnt > FEC_IRQ_NUM) 3340 irq_cnt = FEC_IRQ_NUM; /* last for pps */ 3341 else if (irq_cnt == 2) 3342 irq_cnt = 1; /* last for pps */ 3343 else if (irq_cnt <= 0) 3344 irq_cnt = 1; /* At least 1 irq is needed */ 3345 return irq_cnt; 3346 } 3347 3348 static int 3349 fec_probe(struct platform_device *pdev) 3350 { 3351 struct fec_enet_private *fep; 3352 struct fec_platform_data *pdata; 3353 struct net_device *ndev; 3354 int i, irq, ret = 0; 3355 struct resource *r; 3356 const struct of_device_id *of_id; 3357 static int dev_id; 3358 struct device_node *np = pdev->dev.of_node, *phy_node; 3359 int num_tx_qs; 3360 int num_rx_qs; 3361 char irq_name[8]; 3362 int irq_cnt; 3363 3364 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3365 3366 /* Init network device */ 3367 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 3368 FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 3369 if (!ndev) 3370 return -ENOMEM; 3371 3372 SET_NETDEV_DEV(ndev, &pdev->dev); 3373 3374 /* setup board info structure */ 3375 fep = netdev_priv(ndev); 3376 3377 of_id = of_match_device(fec_dt_ids, &pdev->dev); 3378 if (of_id) 3379 pdev->id_entry = of_id->data; 3380 fep->quirks = pdev->id_entry->driver_data; 3381 3382 fep->netdev = ndev; 3383 fep->num_rx_queues = num_rx_qs; 3384 fep->num_tx_queues = num_tx_qs; 3385 3386 #if !defined(CONFIG_M5272) 3387 /* default enable pause frame auto negotiation */ 3388 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 3389 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 3390 #endif 3391 3392 /* Select default pin state */ 3393 pinctrl_pm_select_default_state(&pdev->dev); 3394 3395 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3396 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 3397 if (IS_ERR(fep->hwp)) { 3398 ret = PTR_ERR(fep->hwp); 3399 goto failed_ioremap; 3400 } 3401 3402 fep->pdev = pdev; 3403 fep->dev_id = dev_id++; 3404 3405 platform_set_drvdata(pdev, ndev); 3406 3407 if ((of_machine_is_compatible("fsl,imx6q") || 3408 of_machine_is_compatible("fsl,imx6dl")) && 3409 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 3410 fep->quirks |= FEC_QUIRK_ERR006687; 3411 3412 if (of_get_property(np, "fsl,magic-packet", NULL)) 3413 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 3414 3415 phy_node = of_parse_phandle(np, "phy-handle", 0); 3416 if (!phy_node && of_phy_is_fixed_link(np)) { 3417 ret = of_phy_register_fixed_link(np); 3418 if (ret < 0) { 3419 dev_err(&pdev->dev, 3420 "broken fixed-link specification\n"); 3421 goto failed_phy; 3422 } 3423 phy_node = of_node_get(np); 3424 } 3425 fep->phy_node = 
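/* Note on the alloc_etherdev_mqs() sizing above (illustrative; assumes
 * fep->ethtool_stats[] is the trailing flexible array member declared in
 * fec.h): the private area is allocated with FEC_STATS_SIZE extra bytes so
 * that the MIB snapshot sits in the same allocation, right after the struct:
 *
 *	[ struct fec_enet_private | u64 ethtool_stats[ARRAY_SIZE(fec_stats)] ]
 *
 * On CONFIG_M5272 builds FEC_STATS_SIZE is defined as 0 earlier in this file,
 * so no extra space is reserved there.
 */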
phy_node; 3426 3427 ret = of_get_phy_mode(pdev->dev.of_node); 3428 if (ret < 0) { 3429 pdata = dev_get_platdata(&pdev->dev); 3430 if (pdata) 3431 fep->phy_interface = pdata->phy; 3432 else 3433 fep->phy_interface = PHY_INTERFACE_MODE_MII; 3434 } else { 3435 fep->phy_interface = ret; 3436 } 3437 3438 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 3439 if (IS_ERR(fep->clk_ipg)) { 3440 ret = PTR_ERR(fep->clk_ipg); 3441 goto failed_clk; 3442 } 3443 3444 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 3445 if (IS_ERR(fep->clk_ahb)) { 3446 ret = PTR_ERR(fep->clk_ahb); 3447 goto failed_clk; 3448 } 3449 3450 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 3451 3452 /* enet_out is optional, depends on board */ 3453 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); 3454 if (IS_ERR(fep->clk_enet_out)) 3455 fep->clk_enet_out = NULL; 3456 3457 fep->ptp_clk_on = false; 3458 mutex_init(&fep->ptp_clk_mutex); 3459 3460 /* clk_ref is optional, depends on board */ 3461 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3462 if (IS_ERR(fep->clk_ref)) 3463 fep->clk_ref = NULL; 3464 3465 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 3466 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 3467 if (IS_ERR(fep->clk_ptp)) { 3468 fep->clk_ptp = NULL; 3469 fep->bufdesc_ex = false; 3470 } 3471 3472 ret = fec_enet_clk_enable(ndev, true); 3473 if (ret) 3474 goto failed_clk; 3475 3476 ret = clk_prepare_enable(fep->clk_ipg); 3477 if (ret) 3478 goto failed_clk_ipg; 3479 3480 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3481 if (!IS_ERR(fep->reg_phy)) { 3482 ret = regulator_enable(fep->reg_phy); 3483 if (ret) { 3484 dev_err(&pdev->dev, 3485 "Failed to enable phy regulator: %d\n", ret); 3486 clk_disable_unprepare(fep->clk_ipg); 3487 goto failed_regulator; 3488 } 3489 } else { 3490 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 3491 ret = -EPROBE_DEFER; 3492 goto failed_regulator; 3493 } 3494 fep->reg_phy = NULL; 3495 } 3496 3497 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3498 pm_runtime_use_autosuspend(&pdev->dev); 3499 pm_runtime_get_noresume(&pdev->dev); 3500 pm_runtime_set_active(&pdev->dev); 3501 pm_runtime_enable(&pdev->dev); 3502 3503 ret = fec_reset_phy(pdev); 3504 if (ret) 3505 goto failed_reset; 3506 3507 irq_cnt = fec_enet_get_irq_cnt(pdev); 3508 if (fep->bufdesc_ex) 3509 fec_ptp_init(pdev, irq_cnt); 3510 3511 ret = fec_enet_init(ndev); 3512 if (ret) 3513 goto failed_init; 3514 3515 for (i = 0; i < irq_cnt; i++) { 3516 sprintf(irq_name, "int%d", i); 3517 irq = platform_get_irq_byname(pdev, irq_name); 3518 if (irq < 0) 3519 irq = platform_get_irq(pdev, i); 3520 if (irq < 0) { 3521 ret = irq; 3522 goto failed_irq; 3523 } 3524 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 3525 0, pdev->name, ndev); 3526 if (ret) 3527 goto failed_irq; 3528 3529 fep->irq[i] = irq; 3530 } 3531 3532 init_completion(&fep->mdio_done); 3533 ret = fec_enet_mii_init(pdev); 3534 if (ret) 3535 goto failed_mii_init; 3536 3537 /* Carrier starts down, phylib will bring it up */ 3538 netif_carrier_off(ndev); 3539 fec_enet_clk_enable(ndev, false); 3540 pinctrl_pm_select_sleep_state(&pdev->dev); 3541 3542 ret = register_netdev(ndev); 3543 if (ret) 3544 goto failed_register; 3545 3546 device_init_wakeup(&ndev->dev, fep->wol_flag & 3547 FEC_WOL_HAS_MAGIC_PACKET); 3548 3549 if (fep->bufdesc_ex && fep->ptp_clock) 3550 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3551 3552 fep->rx_copybreak = COPYBREAK_DEFAULT; 3553 INIT_WORK(&fep->tx_timeout_work, 
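/* Note on the clock handling above: ipg and ahb are mandatory, so a failed
 * devm_clk_get() aborts the probe, while enet_out, enet_clk_ref and ptp are
 * board dependent and are simply treated as absent (pointer set to NULL) when
 * missing.  Since clk_prepare_enable(NULL) is a no-op, the enable/disable
 * paths need no special casing.  A sketch of the pattern (hypothetical helper
 * name):
 *
 *	static inline struct clk *fec_optional_clk(struct device *dev,
 *						   const char *id)
 *	{
 *		struct clk *clk = devm_clk_get(dev, id);
 *
 *		return IS_ERR(clk) ? NULL : clk;
 *	}
 *
 * Losing the ptp clock also clears bufdesc_ex, since extended descriptors are
 * only useful with a working 1588 timer.
 */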
fec_enet_timeout_work); 3554 3555 pm_runtime_mark_last_busy(&pdev->dev); 3556 pm_runtime_put_autosuspend(&pdev->dev); 3557 3558 return 0; 3559 3560 failed_register: 3561 fec_enet_mii_remove(fep); 3562 failed_mii_init: 3563 failed_irq: 3564 failed_init: 3565 fec_ptp_stop(pdev); 3566 if (fep->reg_phy) 3567 regulator_disable(fep->reg_phy); 3568 failed_reset: 3569 pm_runtime_put(&pdev->dev); 3570 pm_runtime_disable(&pdev->dev); 3571 failed_regulator: 3572 failed_clk_ipg: 3573 fec_enet_clk_enable(ndev, false); 3574 failed_clk: 3575 if (of_phy_is_fixed_link(np)) 3576 of_phy_deregister_fixed_link(np); 3577 of_node_put(phy_node); 3578 failed_phy: 3579 dev_id--; 3580 failed_ioremap: 3581 free_netdev(ndev); 3582 3583 return ret; 3584 } 3585 3586 static int 3587 fec_drv_remove(struct platform_device *pdev) 3588 { 3589 struct net_device *ndev = platform_get_drvdata(pdev); 3590 struct fec_enet_private *fep = netdev_priv(ndev); 3591 struct device_node *np = pdev->dev.of_node; 3592 3593 cancel_work_sync(&fep->tx_timeout_work); 3594 fec_ptp_stop(pdev); 3595 unregister_netdev(ndev); 3596 fec_enet_mii_remove(fep); 3597 if (fep->reg_phy) 3598 regulator_disable(fep->reg_phy); 3599 pm_runtime_put(&pdev->dev); 3600 pm_runtime_disable(&pdev->dev); 3601 if (of_phy_is_fixed_link(np)) 3602 of_phy_deregister_fixed_link(np); 3603 of_node_put(fep->phy_node); 3604 free_netdev(ndev); 3605 3606 return 0; 3607 } 3608 3609 static int __maybe_unused fec_suspend(struct device *dev) 3610 { 3611 struct net_device *ndev = dev_get_drvdata(dev); 3612 struct fec_enet_private *fep = netdev_priv(ndev); 3613 3614 rtnl_lock(); 3615 if (netif_running(ndev)) { 3616 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 3617 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 3618 phy_stop(ndev->phydev); 3619 napi_disable(&fep->napi); 3620 netif_tx_lock_bh(ndev); 3621 netif_device_detach(ndev); 3622 netif_tx_unlock_bh(ndev); 3623 fec_stop(ndev); 3624 fec_enet_clk_enable(ndev, false); 3625 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3626 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3627 } 3628 rtnl_unlock(); 3629 3630 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3631 regulator_disable(fep->reg_phy); 3632 3633 /* SOC supply clock to phy, when clock is disabled, phy link down 3634 * SOC control phy regulator, when regulator is disabled, phy link down 3635 */ 3636 if (fep->clk_enet_out || fep->reg_phy) 3637 fep->link = 0; 3638 3639 return 0; 3640 } 3641 3642 static int __maybe_unused fec_resume(struct device *dev) 3643 { 3644 struct net_device *ndev = dev_get_drvdata(dev); 3645 struct fec_enet_private *fep = netdev_priv(ndev); 3646 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 3647 int ret; 3648 int val; 3649 3650 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 3651 ret = regulator_enable(fep->reg_phy); 3652 if (ret) 3653 return ret; 3654 } 3655 3656 rtnl_lock(); 3657 if (netif_running(ndev)) { 3658 ret = fec_enet_clk_enable(ndev, true); 3659 if (ret) { 3660 rtnl_unlock(); 3661 goto failed_clk; 3662 } 3663 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 3664 if (pdata && pdata->sleep_mode_enable) 3665 pdata->sleep_mode_enable(false); 3666 val = readl(fep->hwp + FEC_ECNTRL); 3667 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 3668 writel(val, fep->hwp + FEC_ECNTRL); 3669 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 3670 } else { 3671 pinctrl_pm_select_default_state(&fep->pdev->dev); 3672 } 3673 fec_restart(ndev); 3674 netif_tx_lock_bh(ndev); 3675 netif_device_attach(ndev); 3676 netif_tx_unlock_bh(ndev); 3677 
napi_enable(&fep->napi); 3678 phy_start(ndev->phydev); 3679 } 3680 rtnl_unlock(); 3681 3682 return 0; 3683 3684 failed_clk: 3685 if (fep->reg_phy) 3686 regulator_disable(fep->reg_phy); 3687 return ret; 3688 } 3689 3690 static int __maybe_unused fec_runtime_suspend(struct device *dev) 3691 { 3692 struct net_device *ndev = dev_get_drvdata(dev); 3693 struct fec_enet_private *fep = netdev_priv(ndev); 3694 3695 clk_disable_unprepare(fep->clk_ipg); 3696 3697 return 0; 3698 } 3699 3700 static int __maybe_unused fec_runtime_resume(struct device *dev) 3701 { 3702 struct net_device *ndev = dev_get_drvdata(dev); 3703 struct fec_enet_private *fep = netdev_priv(ndev); 3704 3705 return clk_prepare_enable(fep->clk_ipg); 3706 } 3707 3708 static const struct dev_pm_ops fec_pm_ops = { 3709 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3710 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3711 }; 3712 3713 static struct platform_driver fec_driver = { 3714 .driver = { 3715 .name = DRIVER_NAME, 3716 .pm = &fec_pm_ops, 3717 .of_match_table = fec_dt_ids, 3718 }, 3719 .id_table = fec_devtype, 3720 .probe = fec_probe, 3721 .remove = fec_drv_remove, 3722 }; 3723 3724 module_platform_driver(fec_driver); 3725 3726 MODULE_ALIAS("platform:"DRIVER_NAME); 3727 MODULE_LICENSE("GPL"); 3728