/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
#include <soc/imx/cpuidle.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_init(struct net_device *ndev);

#define DRIVER_NAME	"fec"

#define FEC_ENET_GET_QUQUE(_x) ((_x == 0) ? 1 : ((_x == 1) ? 2 : 0))
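/* Reader's note on the mapping above (a summary of how it is used later in
 * this file, not of the reference manual): the macro folds the work bit set
 * in fec_enet_collect_events() back into a hardware queue number so that the
 * rings are serviced as queue 1 first, then queue 2, then queue 0, matching
 * the "class A, class B, best effort" ordering mentioned in fec_enet_tx().
 */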
2 : 0)) 75 76 /* Pause frame feild and FIFO threshold */ 77 #define FEC_ENET_FCE (1 << 5) 78 #define FEC_ENET_RSEM_V 0x84 79 #define FEC_ENET_RSFL_V 16 80 #define FEC_ENET_RAEM_V 0x8 81 #define FEC_ENET_RAFL_V 0x8 82 #define FEC_ENET_OPD_V 0xFFF0 83 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ 84 85 static struct platform_device_id fec_devtype[] = { 86 { 87 /* keep it for coldfire */ 88 .name = DRIVER_NAME, 89 .driver_data = 0, 90 }, { 91 .name = "imx25-fec", 92 .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR, 93 }, { 94 .name = "imx27-fec", 95 .driver_data = FEC_QUIRK_MIB_CLEAR, 96 }, { 97 .name = "imx28-fec", 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 99 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, 100 }, { 101 .name = "imx6q-fec", 102 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 103 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 104 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 105 FEC_QUIRK_HAS_RACC, 106 }, { 107 .name = "mvf600-fec", 108 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, 109 }, { 110 .name = "imx6sx-fec", 111 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 112 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 113 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 114 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 115 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE, 116 }, { 117 .name = "imx6ul-fec", 118 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 119 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 120 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | 121 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | 122 FEC_QUIRK_HAS_COALESCE, 123 }, { 124 /* sentinel */ 125 } 126 }; 127 MODULE_DEVICE_TABLE(platform, fec_devtype); 128 129 enum imx_fec_type { 130 IMX25_FEC = 1, /* runs on i.mx25/50/53 */ 131 IMX27_FEC, /* runs on i.mx27/35/51 */ 132 IMX28_FEC, 133 IMX6Q_FEC, 134 MVF600_FEC, 135 IMX6SX_FEC, 136 IMX6UL_FEC, 137 }; 138 139 static const struct of_device_id fec_dt_ids[] = { 140 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, 141 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, 142 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, 143 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 144 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, 145 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, 146 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, 147 { /* sentinel */ } 148 }; 149 MODULE_DEVICE_TABLE(of, fec_dt_ids); 150 151 static unsigned char macaddr[ETH_ALEN]; 152 module_param_array(macaddr, byte, NULL, 0); 153 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 154 155 #if defined(CONFIG_M5272) 156 /* 157 * Some hardware gets it MAC address out of local flash memory. 158 * if this is non-zero then assume it is the address to get MAC from. 159 */ 160 #if defined(CONFIG_NETtel) 161 #define FEC_FLASHMAC 0xf0006006 162 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 163 #define FEC_FLASHMAC 0xf0006000 164 #elif defined(CONFIG_CANCam) 165 #define FEC_FLASHMAC 0xf0020000 166 #elif defined (CONFIG_M5272C3) 167 #define FEC_FLASHMAC (0xffe04000 + 4) 168 #elif defined(CONFIG_MOD5272) 169 #define FEC_FLASHMAC 0xffc0406b 170 #else 171 #define FEC_FLASHMAC 0 172 #endif 173 #endif /* CONFIG_M5272 */ 174 175 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 176 * 177 * 2048 byte skbufs are allocated. However, alignment requirements 178 * varies between FEC variants. 
/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
 *
 * 2048 byte skbufs are allocated. However, alignment requirements
 * vary between FEC variants. Worst case is 64, so round down by 64.
 */
#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
#define PKT_MINBUF_SIZE		64

/* FEC receive acceleration */
#define FEC_RACC_IPDIS		(1 << 1)
#define FEC_RACC_PRODIS		(1 << 2)
#define FEC_RACC_SHIFT16	BIT(7)
#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)

/* MIB Control Register */
#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)

/*
 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
 * size bits. Other FEC hardware does not, so we need to take that into
 * account when setting it.
 */
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
    defined(CONFIG_ARM64)
#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
#else
#define	OPT_FRAME_SIZE	0
#endif

/* FEC MII MMFR bits definition */
#define FEC_MMFR_ST		(1 << 30)
#define FEC_MMFR_OP_READ	(2 << 28)
#define FEC_MMFR_OP_WRITE	(1 << 28)
#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
#define FEC_MMFR_TA		(2 << 16)
#define FEC_MMFR_DATA(v)	(v & 0xffff)
/* FEC ECR bits definition */
#define FEC_ECR_MAGICEN		(1 << 2)
#define FEC_ECR_SLEEP		(1 << 3)

#define FEC_MII_TIMEOUT		30000 /* us */

/* Transmitter timeout */
#define TX_TIMEOUT	(2 * HZ)

#define FEC_PAUSE_FLAG_AUTONEG	0x1
#define FEC_PAUSE_FLAG_ENABLE	0x2
#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)

#define COPYBREAK_DEFAULT	256

/* Max number of allowed TCP segments for software TSO */
#define FEC_MAX_TSO_SEGS	100
#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)

#define IS_TSO_HEADER(txq, addr) \
	((addr >= txq->tso_hdrs_dma) && \
	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))

static int mii_cnt;

static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp >= bd->last) ? bd->base
			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
}

static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
					     struct bufdesc_prop *bd)
{
	return (bdp <= bd->base) ? bd->last
			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
}

static int fec_enet_get_bd_index(struct bufdesc *bdp,
				 struct bufdesc_prop *bd)
{
	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
}
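/* A short illustration of the ring arithmetic used above and in the helper
 * below (example numbers, not values taken from this file): if the extended
 * descriptors are 32 bytes, dsize_log2 is 5 and the index is just the pointer
 * difference shifted right by 5. The free-entry count is
 * (dirty_tx - cur) / dsize - 1; a negative result simply means the window
 * wraps around the ring, so ring_size is added back.
 */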
static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
{
	int entries;

	entries = (((const char *)txq->dirty_tx -
			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;

	return entries >= 0 ? entries : entries + txq->bd.ring_size;
}

static void swap_buffer(void *bufaddr, int len)
{
	int i;
	unsigned int *buf = bufaddr;

	for (i = 0; i < len; i += 4, buf++)
		swab32s(buf);
}

static void swap_buffer2(void *dst_buf, void *src_buf, int len)
{
	int i;
	unsigned int *src = src_buf;
	unsigned int *dst = dst_buf;

	for (i = 0; i < len; i += 4, src++, dst++)
		*dst = swab32p(src);
}

static void fec_dump(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp;
	struct fec_enet_priv_tx_q *txq;
	int index = 0;

	netdev_info(ndev, "TX ring dump\n");
	pr_info("Nr SC addr len SKB\n");

	txq = fep->tx_queue[0];
	bdp = txq->bd.base;

	do {
		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
			index,
			bdp == txq->bd.cur ? 'S' : ' ',
			bdp == txq->dirty_tx ? 'H' : ' ',
			fec16_to_cpu(bdp->cbd_sc),
			fec32_to_cpu(bdp->cbd_bufaddr),
			fec16_to_cpu(bdp->cbd_datlen),
			txq->tx_skbuff[index]);
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		index++;
	} while (bdp != txq->bd.base);
}

static inline bool is_ipv4_pkt(struct sk_buff *skb)
{
	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
}

static int
fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
{
	/* Only run for packets requiring a checksum. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (unlikely(skb_cow_head(skb, 0)))
		return -1;

	if (is_ipv4_pkt(skb))
		ip_hdr(skb)->check = 0;
	*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;

	return 0;
}

static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
			     struct sk_buff *skb,
			     struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *bdp = txq->bd.cur;
	struct bufdesc_ex *ebdp;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	int frag, frag_len;
	unsigned short status;
	unsigned int estatus = 0;
	skb_frag_t *this_frag;
	unsigned int index;
	void *bufaddr;
	dma_addr_t addr;
	int i;

	for (frag = 0; frag < nr_frags; frag++) {
		this_frag = &skb_shinfo(skb)->frags[frag];
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		ebdp = (struct bufdesc_ex *)bdp;

		status = fec16_to_cpu(bdp->cbd_sc);
		status &= ~BD_ENET_TX_STATS;
		status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
		frag_len = skb_shinfo(skb)->frags[frag].size;

		/* Handle the last BD specially */
		if (frag == nr_frags - 1) {
			status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
			if (fep->bufdesc_ex) {
				estatus |= BD_ENET_TX_INT;
				if (unlikely(skb_shinfo(skb)->tx_flags &
					SKBTX_HW_TSTAMP && fep->hwts_tx_en))
					estatus |= BD_ENET_TX_TS;
			}
		}

		if (fep->bufdesc_ex) {
			if (fep->quirks & FEC_QUIRK_HAS_AVB)
				estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
			if (skb->ip_summed == CHECKSUM_PARTIAL)
				estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
			ebdp->cbd_bdu = 0;
			ebdp->cbd_esc = cpu_to_fec32(estatus);
		}

		bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		if (((unsigned long) bufaddr) & fep->tx_align ||
			fep->quirks & FEC_QUIRK_SWAP_FRAME) {
			memcpy(txq->tx_bounce[index], bufaddr, frag_len);
			bufaddr = txq->tx_bounce[index];

			if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
				swap_buffer(bufaddr, frag_len);
		}

		addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, addr)) {
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			goto dma_mapping_error;
		}

		bdp->cbd_bufaddr = cpu_to_fec32(addr);
		bdp->cbd_datlen = cpu_to_fec16(frag_len);
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);
	}

	return bdp;
dma_mapping_error:
	bdp = txq->bd.cur;
	for (i = 0; i < frag; i++) {
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
				 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
	}
	return ERR_PTR(-ENOMEM);
}

static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct bufdesc *bdp, *last_bdp;
	void *bufaddr;
	dma_addr_t addr;
	unsigned short status;
	unsigned short buflen;
	unsigned int estatus = 0;
	unsigned int index;
	int entries_free;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for SG!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	last_bdp = bdp;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	buflen = skb_headlen(skb);

	index = fec_enet_get_bd_index(bdp, &txq->bd);
	if (((unsigned long) bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, buflen);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, buflen);
	}

	/* Push the data cache so the CPM does not get stale memory data. */
	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_OK;
	}

	if (nr_frags) {
		last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
		if (IS_ERR(last_bdp)) {
			dma_unmap_single(&fep->pdev->dev, addr,
					 buflen, DMA_TO_DEVICE);
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	} else {
		status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
		if (fep->bufdesc_ex) {
			estatus = BD_ENET_TX_INT;
			if (unlikely(skb_shinfo(skb)->tx_flags &
				SKBTX_HW_TSTAMP && fep->hwts_tx_en))
				estatus |= BD_ENET_TX_TS;
		}
	}
	bdp->cbd_bufaddr = cpu_to_fec32(addr);
	bdp->cbd_datlen = cpu_to_fec16(buflen);

	if (fep->bufdesc_ex) {

		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
			fep->hwts_tx_en))
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);

	skb_tx_timestamp(skb);

	/* Make sure the update to bdp and tx_skbuff are performed before
	 * txq->bd.cur.
	 */
	wmb();
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

static int
fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
			  struct net_device *ndev,
			  struct bufdesc *bdp, int index, char *data,
			  int size, bool last_tcp, bool is_last)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	unsigned short status;
	unsigned int estatus = 0;
	dma_addr_t addr;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	if (((unsigned long) data) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], data, size);
		data = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, size);
	}

	addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(&fep->pdev->dev, addr)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "Tx DMA memory map failed\n");
		return NETDEV_TX_BUSY;
	}

	bdp->cbd_datlen = cpu_to_fec16(size);
	bdp->cbd_bufaddr = cpu_to_fec32(addr);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Handle the last BD specially */
	if (last_tcp)
		status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
	if (is_last) {
		status |= BD_ENET_TX_INTR;
		if (fep->bufdesc_ex)
			ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}
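/* Rough picture of the software-TSO layout used by the helpers around here
 * (a reader's note summarising the code, not the reference manual): each TCP
 * segment is emitted as one header descriptor whose buffer lives in the
 * per-ring tso_hdrs/tso_hdrs_dma region at index * TSO_HEADER_SIZE, followed
 * by one or more data descriptors built from tso_build_data(). IS_TSO_HEADER()
 * is what later tells the completion path not to dma_unmap those header
 * buffers.
 */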

static int
fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
			 struct sk_buff *skb, struct net_device *ndev,
			 struct bufdesc *bdp, int index)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
	void *bufaddr;
	unsigned long dmabuf;
	unsigned short status;
	unsigned int estatus = 0;

	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;
	status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);

	bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
	dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
	if (((unsigned long)bufaddr) & fep->tx_align ||
		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
		memcpy(txq->tx_bounce[index], skb->data, hdr_len);
		bufaddr = txq->tx_bounce[index];

		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(bufaddr, hdr_len);

		dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
					hdr_len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
			dev_kfree_skb_any(skb);
			if (net_ratelimit())
				netdev_err(ndev, "Tx DMA memory map failed\n");
			return NETDEV_TX_BUSY;
		}
	}

	bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
	bdp->cbd_datlen = cpu_to_fec16(hdr_len);

	if (fep->bufdesc_ex) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	bdp->cbd_sc = cpu_to_fec16(status);

	return 0;
}

static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
				   struct sk_buff *skb,
				   struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len, data_left;
	struct bufdesc *bdp = txq->bd.cur;
	struct tso_t tso;
	unsigned int index = 0;
	int ret;

	if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
		dev_kfree_skb_any(skb);
		if (net_ratelimit())
			netdev_err(ndev, "NOT enough BD for TSO!\n");
		return NETDEV_TX_OK;
	}

	/* Protocol checksum off-load for TCP and UDP. */
	if (fec_enet_clear_csum(skb, ndev)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* Initialize the TSO handler, and prepare the first payload */
	tso_start(skb, &tso);

	total_len = skb->len - hdr_len;
	while (total_len > 0) {
		char *hdr;

		index = fec_enet_get_bd_index(bdp, &txq->bd);
		data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
		total_len -= data_left;

		/* prepare packet headers: MAC + IP + TCP */
		hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
		if (ret)
			goto err_release;

		while (data_left > 0) {
			int size;

			size = min_t(int, tso.size, data_left);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
			index = fec_enet_get_bd_index(bdp, &txq->bd);
			ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
							bdp, index,
							tso.data, size,
							size == data_left,
							total_len == 0);
			if (ret)
				goto err_release;

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}

		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
	}

	/* Save skb pointer */
	txq->tx_skbuff[index] = skb;

	skb_tx_timestamp(skb);
	txq->bd.cur = bdp;

	/* Trigger transmission start */
	if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active) ||
	    !readl(txq->bd.reg_desc_active))
		writel(0, txq->bd.reg_desc_active);

	return 0;

err_release:
	/* TODO: Release all used data descriptors for TSO */
	return ret;
}

static netdev_tx_t
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int entries_free;
	unsigned short queue;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int ret;

	queue = skb_get_queue_mapping(skb);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(ndev, queue);

	if (skb_is_gso(skb))
		ret = fec_enet_txq_submit_tso(txq, skb, ndev);
	else
		ret = fec_enet_txq_submit_skb(txq, skb, ndev);
	if (ret)
		return ret;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free <= txq->tx_stop_threshold)
		netif_tx_stop_queue(nq);

	return NETDEV_TX_OK;
}
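/* Flow-control note for the transmit path above (summary only): the queue is
 * stopped once the free-descriptor count drops to tx_stop_threshold, and
 * fec_enet_tx_queue() below wakes it again when completions bring the count
 * back up to tx_wake_threshold. The threshold values themselves are set when
 * the tx queues are allocated.
 */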

/* Init RX & TX buffer descriptors
 */
static void fec_enet_bd_init(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned int i;
	unsigned int q;

	for (q = 0; q < fep->num_rx_queues; q++) {
		/* Initialize the receive buffer descriptors. */
		rxq = fep->rx_queue[q];
		bdp = rxq->bd.base;

		for (i = 0; i < rxq->bd.ring_size; i++) {

			/* Initialize the BD for every fragment in the page. */
			if (bdp->cbd_bufaddr)
				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
			else
				bdp->cbd_sc = cpu_to_fec16(0);
			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);

		rxq->bd.cur = rxq->bd.base;
	}

	for (q = 0; q < fep->num_tx_queues; q++) {
		/* ...and the same for transmit */
		txq = fep->tx_queue[q];
		bdp = txq->bd.base;
		txq->bd.cur = bdp;

		for (i = 0; i < txq->bd.ring_size; i++) {
			/* Initialize the BD for every fragment in the page. */
			bdp->cbd_sc = cpu_to_fec16(0);
			if (bdp->cbd_bufaddr &&
			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
				dma_unmap_single(&fep->pdev->dev,
						 fec32_to_cpu(bdp->cbd_bufaddr),
						 fec16_to_cpu(bdp->cbd_datlen),
						 DMA_TO_DEVICE);
			if (txq->tx_skbuff[i]) {
				dev_kfree_skb_any(txq->tx_skbuff[i]);
				txq->tx_skbuff[i] = NULL;
			}
			bdp->cbd_bufaddr = cpu_to_fec32(0);
			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
		}

		/* Set the last buffer to wrap */
		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
		txq->dirty_tx = bdp;
	}
}

static void fec_enet_active_rxring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int i;

	for (i = 0; i < fep->num_rx_queues; i++)
		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
}

static void fec_enet_enable_ring(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	struct fec_enet_priv_rx_q *rxq;
	int i;

	for (i = 0; i < fep->num_rx_queues; i++) {
		rxq = fep->rx_queue[i];
		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));

		/* enable DMA1/2 */
		if (i)
			writel(RCMR_MATCHEN | RCMR_CMP(i),
			       fep->hwp + FEC_RCMR(i));
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];
		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));

		/* enable DMA1/2 */
		if (i)
			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
			       fep->hwp + FEC_DMA_CFG(i));
	}
}

static void fec_enet_reset_skb(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_tx_q *txq;
	int i, j;

	for (i = 0; i < fep->num_tx_queues; i++) {
		txq = fep->tx_queue[i];

		for (j = 0; j < txq->bd.ring_size; j++) {
			if (txq->tx_skbuff[j]) {
				dev_kfree_skb_any(txq->tx_skbuff[j]);
				txq->tx_skbuff[j] = NULL;
			}
		}
	}
}

/*
 * This function is called to start or restart the FEC during a link
 * change, transmit timeout, or to reconfigure the FEC. The network
 * packet processing for this device must be stopped before this call.
 */
static void
fec_restart(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 val;
	u32 temp_mac[2];
	u32 rcntl = OPT_FRAME_SIZE | 0x04;
	u32 ecntl = 0x2; /* ETHEREN */

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
		writel(0, fep->hwp + FEC_ECNTRL);
	} else {
		writel(1, fep->hwp + FEC_ECNTRL);
		udelay(10);
	}

	/*
	 * enet-mac reset will reset the MAC address registers too,
	 * so we need to reconfigure them.
	 */
	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
	writel((__force u32)cpu_to_be32(temp_mac[0]),
	       fep->hwp + FEC_ADDR_LOW);
	writel((__force u32)cpu_to_be32(temp_mac[1]),
	       fep->hwp + FEC_ADDR_HIGH);

	/* Clear any outstanding interrupt. */
	writel(0xffffffff, fep->hwp + FEC_IEVENT);

	fec_enet_bd_init(ndev);

	fec_enet_enable_ring(ndev);

	/* Reset tx SKB buffers. */
	fec_enet_reset_skb(ndev);

	/* Enable MII mode */
	if (fep->full_duplex == DUPLEX_FULL) {
		/* FD enable */
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* No Rcv on Xmit */
		rcntl |= 0x02;
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

#if !defined(CONFIG_M5272)
	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
		val = readl(fep->hwp + FEC_RACC);
		/* align IP header */
		val |= FEC_RACC_SHIFT16;
		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
			/* set RX checksum */
			val |= FEC_RACC_OPTIONS;
		else
			val &= ~FEC_RACC_OPTIONS;
		writel(val, fep->hwp + FEC_RACC);
		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
	}
#endif
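	/* Reader's note on the magic numbers used in this function (taken
	 * from the inline comments around them, not re-checked against the
	 * reference manual): in rcntl, 0x02 disables receive-on-transmit for
	 * half duplex, bit 5 is the flow-control enable (FEC_ENET_FCE),
	 * bit 6 selects RGMII, bit 8 RMII and bit 9 the 10 Mbit/s rate; in
	 * ecntl, 0x2 is ETHEREN, bit 4 enables the extended (1588)
	 * descriptors, bit 5 selects gigabit speed and bit 8 the ENET byte
	 * swap.
	 */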

	/*
	 * The phy interface and speed need to get configured
	 * differently on enet-mac.
	 */
	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* Enable flow control and length check */
		rcntl |= 0x40000000 | 0x00000020;

		/* RGMII, RMII or MII */
		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
			rcntl |= (1 << 6);
		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
			rcntl |= (1 << 8);
		else
			rcntl &= ~(1 << 8);

		/* 1G, 100M or 10M */
		if (ndev->phydev) {
			if (ndev->phydev->speed == SPEED_1000)
				ecntl |= (1 << 5);
			else if (ndev->phydev->speed == SPEED_100)
				rcntl &= ~(1 << 9);
			else
				rcntl |= (1 << 9);
		}
	} else {
#ifdef FEC_MIIGSK_ENR
		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
			u32 cfgr;
			/* disable the gasket and wait */
			writel(0, fep->hwp + FEC_MIIGSK_ENR);
			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
				udelay(1);

			/*
			 * configure the gasket:
			 *   RMII, 50 MHz, no loopback, no echo
			 *   MII, 25 MHz, no loopback, no echo
			 */
			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);

			/* re-enable the gasket */
			writel(2, fep->hwp + FEC_MIIGSK_ENR);
		}
#endif
	}

#if !defined(CONFIG_M5272)
	/* enable pause frame */
	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
	     ndev->phydev && ndev->phydev->pause)) {
		rcntl |= FEC_ENET_FCE;

		/* set FIFO threshold parameter to reduce overrun */
		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);

		/* OPD */
		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
	} else {
		rcntl &= ~FEC_ENET_FCE;
	}
#endif /* !defined(CONFIG_M5272) */

	writel(rcntl, fep->hwp + FEC_R_CNTRL);

	/* Setup multicast filter. */
	set_multicast_list(ndev);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
		/* enable ENET endian swap */
		ecntl |= (1 << 8);
		/* enable ENET store and forward mode */
		writel(1 << 8, fep->hwp + FEC_X_WMRK);
	}

	if (fep->bufdesc_ex)
		ecntl |= (1 << 4);

#ifndef CONFIG_M5272
	/* Enable the MIB statistic event counters */
	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
#endif

	/* And last, enable the transmit and receive processing */
	writel(ecntl, fep->hwp + FEC_ECNTRL);
	fec_enet_active_rxring(ndev);

	if (fep->bufdesc_ex)
		fec_ptp_start_cyclecounter(ndev);

	/* Enable interrupts we wish to service */
	if (fep->link)
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	else
		writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);

	/* Init the interrupt coalescing */
	fec_enet_itr_coal_init(ndev);

}
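/* Rough summary of the stop path below (descriptive only): with Wake-on-LAN
 * sleep off, the MAC is reset (or merely disabled on the AVB-capable parts)
 * and the default interrupt mask is restored; with sleep on, the magic-packet
 * and sleep bits are set in ECR and the platform sleep hook is invoked
 * instead, so the controller keeps enough state to wake the system.
 */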
static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block sits on the AXI bus, so we
	 * disable the MAC instead of resetting it.
	 */
	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
			writel(0, fep->hwp + FEC_ECNTRL);
		} else {
			writel(1, fep->hwp + FEC_ECNTRL);
			udelay(10);
		}
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	} else {
		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
		val = readl(fep->hwp + FEC_ECNTRL);
		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
		writel(val, fep->hwp + FEC_ECNTRL);

		if (pdata && pdata->sleep_mode_enable)
			pdata->sleep_mode_enable(true);
	}
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* We have to keep ENET enabled to have MII interrupt stay working */
	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
	    !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
		writel(2, fep->hwp + FEC_ECNTRL);
		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
	}
}


static void
fec_timeout(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	fec_dump(ndev);

	ndev->stats.tx_errors++;

	schedule_work(&fep->tx_timeout_work);
}

static void fec_enet_timeout_work(struct work_struct *work)
{
	struct fec_enet_private *fep =
		container_of(work, struct fec_enet_private, tx_timeout_work);
	struct net_device *ndev = fep->netdev;

	rtnl_lock();
	if (netif_device_present(ndev) || netif_running(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		fec_restart(ndev);
		netif_wake_queue(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
	}
	rtnl_unlock();
}

static void
fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
		  struct skb_shared_hwtstamps *hwtstamps)
{
	unsigned long flags;
	u64 ns;

	spin_lock_irqsave(&fep->tmreg_lock, flags);
	ns = timecounter_cyc2time(&fep->tc, ts);
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void
fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
{
	struct fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int index = 0;
	int entries_free;

	fep = netdev_priv(ndev);

	queue_id = FEC_ENET_GET_QUQUE(queue_id);

	txq = fep->tx_queue[queue_id];
	/* get next bdp of dirty_tx */
	nq = netdev_get_tx_queue(ndev, queue_id);
	bdp = txq->dirty_tx;

	/* get next bdp of dirty_tx */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	while (bdp != READ_ONCE(txq->bd.cur)) {
		/* Order the load of bd.cur and cbd_sc */
		rmb();
		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
		if (status & BD_ENET_TX_READY)
			break;

		index = fec_enet_get_bd_index(bdp, &txq->bd);

		skb = txq->tx_skbuff[index];
		txq->tx_skbuff[index] = NULL;
		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 fec16_to_cpu(bdp->cbd_datlen),
					 DMA_TO_DEVICE);
		bdp->cbd_bufaddr = cpu_to_fec32(0);
		if (!skb)
			goto skb_done;

		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			      BD_ENET_TX_RL | BD_ENET_TX_UN |
			      BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
			ndev->stats.tx_bytes += skb->len;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
skb_done:
		/* Make sure the update to bdp and tx_skbuff are performed
		 * before dirty_tx
		 */
		wmb();
		txq->dirty_tx = bdp;

		/* Update pointer to next buffer descriptor to be transmitted */
		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (netif_queue_stopped(ndev)) {
			entries_free = fec_enet_get_free_txdesc_num(txq);
			if (entries_free >= txq->tx_wake_threshold)
				netif_tx_wake_queue(nq);
		}
	}

	/* ERR006358: Keep the transmitter going */
	if (bdp != txq->bd.cur &&
	    readl(txq->bd.reg_desc_active) == 0)
		writel(0, txq->bd.reg_desc_active);
}

static void
fec_enet_tx(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 queue_id;
	/* First process class A queue, then Class B and Best Effort queue */
	for_each_set_bit(queue_id, &fep->work_tx, FEC_ENET_MAX_TX_QS) {
		clear_bit(queue_id, &fep->work_tx);
		fec_enet_tx_queue(ndev, queue_id);
	}
	return;
}

static int
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int off;

	off = ((unsigned long)skb->data) & fep->rx_align;
	if (off)
		skb_reserve(skb, fep->rx_align + 1 - off);

	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE - fep->rx_align, DMA_FROM_DEVICE));
	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
		if (net_ratelimit())
			netdev_err(ndev, "Rx DMA memory map failed\n");
		return -ENOMEM;
	}

	return 0;
}
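/* Note on the copybreak path below (descriptive only): frames no longer than
 * rx_copybreak (initialised from COPYBREAK_DEFAULT above) are copied into a
 * freshly allocated skb so the original DMA buffer can stay mapped and be
 * handed straight back to the hardware; larger frames give up their buffer
 * and a replacement is mapped with fec_enet_new_rxbdp().
 */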

static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
			       struct bufdesc *bdp, u32 length, bool swap)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sk_buff *new_skb;

	if (length > fep->rx_copybreak)
		return false;

	new_skb = netdev_alloc_skb(ndev, length);
	if (!new_skb)
		return false;

	dma_sync_single_for_cpu(&fep->pdev->dev,
				fec32_to_cpu(bdp->cbd_bufaddr),
				FEC_ENET_RX_FRSIZE - fep->rx_align,
				DMA_FROM_DEVICE);
	if (!swap)
		memcpy(new_skb->data, (*skb)->data, length);
	else
		swap_buffer2(new_skb->data, (*skb)->data, length);
	*skb = new_skb;

	return true;
}

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb_new = NULL;
	struct sk_buff *skb;
	ushort pkt_len;
	__u8 *data;
	int pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool vlan_packet_rcvd = false;
	u16 vlan_tag;
	int index = 0;
	bool is_copybreak;
	bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif
	queue_id = FEC_ENET_GET_QUQUE(queue_id);
	rxq = fep->rx_queue[queue_id];

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = rxq->bd.cur;

	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);

		/* Check for errors. */
		status ^= BD_ENET_RX_LAST;
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			      BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
			      BD_ENET_RX_CL)) {
			ndev->stats.rx_errors++;
			if (status & BD_ENET_RX_OV) {
				/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
				goto rx_processing_done;
			}
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
						| BD_ENET_RX_LAST)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
				if (status & BD_ENET_RX_LAST)
					netdev_err(ndev, "rcv is not +last\n");
			}
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			/* Report late collisions as a frame error. */
			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
				ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
		ndev->stats.rx_bytes += pkt_len;

		index = fec_enet_get_bd_index(bdp, &rxq->bd);
		skb = rxq->rx_skbuff[index];

		/* The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
						  need_swap);
		if (!is_copybreak) {
			skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
			if (unlikely(!skb_new)) {
				ndev->stats.rx_dropped++;
				goto rx_processing_done;
			}
			dma_unmap_single(&fep->pdev->dev,
					 fec32_to_cpu(bdp->cbd_bufaddr),
					 FEC_ENET_RX_FRSIZE - fep->rx_align,
					 DMA_FROM_DEVICE);
		}

		prefetch(skb->data - NET_IP_ALIGN);
		skb_put(skb, pkt_len - 4);
		data = skb->data;

		if (!is_copybreak && need_swap)
			swap_buffer(data, pkt_len);

#if !defined(CONFIG_M5272)
		if (fep->quirks & FEC_QUIRK_HAS_RACC)
			data = skb_pull_inline(skb, 2);
#endif

		/* Extract the enhanced buffer descriptor */
		ebdp = NULL;
		if (fep->bufdesc_ex)
			ebdp = (struct bufdesc_ex *)bdp;

		/* If this is a VLAN packet remove the VLAN Tag */
		vlan_packet_rcvd = false;
		if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    fep->bufdesc_ex &&
		    (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
			/* Push and remove the vlan tag */
			struct vlan_hdr *vlan_header =
					(struct vlan_hdr *) (data + ETH_HLEN);
			vlan_tag = ntohs(vlan_header->h_vlan_TCI);

			vlan_packet_rcvd = true;

			memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
			skb_pull(skb, VLAN_HLEN);
		}

		skb->protocol = eth_type_trans(skb, ndev);

		/* Get receive timestamp from the skb */
		if (fep->hwts_rx_en && fep->bufdesc_ex)
			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
					  skb_hwtstamps(skb));

		if (fep->bufdesc_ex &&
		    (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
			if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
				/* don't check it */
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			} else {
				skb_checksum_none_assert(skb);
			}
		}

		/* Handle received VLAN packets */
		if (vlan_packet_rcvd)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);

		napi_gro_receive(&fep->napi, skb);

		if (is_copybreak) {
			dma_sync_single_for_device(&fep->pdev->dev,
						   fec32_to_cpu(bdp->cbd_bufaddr),
						   FEC_ENET_RX_FRSIZE - fep->rx_align,
						   DMA_FROM_DEVICE);
		} else {
			rxq->rx_skbuff[index] = skb_new;
			fec_enet_new_rxbdp(ndev, bdp, skb_new);
		}

rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}
		/* Make sure the updates to rest of the descriptor are
		 * performed before transferring ownership.
		 */
		wmb();
		bdp->cbd_sc = cpu_to_fec16(status);

		/* Update BD pointer to next entry */
		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);

		/* Doing this here will keep the FEC running while we process
		 * incoming frames. On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, rxq->bd.reg_desc_active);
	}
	rxq->bd.cur = bdp;
	return pkt_received;
}

static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	int pkt_received = 0;
	u16 queue_id;
	struct fec_enet_private *fep = netdev_priv(ndev);

	for_each_set_bit(queue_id, &fep->work_rx, FEC_ENET_MAX_RX_QS) {
		int ret;

		ret = fec_enet_rx_queue(ndev,
					budget - pkt_received, queue_id);

		if (ret < budget - pkt_received)
			clear_bit(queue_id, &fep->work_rx);

		pkt_received += ret;
	}
	return pkt_received;
}

static bool
fec_enet_collect_events(struct fec_enet_private *fep, uint int_events)
{
	if (int_events == 0)
		return false;

	if (int_events & FEC_ENET_RXF_0)
		fep->work_rx |= (1 << 2);
	if (int_events & FEC_ENET_RXF_1)
		fep->work_rx |= (1 << 0);
	if (int_events & FEC_ENET_RXF_2)
		fep->work_rx |= (1 << 1);

	if (int_events & FEC_ENET_TXF_0)
		fep->work_tx |= (1 << 2);
	if (int_events & FEC_ENET_TXF_1)
		fep->work_tx |= (1 << 0);
	if (int_events & FEC_ENET_TXF_2)
		fep->work_tx |= (1 << 1);

	return true;
}

static irqreturn_t
fec_enet_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = dev_id;
	struct fec_enet_private *fep = netdev_priv(ndev);
	uint int_events;
	irqreturn_t ret = IRQ_NONE;

	int_events = readl(fep->hwp + FEC_IEVENT);
	writel(int_events, fep->hwp + FEC_IEVENT);
	fec_enet_collect_events(fep, int_events);

	if ((fep->work_tx || fep->work_rx) && fep->link) {
		ret = IRQ_HANDLED;

		if (napi_schedule_prep(&fep->napi)) {
			/* Disable the NAPI interrupts */
			writel(FEC_NAPI_IMASK, fep->hwp + FEC_IMASK);
			__napi_schedule(&fep->napi);
		}
	}

	if (int_events & FEC_ENET_MII) {
		ret = IRQ_HANDLED;
		complete(&fep->mdio_done);
	}
	return ret;
}

static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
{
	struct net_device *ndev = napi->dev;
	struct fec_enet_private *fep = netdev_priv(ndev);
	int pkts;

	pkts = fec_enet_rx(ndev, budget);

	fec_enet_tx(ndev);

	if (pkts < budget) {
		napi_complete_done(napi, pkts);
		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
	}
	return pkts;
}

/* ------------------------------------------------------------------------- */
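/* The address selection in fec_get_mac() below is strictly ordered, as the
 * numbered comments inside it spell out: the "fec.macaddr" module parameter
 * wins, then the device-tree property, then flash/fuse data from platform
 * data (or FEC_FLASHMAC on M5272), then whatever the bootloader left in the
 * address registers, and finally a random address as the last resort.
 */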
static void fec_get_mac(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	/*
	 * try to get mac address in following order:
	 *
	 * 1) module parameter via kernel command line in form
	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
	 */
	iap = macaddr;

	/*
	 * 2) from device tree data
	 */
	if (!is_valid_ether_addr(iap)) {
		struct device_node *np = fep->pdev->dev.of_node;
		if (np) {
			const char *mac = of_get_mac_address(np);
			if (mac)
				iap = (unsigned char *) mac;
		}
	}

	/*
	 * 3) from flash or fuse (via platform data)
	 */
	if (!is_valid_ether_addr(iap)) {
#ifdef CONFIG_M5272
		if (FEC_FLASHMAC)
			iap = (unsigned char *)FEC_FLASHMAC;
#else
		if (pdata)
			iap = (unsigned char *)&pdata->mac;
#endif
	}

	/*
	 * 4) FEC mac registers set by bootloader
	 */
	if (!is_valid_ether_addr(iap)) {
		*((__be32 *) &tmpaddr[0]) =
			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
		*((__be16 *) &tmpaddr[4]) =
			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	/*
	 * 5) random mac address
	 */
	if (!is_valid_ether_addr(iap)) {
		/* Report it and use a random ethernet address instead */
		netdev_err(ndev, "Invalid MAC address: %pM\n", iap);
		eth_hw_addr_random(ndev);
		netdev_info(ndev, "Using random MAC address: %pM\n",
			    ndev->dev_addr);
		return;
	}

	memcpy(ndev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using macaddr */
	if (iap == macaddr)
		ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}

/* ------------------------------------------------------------------------- */

/*
 * Phy section
 */
static void fec_enet_adjust_link(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int status_change = 0;

	/* Prevent a state halted on mii error */
	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
		phy_dev->state = PHY_RESUMING;
		return;
	}

	/*
	 * If the netdev is down, or is going down, we're not interested
	 * in link state events, so just mark our idea of the link as down
	 * and ignore the event.
	 */
	if (!netif_running(ndev) || !netif_device_present(ndev)) {
		fep->link = 0;
	} else if (phy_dev->link) {
		if (!fep->link) {
			fep->link = phy_dev->link;
			status_change = 1;
		}

		if (fep->full_duplex != phy_dev->duplex) {
			fep->full_duplex = phy_dev->duplex;
			status_change = 1;
		}

		if (phy_dev->speed != fep->speed) {
			fep->speed = phy_dev->speed;
			status_change = 1;
		}

		/* if any of the above changed restart the FEC */
		if (status_change) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_restart(ndev);
			netif_wake_queue(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
		}
	} else {
		if (fep->link) {
			napi_disable(&fep->napi);
			netif_tx_lock_bh(ndev);
			fec_stop(ndev);
			netif_tx_unlock_bh(ndev);
			napi_enable(&fep->napi);
			fep->link = phy_dev->link;
			status_change = 1;
		}
	}

	if (status_change)
		phy_print_status(phy_dev);
}

static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret = 0;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a read op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO read timeout\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));

out:
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}
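/* Both MDIO accessors work the same way (a summary of the code, not of the
 * manual): a management frame is assembled from the FEC_MMFR_* fields defined
 * near the top of the file and written to FEC_MII_DATA, and the caller then
 * sleeps on the mdio_done completion, which the FEC_ENET_MII interrupt
 * signals, for at most FEC_MII_TIMEOUT microseconds.
 */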

static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct fec_enet_private *fep = bus->priv;
	struct device *dev = &fep->pdev->dev;
	unsigned long time_left;
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		return ret;
	else
		ret = 0;

	fep->mii_timeout = 0;
	reinit_completion(&fep->mdio_done);

	/* start a write op */
	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
		FEC_MMFR_TA | FEC_MMFR_DATA(value),
		fep->hwp + FEC_MII_DATA);

	/* wait for end of transfer */
	time_left = wait_for_completion_timeout(&fep->mdio_done,
			usecs_to_jiffies(FEC_MII_TIMEOUT));
	if (time_left == 0) {
		fep->mii_timeout = 1;
		netdev_err(fep->netdev, "MDIO write timeout\n");
		ret = -ETIMEDOUT;
	}

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return ret;
}

static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	if (enable) {
		ret = clk_prepare_enable(fep->clk_ahb);
		if (ret)
			return ret;

		ret = clk_prepare_enable(fep->clk_enet_out);
		if (ret)
			goto failed_clk_enet_out;

		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			ret = clk_prepare_enable(fep->clk_ptp);
			if (ret) {
				mutex_unlock(&fep->ptp_clk_mutex);
				goto failed_clk_ptp;
			} else {
				fep->ptp_clk_on = true;
			}
			mutex_unlock(&fep->ptp_clk_mutex);
		}

		ret = clk_prepare_enable(fep->clk_ref);
		if (ret)
			goto failed_clk_ref;

		phy_reset_after_clk_enable(ndev->phydev);
	} else {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_enet_out);
		if (fep->clk_ptp) {
			mutex_lock(&fep->ptp_clk_mutex);
			clk_disable_unprepare(fep->clk_ptp);
			fep->ptp_clk_on = false;
			mutex_unlock(&fep->ptp_clk_mutex);
		}
		clk_disable_unprepare(fep->clk_ref);
	}

	return 0;

failed_clk_ref:
	if (fep->clk_ref)
		clk_disable_unprepare(fep->clk_ref);
failed_clk_ptp:
	if (fep->clk_enet_out)
		clk_disable_unprepare(fep->clk_enet_out);
failed_clk_enet_out:
	clk_disable_unprepare(fep->clk_ahb);

	return ret;
}

static int fec_enet_mii_probe(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct phy_device *phy_dev = NULL;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	char phy_name[MII_BUS_ID_SIZE + 3];
	int phy_id;
	int dev_id = fep->dev_id;

	if (fep->phy_node) {
		phy_dev = of_phy_connect(ndev, fep->phy_node,
					 &fec_enet_adjust_link, 0,
					 fep->phy_interface);
		if (!phy_dev) {
			netdev_err(ndev, "Unable to connect to phy\n");
			return -ENODEV;
		}
	} else {
		/* check for attached phy */
		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
				continue;
			if (dev_id--)
				continue;
			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
			break;
		}

		if (phy_id >= PHY_MAX_ADDR) {
			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
			phy_id = 0;
		}

		snprintf(phy_name, sizeof(phy_name),
			 PHY_ID_FMT, mdio_bus_id, phy_id);
		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
				      fep->phy_interface);
	}

	if (IS_ERR(phy_dev)) {
		netdev_err(ndev, "could not attach to PHY\n");
		return PTR_ERR(phy_dev);
	}

	/* mask with MAC supported features */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
		phy_dev->supported &= PHY_GBIT_FEATURES;
		phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
#if !defined(CONFIG_M5272)
		phy_dev->supported |= SUPPORTED_Pause;
#endif
	}
	else
		phy_dev->supported &= PHY_BASIC_FEATURES;

	phy_dev->advertising = phy_dev->supported;

	fep->link = 0;
	fep->full_duplex = 0;

	phy_attached_info(phy_dev);

	return 0;
}

static int fec_enet_mii_init(struct platform_device *pdev)
{
	static struct mii_bus *fec0_mii_bus;
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *node;
	int err = -ENXIO;
	u32 mii_speed, holdtime;

	/*
	 * The i.MX28 dual fec interfaces are not equal.
	 * Here are the differences:
	 *
	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
	 *  - fec0 acts as the 1588 time master while fec1 is slave
	 *  - external phys can only be configured by fec0
	 *
	 * That is to say fec1 can not work independently. It only works
	 * when fec0 is working. The reason behind this design is that the
	 * second interface is added primarily for Switch mode.
	 *
	 * Because of the last point above, both phys are attached on fec0
	 * mdio interface in board design, and need to be configured by
	 * fec0 mii_bus.
	 */
	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
		/* fec1 uses fec0 mii_bus */
		if (mii_cnt && fec0_mii_bus) {
			fep->mii_bus = fec0_mii_bus;
			mii_cnt++;
			return 0;
		}
		return -ENOENT;
	}

	fep->mii_timeout = 0;

	/*
	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
	 *
	 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
	 * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
	 * Reference Manual has an error on this, which is fixed in the
	 * i.MX6Q documentation.
	 */
	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
	if (fep->quirks & FEC_QUIRK_ENET_MAC)
		mii_speed--;
	if (mii_speed > 63) {
		dev_err(&pdev->dev,
			"fec clock (%lu) too fast to get right mii speed\n",
			clk_get_rate(fep->clk_ipg));
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
	 * versions are RAZ there, so just ignore the difference and write the
	 * register always.
	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
	 * output.
	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
	 * holdtime cannot result in a value greater than 3.
	 */
	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;

	fep->phy_speed = mii_speed << 1 | holdtime << 8;

	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
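	/* A worked example of the two formulas above (illustrative numbers,
	 * not a statement about any particular board): with a 66 MHz ipg
	 * clock, DIV_ROUND_UP(66000000, 5000000) = 14, minus one on ENET-MAC
	 * parts gives MII_SPEED = 13, so MDC = 66 MHz / ((13 + 1) * 2), about
	 * 2.36 MHz; and DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. a
	 * one-cycle (~15 ns) hold, which already satisfies the 10 ns minimum.
	 */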
2033 */ 2034 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2035 2036 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2037 2038 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2039 2040 fep->mii_bus = mdiobus_alloc(); 2041 if (fep->mii_bus == NULL) { 2042 err = -ENOMEM; 2043 goto err_out; 2044 } 2045 2046 fep->mii_bus->name = "fec_enet_mii_bus"; 2047 fep->mii_bus->read = fec_enet_mdio_read; 2048 fep->mii_bus->write = fec_enet_mdio_write; 2049 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2050 pdev->name, fep->dev_id + 1); 2051 fep->mii_bus->priv = fep; 2052 fep->mii_bus->parent = &pdev->dev; 2053 2054 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2055 if (node) { 2056 err = of_mdiobus_register(fep->mii_bus, node); 2057 of_node_put(node); 2058 } else { 2059 err = mdiobus_register(fep->mii_bus); 2060 } 2061 2062 if (err) 2063 goto err_out_free_mdiobus; 2064 2065 mii_cnt++; 2066 2067 /* save fec0 mii_bus */ 2068 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2069 fec0_mii_bus = fep->mii_bus; 2070 2071 return 0; 2072 2073 err_out_free_mdiobus: 2074 mdiobus_free(fep->mii_bus); 2075 err_out: 2076 return err; 2077 } 2078 2079 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2080 { 2081 if (--mii_cnt == 0) { 2082 mdiobus_unregister(fep->mii_bus); 2083 mdiobus_free(fep->mii_bus); 2084 } 2085 } 2086 2087 static void fec_enet_get_drvinfo(struct net_device *ndev, 2088 struct ethtool_drvinfo *info) 2089 { 2090 struct fec_enet_private *fep = netdev_priv(ndev); 2091 2092 strlcpy(info->driver, fep->pdev->dev.driver->name, 2093 sizeof(info->driver)); 2094 strlcpy(info->version, "Revision: 1.0", sizeof(info->version)); 2095 strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2096 } 2097 2098 static int fec_enet_get_regs_len(struct net_device *ndev) 2099 { 2100 struct fec_enet_private *fep = netdev_priv(ndev); 2101 struct resource *r; 2102 int s = 0; 2103 2104 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2105 if (r) 2106 s = resource_size(r); 2107 2108 return s; 2109 } 2110 2111 /* List of registers that can be safety be read to dump them with ethtool */ 2112 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2113 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2114 defined(CONFIG_ARM64) 2115 static u32 fec_enet_register_offset[] = { 2116 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2117 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2118 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2119 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2120 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2121 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2122 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2123 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2124 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2125 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2126 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2127 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2128 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2129 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2130 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2131 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2132 RMON_T_P_GTE2048, RMON_T_OCTETS, 2133 
IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2134 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2135 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2136 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2137 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2138 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2139 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2140 RMON_R_P_GTE2048, RMON_R_OCTETS, 2141 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2142 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2143 }; 2144 #else 2145 static u32 fec_enet_register_offset[] = { 2146 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2147 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2148 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2149 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2150 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2151 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2152 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0, 2153 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2154 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2155 }; 2156 #endif 2157 2158 static void fec_enet_get_regs(struct net_device *ndev, 2159 struct ethtool_regs *regs, void *regbuf) 2160 { 2161 struct fec_enet_private *fep = netdev_priv(ndev); 2162 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2163 u32 *buf = (u32 *)regbuf; 2164 u32 i, off; 2165 2166 memset(buf, 0, regs->len); 2167 2168 for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { 2169 off = fec_enet_register_offset[i] / 4; 2170 buf[off] = readl(&theregs[off]); 2171 } 2172 } 2173 2174 static int fec_enet_get_ts_info(struct net_device *ndev, 2175 struct ethtool_ts_info *info) 2176 { 2177 struct fec_enet_private *fep = netdev_priv(ndev); 2178 2179 if (fep->bufdesc_ex) { 2180 2181 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2182 SOF_TIMESTAMPING_RX_SOFTWARE | 2183 SOF_TIMESTAMPING_SOFTWARE | 2184 SOF_TIMESTAMPING_TX_HARDWARE | 2185 SOF_TIMESTAMPING_RX_HARDWARE | 2186 SOF_TIMESTAMPING_RAW_HARDWARE; 2187 if (fep->ptp_clock) 2188 info->phc_index = ptp_clock_index(fep->ptp_clock); 2189 else 2190 info->phc_index = -1; 2191 2192 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2193 (1 << HWTSTAMP_TX_ON); 2194 2195 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2196 (1 << HWTSTAMP_FILTER_ALL); 2197 return 0; 2198 } else { 2199 return ethtool_op_get_ts_info(ndev, info); 2200 } 2201 } 2202 2203 #if !defined(CONFIG_M5272) 2204 2205 static void fec_enet_get_pauseparam(struct net_device *ndev, 2206 struct ethtool_pauseparam *pause) 2207 { 2208 struct fec_enet_private *fep = netdev_priv(ndev); 2209 2210 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2211 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2212 pause->rx_pause = pause->tx_pause; 2213 } 2214 2215 static int fec_enet_set_pauseparam(struct net_device *ndev, 2216 struct ethtool_pauseparam *pause) 2217 { 2218 struct fec_enet_private *fep = netdev_priv(ndev); 2219 2220 if (!ndev->phydev) 2221 return -ENODEV; 2222 2223 if (pause->tx_pause != pause->rx_pause) { 2224 netdev_info(ndev, 2225 "hardware only support enable/disable both tx and rx"); 2226 return -EINVAL; 2227 } 2228 2229 fep->pause_flag = 0; 2230 2231 /* tx pause must be same as rx pause */ 2232 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2233 fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; 2234 2235 if (pause->rx_pause || pause->autoneg) { 2236 ndev->phydev->supported |= ADVERTISED_Pause; 2237 ndev->phydev->advertising |= ADVERTISED_Pause; 2238 } else { 2239 ndev->phydev->supported &= ~ADVERTISED_Pause; 2240 ndev->phydev->advertising &= ~ADVERTISED_Pause; 2241 } 2242 2243 if (pause->autoneg) { 2244 if (netif_running(ndev)) 2245 fec_stop(ndev); 2246 phy_start_aneg(ndev->phydev); 2247 } 2248 if (netif_running(ndev)) { 2249 napi_disable(&fep->napi); 2250 netif_tx_lock_bh(ndev); 2251 fec_restart(ndev); 2252 netif_wake_queue(ndev); 2253 netif_tx_unlock_bh(ndev); 2254 napi_enable(&fep->napi); 2255 } 2256 2257 return 0; 2258 } 2259 2260 static const struct fec_stat { 2261 char name[ETH_GSTRING_LEN]; 2262 u16 offset; 2263 } fec_stats[] = { 2264 /* RMON TX */ 2265 { "tx_dropped", RMON_T_DROP }, 2266 { "tx_packets", RMON_T_PACKETS }, 2267 { "tx_broadcast", RMON_T_BC_PKT }, 2268 { "tx_multicast", RMON_T_MC_PKT }, 2269 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2270 { "tx_undersize", RMON_T_UNDERSIZE }, 2271 { "tx_oversize", RMON_T_OVERSIZE }, 2272 { "tx_fragment", RMON_T_FRAG }, 2273 { "tx_jabber", RMON_T_JAB }, 2274 { "tx_collision", RMON_T_COL }, 2275 { "tx_64byte", RMON_T_P64 }, 2276 { "tx_65to127byte", RMON_T_P65TO127 }, 2277 { "tx_128to255byte", RMON_T_P128TO255 }, 2278 { "tx_256to511byte", RMON_T_P256TO511 }, 2279 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2280 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2281 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2282 { "tx_octets", RMON_T_OCTETS }, 2283 2284 /* IEEE TX */ 2285 { "IEEE_tx_drop", IEEE_T_DROP }, 2286 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2287 { "IEEE_tx_1col", IEEE_T_1COL }, 2288 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2289 { "IEEE_tx_def", IEEE_T_DEF }, 2290 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2291 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2292 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2293 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2294 { "IEEE_tx_sqe", IEEE_T_SQE }, 2295 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2296 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2297 2298 /* RMON RX */ 2299 { "rx_packets", RMON_R_PACKETS }, 2300 { "rx_broadcast", RMON_R_BC_PKT }, 2301 { "rx_multicast", RMON_R_MC_PKT }, 2302 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2303 { "rx_undersize", RMON_R_UNDERSIZE }, 2304 { "rx_oversize", RMON_R_OVERSIZE }, 2305 { "rx_fragment", RMON_R_FRAG }, 2306 { "rx_jabber", RMON_R_JAB }, 2307 { "rx_64byte", RMON_R_P64 }, 2308 { "rx_65to127byte", RMON_R_P65TO127 }, 2309 { "rx_128to255byte", RMON_R_P128TO255 }, 2310 { "rx_256to511byte", RMON_R_P256TO511 }, 2311 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2312 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2313 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2314 { "rx_octets", RMON_R_OCTETS }, 2315 2316 /* IEEE RX */ 2317 { "IEEE_rx_drop", IEEE_R_DROP }, 2318 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2319 { "IEEE_rx_crc", IEEE_R_CRC }, 2320 { "IEEE_rx_align", IEEE_R_ALIGN }, 2321 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2322 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2323 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2324 }; 2325 2326 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2327 2328 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2329 { 2330 struct fec_enet_private *fep = netdev_priv(dev); 2331 int i; 2332 2333 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2334 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2335 } 2336 2337 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2338 struct ethtool_stats *stats, u64 *data) 2339 { 2340 struct 
fec_enet_private *fep = netdev_priv(dev); 2341 2342 if (netif_running(dev)) 2343 fec_enet_update_ethtool_stats(dev); 2344 2345 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 2346 } 2347 2348 static void fec_enet_get_strings(struct net_device *netdev, 2349 u32 stringset, u8 *data) 2350 { 2351 int i; 2352 switch (stringset) { 2353 case ETH_SS_STATS: 2354 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2355 memcpy(data + i * ETH_GSTRING_LEN, 2356 fec_stats[i].name, ETH_GSTRING_LEN); 2357 break; 2358 } 2359 } 2360 2361 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2362 { 2363 switch (sset) { 2364 case ETH_SS_STATS: 2365 return ARRAY_SIZE(fec_stats); 2366 default: 2367 return -EOPNOTSUPP; 2368 } 2369 } 2370 2371 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 2372 { 2373 struct fec_enet_private *fep = netdev_priv(dev); 2374 int i; 2375 2376 /* Disable MIB statistics counters */ 2377 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 2378 2379 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2380 writel(0, fep->hwp + fec_stats[i].offset); 2381 2382 /* Don't disable MIB statistics counters */ 2383 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 2384 } 2385 2386 #else /* !defined(CONFIG_M5272) */ 2387 #define FEC_STATS_SIZE 0 2388 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 2389 { 2390 } 2391 2392 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 2393 { 2394 } 2395 #endif /* !defined(CONFIG_M5272) */ 2396 2397 /* ITR clock source is enet system clock (clk_ahb). 2398 * TCTT unit is cycle_ns * 64 cycle 2399 * So, the ICTT value = X us / (cycle_ns * 64) 2400 */ 2401 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2402 { 2403 struct fec_enet_private *fep = netdev_priv(ndev); 2404 2405 return us * (fep->itr_clk_rate / 64000) / 1000; 2406 } 2407 2408 /* Set threshold for interrupt coalescing */ 2409 static void fec_enet_itr_coal_set(struct net_device *ndev) 2410 { 2411 struct fec_enet_private *fep = netdev_priv(ndev); 2412 int rx_itr, tx_itr; 2413 2414 /* Must be greater than zero to avoid unpredictable behavior */ 2415 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2416 !fep->tx_time_itr || !fep->tx_pkts_itr) 2417 return; 2418 2419 /* Select enet system clock as Interrupt Coalescing 2420 * timer Clock Source 2421 */ 2422 rx_itr = FEC_ITR_CLK_SEL; 2423 tx_itr = FEC_ITR_CLK_SEL; 2424 2425 /* set ICFT and ICTT */ 2426 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2427 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2428 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2429 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2430 2431 rx_itr |= FEC_ITR_EN; 2432 tx_itr |= FEC_ITR_EN; 2433 2434 writel(tx_itr, fep->hwp + FEC_TXIC0); 2435 writel(rx_itr, fep->hwp + FEC_RXIC0); 2436 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 2437 writel(tx_itr, fep->hwp + FEC_TXIC1); 2438 writel(rx_itr, fep->hwp + FEC_RXIC1); 2439 writel(tx_itr, fep->hwp + FEC_TXIC2); 2440 writel(rx_itr, fep->hwp + FEC_RXIC2); 2441 } 2442 } 2443 2444 static int 2445 fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) 2446 { 2447 struct fec_enet_private *fep = netdev_priv(ndev); 2448 2449 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 2450 return -EOPNOTSUPP; 2451 2452 ec->rx_coalesce_usecs = fep->rx_time_itr; 2453 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 2454 2455 ec->tx_coalesce_usecs = fep->tx_time_itr; 2456 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 2457 2458 return 0; 
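        /* Worked example for the conversion above (illustrative, assuming a
         * 66 MHz AHB/ITR clock): one coalescing timer tick is 64 / 66000000 s
         * ~= 0.97 us, so fec_enet_us_to_itr_clock(ndev, 1000) returns
         * 1000 * (66000000 / 64000) / 1000 = 1031 ticks, well under the
         * 16-bit (0xFFFF) limit enforced by fec_enet_set_coalesce() below.
         */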
}

static int
fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
        struct fec_enet_private *fep = netdev_priv(ndev);
        unsigned int cycle;

        if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
                return -EOPNOTSUPP;

        if (ec->rx_max_coalesced_frames > 255) {
                pr_err("Rx coalesced frames exceed hardware limitation\n");
                return -EINVAL;
        }

        if (ec->tx_max_coalesced_frames > 255) {
                pr_err("Tx coalesced frames exceed hardware limitation\n");
                return -EINVAL;
        }

        /* Validate the requested values, not the currently programmed ones */
        cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
        if (cycle > 0xFFFF) {
                pr_err("Rx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }

        cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
        if (cycle > 0xFFFF) {
                pr_err("Tx coalesced usec exceed hardware limitation\n");
                return -EINVAL;
        }

        fep->rx_time_itr = ec->rx_coalesce_usecs;
        fep->rx_pkts_itr = ec->rx_max_coalesced_frames;

        fep->tx_time_itr = ec->tx_coalesce_usecs;
        fep->tx_pkts_itr = ec->tx_max_coalesced_frames;

        fec_enet_itr_coal_set(ndev);

        return 0;
}

static void fec_enet_itr_coal_init(struct net_device *ndev)
{
        struct ethtool_coalesce ec;

        ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
        ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

        ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
        ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;

        fec_enet_set_coalesce(ndev, &ec);
}

static int fec_enet_get_tunable(struct net_device *netdev,
                                const struct ethtool_tunable *tuna,
                                void *data)
{
        struct fec_enet_private *fep = netdev_priv(netdev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)data = fep->rx_copybreak;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int fec_enet_set_tunable(struct net_device *netdev,
                                const struct ethtool_tunable *tuna,
                                const void *data)
{
        struct fec_enet_private *fep = netdev_priv(netdev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                fep->rx_copybreak = *(u32 *)data;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static void
fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
        struct fec_enet_private *fep = netdev_priv(ndev);

        if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
                wol->supported = WAKE_MAGIC;
                wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ?
WAKE_MAGIC : 0; 2562 } else { 2563 wol->supported = wol->wolopts = 0; 2564 } 2565 } 2566 2567 static int 2568 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 2569 { 2570 struct fec_enet_private *fep = netdev_priv(ndev); 2571 2572 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 2573 return -EINVAL; 2574 2575 if (wol->wolopts & ~WAKE_MAGIC) 2576 return -EINVAL; 2577 2578 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 2579 if (device_may_wakeup(&ndev->dev)) { 2580 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 2581 if (fep->irq[0] > 0) 2582 enable_irq_wake(fep->irq[0]); 2583 } else { 2584 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 2585 if (fep->irq[0] > 0) 2586 disable_irq_wake(fep->irq[0]); 2587 } 2588 2589 return 0; 2590 } 2591 2592 static const struct ethtool_ops fec_enet_ethtool_ops = { 2593 .get_drvinfo = fec_enet_get_drvinfo, 2594 .get_regs_len = fec_enet_get_regs_len, 2595 .get_regs = fec_enet_get_regs, 2596 .nway_reset = phy_ethtool_nway_reset, 2597 .get_link = ethtool_op_get_link, 2598 .get_coalesce = fec_enet_get_coalesce, 2599 .set_coalesce = fec_enet_set_coalesce, 2600 #ifndef CONFIG_M5272 2601 .get_pauseparam = fec_enet_get_pauseparam, 2602 .set_pauseparam = fec_enet_set_pauseparam, 2603 .get_strings = fec_enet_get_strings, 2604 .get_ethtool_stats = fec_enet_get_ethtool_stats, 2605 .get_sset_count = fec_enet_get_sset_count, 2606 #endif 2607 .get_ts_info = fec_enet_get_ts_info, 2608 .get_tunable = fec_enet_get_tunable, 2609 .set_tunable = fec_enet_set_tunable, 2610 .get_wol = fec_enet_get_wol, 2611 .set_wol = fec_enet_set_wol, 2612 .get_link_ksettings = phy_ethtool_get_link_ksettings, 2613 .set_link_ksettings = phy_ethtool_set_link_ksettings, 2614 }; 2615 2616 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 2617 { 2618 struct fec_enet_private *fep = netdev_priv(ndev); 2619 struct phy_device *phydev = ndev->phydev; 2620 2621 if (!netif_running(ndev)) 2622 return -EINVAL; 2623 2624 if (!phydev) 2625 return -ENODEV; 2626 2627 if (fep->bufdesc_ex) { 2628 if (cmd == SIOCSHWTSTAMP) 2629 return fec_ptp_set(ndev, rq); 2630 if (cmd == SIOCGHWTSTAMP) 2631 return fec_ptp_get(ndev, rq); 2632 } 2633 2634 return phy_mii_ioctl(phydev, rq, cmd); 2635 } 2636 2637 static void fec_enet_free_buffers(struct net_device *ndev) 2638 { 2639 struct fec_enet_private *fep = netdev_priv(ndev); 2640 unsigned int i; 2641 struct sk_buff *skb; 2642 struct bufdesc *bdp; 2643 struct fec_enet_priv_tx_q *txq; 2644 struct fec_enet_priv_rx_q *rxq; 2645 unsigned int q; 2646 2647 for (q = 0; q < fep->num_rx_queues; q++) { 2648 rxq = fep->rx_queue[q]; 2649 bdp = rxq->bd.base; 2650 for (i = 0; i < rxq->bd.ring_size; i++) { 2651 skb = rxq->rx_skbuff[i]; 2652 rxq->rx_skbuff[i] = NULL; 2653 if (skb) { 2654 dma_unmap_single(&fep->pdev->dev, 2655 fec32_to_cpu(bdp->cbd_bufaddr), 2656 FEC_ENET_RX_FRSIZE - fep->rx_align, 2657 DMA_FROM_DEVICE); 2658 dev_kfree_skb(skb); 2659 } 2660 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2661 } 2662 } 2663 2664 for (q = 0; q < fep->num_tx_queues; q++) { 2665 txq = fep->tx_queue[q]; 2666 bdp = txq->bd.base; 2667 for (i = 0; i < txq->bd.ring_size; i++) { 2668 kfree(txq->tx_bounce[i]); 2669 txq->tx_bounce[i] = NULL; 2670 skb = txq->tx_skbuff[i]; 2671 txq->tx_skbuff[i] = NULL; 2672 dev_kfree_skb(skb); 2673 } 2674 } 2675 } 2676 2677 static void fec_enet_free_queue(struct net_device *ndev) 2678 { 2679 struct fec_enet_private *fep = netdev_priv(ndev); 2680 int i; 2681 struct fec_enet_priv_tx_q *txq; 2682 2683 for (i = 0; i < 
fep->num_tx_queues; i++) 2684 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 2685 txq = fep->tx_queue[i]; 2686 dma_free_coherent(&fep->pdev->dev, 2687 txq->bd.ring_size * TSO_HEADER_SIZE, 2688 txq->tso_hdrs, 2689 txq->tso_hdrs_dma); 2690 } 2691 2692 for (i = 0; i < fep->num_rx_queues; i++) 2693 kfree(fep->rx_queue[i]); 2694 for (i = 0; i < fep->num_tx_queues; i++) 2695 kfree(fep->tx_queue[i]); 2696 } 2697 2698 static int fec_enet_alloc_queue(struct net_device *ndev) 2699 { 2700 struct fec_enet_private *fep = netdev_priv(ndev); 2701 int i; 2702 int ret = 0; 2703 struct fec_enet_priv_tx_q *txq; 2704 2705 for (i = 0; i < fep->num_tx_queues; i++) { 2706 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 2707 if (!txq) { 2708 ret = -ENOMEM; 2709 goto alloc_failed; 2710 } 2711 2712 fep->tx_queue[i] = txq; 2713 txq->bd.ring_size = TX_RING_SIZE; 2714 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 2715 2716 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 2717 txq->tx_wake_threshold = 2718 (txq->bd.ring_size - txq->tx_stop_threshold) / 2; 2719 2720 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, 2721 txq->bd.ring_size * TSO_HEADER_SIZE, 2722 &txq->tso_hdrs_dma, 2723 GFP_KERNEL); 2724 if (!txq->tso_hdrs) { 2725 ret = -ENOMEM; 2726 goto alloc_failed; 2727 } 2728 } 2729 2730 for (i = 0; i < fep->num_rx_queues; i++) { 2731 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 2732 GFP_KERNEL); 2733 if (!fep->rx_queue[i]) { 2734 ret = -ENOMEM; 2735 goto alloc_failed; 2736 } 2737 2738 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 2739 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 2740 } 2741 return ret; 2742 2743 alloc_failed: 2744 fec_enet_free_queue(ndev); 2745 return ret; 2746 } 2747 2748 static int 2749 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 2750 { 2751 struct fec_enet_private *fep = netdev_priv(ndev); 2752 unsigned int i; 2753 struct sk_buff *skb; 2754 struct bufdesc *bdp; 2755 struct fec_enet_priv_rx_q *rxq; 2756 2757 rxq = fep->rx_queue[queue]; 2758 bdp = rxq->bd.base; 2759 for (i = 0; i < rxq->bd.ring_size; i++) { 2760 skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 2761 if (!skb) 2762 goto err_alloc; 2763 2764 if (fec_enet_new_rxbdp(ndev, bdp, skb)) { 2765 dev_kfree_skb(skb); 2766 goto err_alloc; 2767 } 2768 2769 rxq->rx_skbuff[i] = skb; 2770 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 2771 2772 if (fep->bufdesc_ex) { 2773 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2774 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 2775 } 2776 2777 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 2778 } 2779 2780 /* Set the last buffer to wrap. 
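         * (Setting BD_SC_WRAP on the final descriptor makes the controller's
         * DMA wrap back to rxq->bd.base after it, closing the receive ring.)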
*/ 2781 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 2782 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2783 return 0; 2784 2785 err_alloc: 2786 fec_enet_free_buffers(ndev); 2787 return -ENOMEM; 2788 } 2789 2790 static int 2791 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 2792 { 2793 struct fec_enet_private *fep = netdev_priv(ndev); 2794 unsigned int i; 2795 struct bufdesc *bdp; 2796 struct fec_enet_priv_tx_q *txq; 2797 2798 txq = fep->tx_queue[queue]; 2799 bdp = txq->bd.base; 2800 for (i = 0; i < txq->bd.ring_size; i++) { 2801 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 2802 if (!txq->tx_bounce[i]) 2803 goto err_alloc; 2804 2805 bdp->cbd_sc = cpu_to_fec16(0); 2806 bdp->cbd_bufaddr = cpu_to_fec32(0); 2807 2808 if (fep->bufdesc_ex) { 2809 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 2810 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 2811 } 2812 2813 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 2814 } 2815 2816 /* Set the last buffer to wrap. */ 2817 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 2818 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 2819 2820 return 0; 2821 2822 err_alloc: 2823 fec_enet_free_buffers(ndev); 2824 return -ENOMEM; 2825 } 2826 2827 static int fec_enet_alloc_buffers(struct net_device *ndev) 2828 { 2829 struct fec_enet_private *fep = netdev_priv(ndev); 2830 unsigned int i; 2831 2832 for (i = 0; i < fep->num_rx_queues; i++) 2833 if (fec_enet_alloc_rxq_buffers(ndev, i)) 2834 return -ENOMEM; 2835 2836 for (i = 0; i < fep->num_tx_queues; i++) 2837 if (fec_enet_alloc_txq_buffers(ndev, i)) 2838 return -ENOMEM; 2839 return 0; 2840 } 2841 2842 static int 2843 fec_enet_open(struct net_device *ndev) 2844 { 2845 struct fec_enet_private *fep = netdev_priv(ndev); 2846 int ret; 2847 bool reset_again; 2848 2849 ret = pm_runtime_get_sync(&fep->pdev->dev); 2850 if (ret < 0) 2851 return ret; 2852 2853 pinctrl_pm_select_default_state(&fep->pdev->dev); 2854 ret = fec_enet_clk_enable(ndev, true); 2855 if (ret) 2856 goto clk_enable; 2857 2858 /* During the first fec_enet_open call the PHY isn't probed at this 2859 * point. Therefore the phy_reset_after_clk_enable() call within 2860 * fec_enet_clk_enable() fails. As we need this reset in order to be 2861 * sure the PHY is working correctly we check if we need to reset again 2862 * later when the PHY is probed 2863 */ 2864 if (ndev->phydev && ndev->phydev->drv) 2865 reset_again = false; 2866 else 2867 reset_again = true; 2868 2869 /* I should reset the ring buffers here, but I don't yet know 2870 * a simple way to do that. 2871 */ 2872 2873 ret = fec_enet_alloc_buffers(ndev); 2874 if (ret) 2875 goto err_enet_alloc; 2876 2877 /* Init MAC prior to mii bus probe */ 2878 fec_restart(ndev); 2879 2880 /* Probe and connect to PHY when open the interface */ 2881 ret = fec_enet_mii_probe(ndev); 2882 if (ret) 2883 goto err_enet_mii_probe; 2884 2885 /* Call phy_reset_after_clk_enable() again if it failed during 2886 * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
2887 */ 2888 if (reset_again) 2889 phy_reset_after_clk_enable(ndev->phydev); 2890 2891 if (fep->quirks & FEC_QUIRK_ERR006687) 2892 imx6q_cpuidle_fec_irqs_used(); 2893 2894 napi_enable(&fep->napi); 2895 phy_start(ndev->phydev); 2896 netif_tx_start_all_queues(ndev); 2897 2898 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 2899 FEC_WOL_FLAG_ENABLE); 2900 2901 return 0; 2902 2903 err_enet_mii_probe: 2904 fec_enet_free_buffers(ndev); 2905 err_enet_alloc: 2906 fec_enet_clk_enable(ndev, false); 2907 clk_enable: 2908 pm_runtime_mark_last_busy(&fep->pdev->dev); 2909 pm_runtime_put_autosuspend(&fep->pdev->dev); 2910 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2911 return ret; 2912 } 2913 2914 static int 2915 fec_enet_close(struct net_device *ndev) 2916 { 2917 struct fec_enet_private *fep = netdev_priv(ndev); 2918 2919 phy_stop(ndev->phydev); 2920 2921 if (netif_device_present(ndev)) { 2922 napi_disable(&fep->napi); 2923 netif_tx_disable(ndev); 2924 fec_stop(ndev); 2925 } 2926 2927 phy_disconnect(ndev->phydev); 2928 2929 if (fep->quirks & FEC_QUIRK_ERR006687) 2930 imx6q_cpuidle_fec_irqs_unused(); 2931 2932 fec_enet_update_ethtool_stats(ndev); 2933 2934 fec_enet_clk_enable(ndev, false); 2935 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 2936 pm_runtime_mark_last_busy(&fep->pdev->dev); 2937 pm_runtime_put_autosuspend(&fep->pdev->dev); 2938 2939 fec_enet_free_buffers(ndev); 2940 2941 return 0; 2942 } 2943 2944 /* Set or clear the multicast filter for this adaptor. 2945 * Skeleton taken from sunlance driver. 2946 * The CPM Ethernet implementation allows Multicast as well as individual 2947 * MAC address filtering. Some of the drivers check to make sure it is 2948 * a group multicast address, and discard those that are not. I guess I 2949 * will do the same for now, but just remove the test if you want 2950 * individual filtering as well (do the upper net layers want or support 2951 * this kind of feature?). 2952 */ 2953 2954 #define FEC_HASH_BITS 6 /* #bits in hash */ 2955 #define CRC32_POLY 0xEDB88320 2956 2957 static void set_multicast_list(struct net_device *ndev) 2958 { 2959 struct fec_enet_private *fep = netdev_priv(ndev); 2960 struct netdev_hw_addr *ha; 2961 unsigned int i, bit, data, crc, tmp; 2962 unsigned char hash; 2963 unsigned int hash_high = 0, hash_low = 0; 2964 2965 if (ndev->flags & IFF_PROMISC) { 2966 tmp = readl(fep->hwp + FEC_R_CNTRL); 2967 tmp |= 0x8; 2968 writel(tmp, fep->hwp + FEC_R_CNTRL); 2969 return; 2970 } 2971 2972 tmp = readl(fep->hwp + FEC_R_CNTRL); 2973 tmp &= ~0x8; 2974 writel(tmp, fep->hwp + FEC_R_CNTRL); 2975 2976 if (ndev->flags & IFF_ALLMULTI) { 2977 /* Catch all multicast addresses, so set the 2978 * filter to all 1's 2979 */ 2980 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 2981 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 2982 2983 return; 2984 } 2985 2986 /* Add the addresses in hash register */ 2987 netdev_for_each_mc_addr(ha, ndev) { 2988 /* calculate crc32 value of mac address */ 2989 crc = 0xffffffff; 2990 2991 for (i = 0; i < ndev->addr_len; i++) { 2992 data = ha->addr[i]; 2993 for (bit = 0; bit < 8; bit++, data >>= 1) { 2994 crc = (crc >> 1) ^ 2995 (((crc ^ data) & 1) ? 
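                        /* Illustrative example (hash value assumed): if the
                         * resulting crc >> 26 were 37, hash > 31 would hold
                         * and bit (37 - 32) = 5 of FEC_GRP_HASH_TABLE_HIGH
                         * would be set; a value of 9 would instead set bit 9
                         * of FEC_GRP_HASH_TABLE_LOW.
                         */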
CRC32_POLY : 0); 2996 } 2997 } 2998 2999 /* only upper 6 bits (FEC_HASH_BITS) are used 3000 * which point to specific bit in the hash registers 3001 */ 3002 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3003 3004 if (hash > 31) 3005 hash_high |= 1 << (hash - 32); 3006 else 3007 hash_low |= 1 << hash; 3008 } 3009 3010 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3011 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3012 } 3013 3014 /* Set a MAC change in hardware. */ 3015 static int 3016 fec_set_mac_address(struct net_device *ndev, void *p) 3017 { 3018 struct fec_enet_private *fep = netdev_priv(ndev); 3019 struct sockaddr *addr = p; 3020 3021 if (addr) { 3022 if (!is_valid_ether_addr(addr->sa_data)) 3023 return -EADDRNOTAVAIL; 3024 memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len); 3025 } 3026 3027 /* Add netif status check here to avoid system hang in below case: 3028 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3029 * After ethx down, fec all clocks are gated off and then register 3030 * access causes system hang. 3031 */ 3032 if (!netif_running(ndev)) 3033 return 0; 3034 3035 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3036 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3037 fep->hwp + FEC_ADDR_LOW); 3038 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3039 fep->hwp + FEC_ADDR_HIGH); 3040 return 0; 3041 } 3042 3043 #ifdef CONFIG_NET_POLL_CONTROLLER 3044 /** 3045 * fec_poll_controller - FEC Poll controller function 3046 * @dev: The FEC network adapter 3047 * 3048 * Polled functionality used by netconsole and others in non interrupt mode 3049 * 3050 */ 3051 static void fec_poll_controller(struct net_device *dev) 3052 { 3053 int i; 3054 struct fec_enet_private *fep = netdev_priv(dev); 3055 3056 for (i = 0; i < FEC_IRQ_NUM; i++) { 3057 if (fep->irq[i] > 0) { 3058 disable_irq(fep->irq[i]); 3059 fec_enet_interrupt(fep->irq[i], dev); 3060 enable_irq(fep->irq[i]); 3061 } 3062 } 3063 } 3064 #endif 3065 3066 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3067 netdev_features_t features) 3068 { 3069 struct fec_enet_private *fep = netdev_priv(netdev); 3070 netdev_features_t changed = features ^ netdev->features; 3071 3072 netdev->features = features; 3073 3074 /* Receive checksum has been changed */ 3075 if (changed & NETIF_F_RXCSUM) { 3076 if (features & NETIF_F_RXCSUM) 3077 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3078 else 3079 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3080 } 3081 } 3082 3083 static int fec_set_features(struct net_device *netdev, 3084 netdev_features_t features) 3085 { 3086 struct fec_enet_private *fep = netdev_priv(netdev); 3087 netdev_features_t changed = features ^ netdev->features; 3088 3089 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3090 napi_disable(&fep->napi); 3091 netif_tx_lock_bh(netdev); 3092 fec_stop(netdev); 3093 fec_enet_set_netdev_features(netdev, features); 3094 fec_restart(netdev); 3095 netif_tx_wake_all_queues(netdev); 3096 netif_tx_unlock_bh(netdev); 3097 napi_enable(&fep->napi); 3098 } else { 3099 fec_enet_set_netdev_features(netdev, features); 3100 } 3101 3102 return 0; 3103 } 3104 3105 static const struct net_device_ops fec_netdev_ops = { 3106 .ndo_open = fec_enet_open, 3107 .ndo_stop = fec_enet_close, 3108 .ndo_start_xmit = fec_enet_start_xmit, 3109 .ndo_set_rx_mode = set_multicast_list, 3110 .ndo_validate_addr = eth_validate_addr, 3111 .ndo_tx_timeout = fec_timeout, 3112 .ndo_set_mac_address = fec_set_mac_address, 3113 .ndo_do_ioctl = 
fec_enet_ioctl, 3114 #ifdef CONFIG_NET_POLL_CONTROLLER 3115 .ndo_poll_controller = fec_poll_controller, 3116 #endif 3117 .ndo_set_features = fec_set_features, 3118 }; 3119 3120 static const unsigned short offset_des_active_rxq[] = { 3121 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 3122 }; 3123 3124 static const unsigned short offset_des_active_txq[] = { 3125 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 3126 }; 3127 3128 /* 3129 * XXX: We need to clean up on failure exits here. 3130 * 3131 */ 3132 static int fec_enet_init(struct net_device *ndev) 3133 { 3134 struct fec_enet_private *fep = netdev_priv(ndev); 3135 struct bufdesc *cbd_base; 3136 dma_addr_t bd_dma; 3137 int bd_size; 3138 unsigned int i; 3139 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 3140 sizeof(struct bufdesc); 3141 unsigned dsize_log2 = __fls(dsize); 3142 3143 WARN_ON(dsize != (1 << dsize_log2)); 3144 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 3145 fep->rx_align = 0xf; 3146 fep->tx_align = 0xf; 3147 #else 3148 fep->rx_align = 0x3; 3149 fep->tx_align = 0x3; 3150 #endif 3151 3152 fec_enet_alloc_queue(ndev); 3153 3154 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 3155 3156 /* Allocate memory for buffer descriptors. */ 3157 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, 3158 GFP_KERNEL); 3159 if (!cbd_base) { 3160 return -ENOMEM; 3161 } 3162 3163 memset(cbd_base, 0, bd_size); 3164 3165 /* Get the Ethernet address */ 3166 fec_get_mac(ndev); 3167 /* make sure MAC we just acquired is programmed into the hw */ 3168 fec_set_mac_address(ndev, NULL); 3169 3170 /* Set receive and transmit descriptor base. */ 3171 for (i = 0; i < fep->num_rx_queues; i++) { 3172 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 3173 unsigned size = dsize * rxq->bd.ring_size; 3174 3175 rxq->bd.qid = i; 3176 rxq->bd.base = cbd_base; 3177 rxq->bd.cur = cbd_base; 3178 rxq->bd.dma = bd_dma; 3179 rxq->bd.dsize = dsize; 3180 rxq->bd.dsize_log2 = dsize_log2; 3181 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 3182 bd_dma += size; 3183 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3184 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3185 } 3186 3187 for (i = 0; i < fep->num_tx_queues; i++) { 3188 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 3189 unsigned size = dsize * txq->bd.ring_size; 3190 3191 txq->bd.qid = i; 3192 txq->bd.base = cbd_base; 3193 txq->bd.cur = cbd_base; 3194 txq->bd.dma = bd_dma; 3195 txq->bd.dsize = dsize; 3196 txq->bd.dsize_log2 = dsize_log2; 3197 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 3198 bd_dma += size; 3199 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 3200 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 3201 } 3202 3203 3204 /* The FEC Ethernet specific entries in the device structure */ 3205 ndev->watchdog_timeo = TX_TIMEOUT; 3206 ndev->netdev_ops = &fec_netdev_ops; 3207 ndev->ethtool_ops = &fec_enet_ethtool_ops; 3208 3209 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 3210 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); 3211 3212 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 3213 /* enable hw VLAN support */ 3214 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 3215 3216 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 3217 ndev->gso_max_segs = FEC_MAX_TSO_SEGS; 3218 3219 /* enable hw accelerator */ 3220 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 3221 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 3222 fep->csum_flags |= 
FLAG_RX_CSUM_ENABLED; 3223 } 3224 3225 if (fep->quirks & FEC_QUIRK_HAS_AVB) { 3226 fep->tx_align = 0; 3227 fep->rx_align = 0x3f; 3228 } 3229 3230 ndev->hw_features = ndev->features; 3231 3232 fec_restart(ndev); 3233 3234 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 3235 fec_enet_clear_ethtool_stats(ndev); 3236 else 3237 fec_enet_update_ethtool_stats(ndev); 3238 3239 return 0; 3240 } 3241 3242 #ifdef CONFIG_OF 3243 static int fec_reset_phy(struct platform_device *pdev) 3244 { 3245 int err, phy_reset; 3246 bool active_high = false; 3247 int msec = 1, phy_post_delay = 0; 3248 struct device_node *np = pdev->dev.of_node; 3249 3250 if (!np) 3251 return 0; 3252 3253 err = of_property_read_u32(np, "phy-reset-duration", &msec); 3254 /* A sane reset duration should not be longer than 1s */ 3255 if (!err && msec > 1000) 3256 msec = 1; 3257 3258 phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0); 3259 if (phy_reset == -EPROBE_DEFER) 3260 return phy_reset; 3261 else if (!gpio_is_valid(phy_reset)) 3262 return 0; 3263 3264 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 3265 /* valid reset duration should be less than 1s */ 3266 if (!err && phy_post_delay > 1000) 3267 return -EINVAL; 3268 3269 active_high = of_property_read_bool(np, "phy-reset-active-high"); 3270 3271 err = devm_gpio_request_one(&pdev->dev, phy_reset, 3272 active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, 3273 "phy-reset"); 3274 if (err) { 3275 dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err); 3276 return err; 3277 } 3278 3279 if (msec > 20) 3280 msleep(msec); 3281 else 3282 usleep_range(msec * 1000, msec * 1000 + 1000); 3283 3284 gpio_set_value_cansleep(phy_reset, !active_high); 3285 3286 if (!phy_post_delay) 3287 return 0; 3288 3289 if (phy_post_delay > 20) 3290 msleep(phy_post_delay); 3291 else 3292 usleep_range(phy_post_delay * 1000, 3293 phy_post_delay * 1000 + 1000); 3294 3295 return 0; 3296 } 3297 #else /* CONFIG_OF */ 3298 static int fec_reset_phy(struct platform_device *pdev) 3299 { 3300 /* 3301 * In case of platform probe, the reset has been done 3302 * by machine code. 
3303 */ 3304 return 0; 3305 } 3306 #endif /* CONFIG_OF */ 3307 3308 static void 3309 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 3310 { 3311 struct device_node *np = pdev->dev.of_node; 3312 3313 *num_tx = *num_rx = 1; 3314 3315 if (!np || !of_device_is_available(np)) 3316 return; 3317 3318 /* parse the num of tx and rx queues */ 3319 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 3320 3321 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 3322 3323 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 3324 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 3325 *num_tx); 3326 *num_tx = 1; 3327 return; 3328 } 3329 3330 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 3331 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 3332 *num_rx); 3333 *num_rx = 1; 3334 return; 3335 } 3336 3337 } 3338 3339 static int fec_enet_get_irq_cnt(struct platform_device *pdev) 3340 { 3341 int irq_cnt = platform_irq_count(pdev); 3342 3343 if (irq_cnt > FEC_IRQ_NUM) 3344 irq_cnt = FEC_IRQ_NUM; /* last for pps */ 3345 else if (irq_cnt == 2) 3346 irq_cnt = 1; /* last for pps */ 3347 else if (irq_cnt <= 0) 3348 irq_cnt = 1; /* At least 1 irq is needed */ 3349 return irq_cnt; 3350 } 3351 3352 static int 3353 fec_probe(struct platform_device *pdev) 3354 { 3355 struct fec_enet_private *fep; 3356 struct fec_platform_data *pdata; 3357 struct net_device *ndev; 3358 int i, irq, ret = 0; 3359 struct resource *r; 3360 const struct of_device_id *of_id; 3361 static int dev_id; 3362 struct device_node *np = pdev->dev.of_node, *phy_node; 3363 int num_tx_qs; 3364 int num_rx_qs; 3365 char irq_name[8]; 3366 int irq_cnt; 3367 3368 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 3369 3370 /* Init network device */ 3371 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 3372 FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 3373 if (!ndev) 3374 return -ENOMEM; 3375 3376 SET_NETDEV_DEV(ndev, &pdev->dev); 3377 3378 /* setup board info structure */ 3379 fep = netdev_priv(ndev); 3380 3381 of_id = of_match_device(fec_dt_ids, &pdev->dev); 3382 if (of_id) 3383 pdev->id_entry = of_id->data; 3384 fep->quirks = pdev->id_entry->driver_data; 3385 3386 fep->netdev = ndev; 3387 fep->num_rx_queues = num_rx_qs; 3388 fep->num_tx_queues = num_tx_qs; 3389 3390 #if !defined(CONFIG_M5272) 3391 /* default enable pause frame auto negotiation */ 3392 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 3393 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 3394 #endif 3395 3396 /* Select default pin state */ 3397 pinctrl_pm_select_default_state(&pdev->dev); 3398 3399 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 3400 fep->hwp = devm_ioremap_resource(&pdev->dev, r); 3401 if (IS_ERR(fep->hwp)) { 3402 ret = PTR_ERR(fep->hwp); 3403 goto failed_ioremap; 3404 } 3405 3406 fep->pdev = pdev; 3407 fep->dev_id = dev_id++; 3408 3409 platform_set_drvdata(pdev, ndev); 3410 3411 if ((of_machine_is_compatible("fsl,imx6q") || 3412 of_machine_is_compatible("fsl,imx6dl")) && 3413 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 3414 fep->quirks |= FEC_QUIRK_ERR006687; 3415 3416 if (of_get_property(np, "fsl,magic-packet", NULL)) 3417 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 3418 3419 phy_node = of_parse_phandle(np, "phy-handle", 0); 3420 if (!phy_node && of_phy_is_fixed_link(np)) { 3421 ret = of_phy_register_fixed_link(np); 3422 if (ret < 0) { 3423 dev_err(&pdev->dev, 3424 "broken fixed-link specification\n"); 3425 goto failed_phy; 3426 } 3427 phy_node = of_node_get(np); 3428 } 3429 fep->phy_node = 
phy_node; 3430 3431 ret = of_get_phy_mode(pdev->dev.of_node); 3432 if (ret < 0) { 3433 pdata = dev_get_platdata(&pdev->dev); 3434 if (pdata) 3435 fep->phy_interface = pdata->phy; 3436 else 3437 fep->phy_interface = PHY_INTERFACE_MODE_MII; 3438 } else { 3439 fep->phy_interface = ret; 3440 } 3441 3442 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 3443 if (IS_ERR(fep->clk_ipg)) { 3444 ret = PTR_ERR(fep->clk_ipg); 3445 goto failed_clk; 3446 } 3447 3448 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 3449 if (IS_ERR(fep->clk_ahb)) { 3450 ret = PTR_ERR(fep->clk_ahb); 3451 goto failed_clk; 3452 } 3453 3454 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 3455 3456 /* enet_out is optional, depends on board */ 3457 fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out"); 3458 if (IS_ERR(fep->clk_enet_out)) 3459 fep->clk_enet_out = NULL; 3460 3461 fep->ptp_clk_on = false; 3462 mutex_init(&fep->ptp_clk_mutex); 3463 3464 /* clk_ref is optional, depends on board */ 3465 fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); 3466 if (IS_ERR(fep->clk_ref)) 3467 fep->clk_ref = NULL; 3468 3469 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 3470 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 3471 if (IS_ERR(fep->clk_ptp)) { 3472 fep->clk_ptp = NULL; 3473 fep->bufdesc_ex = false; 3474 } 3475 3476 ret = fec_enet_clk_enable(ndev, true); 3477 if (ret) 3478 goto failed_clk; 3479 3480 ret = clk_prepare_enable(fep->clk_ipg); 3481 if (ret) 3482 goto failed_clk_ipg; 3483 3484 fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); 3485 if (!IS_ERR(fep->reg_phy)) { 3486 ret = regulator_enable(fep->reg_phy); 3487 if (ret) { 3488 dev_err(&pdev->dev, 3489 "Failed to enable phy regulator: %d\n", ret); 3490 clk_disable_unprepare(fep->clk_ipg); 3491 goto failed_regulator; 3492 } 3493 } else { 3494 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 3495 ret = -EPROBE_DEFER; 3496 goto failed_regulator; 3497 } 3498 fep->reg_phy = NULL; 3499 } 3500 3501 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 3502 pm_runtime_use_autosuspend(&pdev->dev); 3503 pm_runtime_get_noresume(&pdev->dev); 3504 pm_runtime_set_active(&pdev->dev); 3505 pm_runtime_enable(&pdev->dev); 3506 3507 ret = fec_reset_phy(pdev); 3508 if (ret) 3509 goto failed_reset; 3510 3511 irq_cnt = fec_enet_get_irq_cnt(pdev); 3512 if (fep->bufdesc_ex) 3513 fec_ptp_init(pdev, irq_cnt); 3514 3515 ret = fec_enet_init(ndev); 3516 if (ret) 3517 goto failed_init; 3518 3519 for (i = 0; i < irq_cnt; i++) { 3520 sprintf(irq_name, "int%d", i); 3521 irq = platform_get_irq_byname(pdev, irq_name); 3522 if (irq < 0) 3523 irq = platform_get_irq(pdev, i); 3524 if (irq < 0) { 3525 ret = irq; 3526 goto failed_irq; 3527 } 3528 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 3529 0, pdev->name, ndev); 3530 if (ret) 3531 goto failed_irq; 3532 3533 fep->irq[i] = irq; 3534 } 3535 3536 init_completion(&fep->mdio_done); 3537 ret = fec_enet_mii_init(pdev); 3538 if (ret) 3539 goto failed_mii_init; 3540 3541 /* Carrier starts down, phylib will bring it up */ 3542 netif_carrier_off(ndev); 3543 fec_enet_clk_enable(ndev, false); 3544 pinctrl_pm_select_sleep_state(&pdev->dev); 3545 3546 ret = register_netdev(ndev); 3547 if (ret) 3548 goto failed_register; 3549 3550 device_init_wakeup(&ndev->dev, fep->wol_flag & 3551 FEC_WOL_HAS_MAGIC_PACKET); 3552 3553 if (fep->bufdesc_ex && fep->ptp_clock) 3554 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 3555 3556 fep->rx_copybreak = COPYBREAK_DEFAULT; 3557 INIT_WORK(&fep->tx_timeout_work, 
fec_enet_timeout_work); 3558 3559 pm_runtime_mark_last_busy(&pdev->dev); 3560 pm_runtime_put_autosuspend(&pdev->dev); 3561 3562 return 0; 3563 3564 failed_register: 3565 fec_enet_mii_remove(fep); 3566 failed_mii_init: 3567 failed_irq: 3568 failed_init: 3569 fec_ptp_stop(pdev); 3570 if (fep->reg_phy) 3571 regulator_disable(fep->reg_phy); 3572 failed_reset: 3573 pm_runtime_put(&pdev->dev); 3574 pm_runtime_disable(&pdev->dev); 3575 failed_regulator: 3576 failed_clk_ipg: 3577 fec_enet_clk_enable(ndev, false); 3578 failed_clk: 3579 if (of_phy_is_fixed_link(np)) 3580 of_phy_deregister_fixed_link(np); 3581 of_node_put(phy_node); 3582 failed_phy: 3583 dev_id--; 3584 failed_ioremap: 3585 free_netdev(ndev); 3586 3587 return ret; 3588 } 3589 3590 static int 3591 fec_drv_remove(struct platform_device *pdev) 3592 { 3593 struct net_device *ndev = platform_get_drvdata(pdev); 3594 struct fec_enet_private *fep = netdev_priv(ndev); 3595 struct device_node *np = pdev->dev.of_node; 3596 3597 cancel_work_sync(&fep->tx_timeout_work); 3598 fec_ptp_stop(pdev); 3599 unregister_netdev(ndev); 3600 fec_enet_mii_remove(fep); 3601 if (fep->reg_phy) 3602 regulator_disable(fep->reg_phy); 3603 if (of_phy_is_fixed_link(np)) 3604 of_phy_deregister_fixed_link(np); 3605 of_node_put(fep->phy_node); 3606 free_netdev(ndev); 3607 3608 return 0; 3609 } 3610 3611 static int __maybe_unused fec_suspend(struct device *dev) 3612 { 3613 struct net_device *ndev = dev_get_drvdata(dev); 3614 struct fec_enet_private *fep = netdev_priv(ndev); 3615 3616 rtnl_lock(); 3617 if (netif_running(ndev)) { 3618 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 3619 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 3620 phy_stop(ndev->phydev); 3621 napi_disable(&fep->napi); 3622 netif_tx_lock_bh(ndev); 3623 netif_device_detach(ndev); 3624 netif_tx_unlock_bh(ndev); 3625 fec_stop(ndev); 3626 fec_enet_clk_enable(ndev, false); 3627 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3628 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3629 } 3630 rtnl_unlock(); 3631 3632 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 3633 regulator_disable(fep->reg_phy); 3634 3635 /* SOC supply clock to phy, when clock is disabled, phy link down 3636 * SOC control phy regulator, when regulator is disabled, phy link down 3637 */ 3638 if (fep->clk_enet_out || fep->reg_phy) 3639 fep->link = 0; 3640 3641 return 0; 3642 } 3643 3644 static int __maybe_unused fec_resume(struct device *dev) 3645 { 3646 struct net_device *ndev = dev_get_drvdata(dev); 3647 struct fec_enet_private *fep = netdev_priv(ndev); 3648 struct fec_platform_data *pdata = fep->pdev->dev.platform_data; 3649 int ret; 3650 int val; 3651 3652 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 3653 ret = regulator_enable(fep->reg_phy); 3654 if (ret) 3655 return ret; 3656 } 3657 3658 rtnl_lock(); 3659 if (netif_running(ndev)) { 3660 ret = fec_enet_clk_enable(ndev, true); 3661 if (ret) { 3662 rtnl_unlock(); 3663 goto failed_clk; 3664 } 3665 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 3666 if (pdata && pdata->sleep_mode_enable) 3667 pdata->sleep_mode_enable(false); 3668 val = readl(fep->hwp + FEC_ECNTRL); 3669 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 3670 writel(val, fep->hwp + FEC_ECNTRL); 3671 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 3672 } else { 3673 pinctrl_pm_select_default_state(&fep->pdev->dev); 3674 } 3675 fec_restart(ndev); 3676 netif_tx_lock_bh(ndev); 3677 netif_device_attach(ndev); 3678 netif_tx_unlock_bh(ndev); 3679 napi_enable(&fep->napi); 3680 phy_start(ndev->phydev); 3681 } 3682 rtnl_unlock(); 3683 3684 
return 0; 3685 3686 failed_clk: 3687 if (fep->reg_phy) 3688 regulator_disable(fep->reg_phy); 3689 return ret; 3690 } 3691 3692 static int __maybe_unused fec_runtime_suspend(struct device *dev) 3693 { 3694 struct net_device *ndev = dev_get_drvdata(dev); 3695 struct fec_enet_private *fep = netdev_priv(ndev); 3696 3697 clk_disable_unprepare(fep->clk_ipg); 3698 3699 return 0; 3700 } 3701 3702 static int __maybe_unused fec_runtime_resume(struct device *dev) 3703 { 3704 struct net_device *ndev = dev_get_drvdata(dev); 3705 struct fec_enet_private *fep = netdev_priv(ndev); 3706 3707 return clk_prepare_enable(fep->clk_ipg); 3708 } 3709 3710 static const struct dev_pm_ops fec_pm_ops = { 3711 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 3712 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 3713 }; 3714 3715 static struct platform_driver fec_driver = { 3716 .driver = { 3717 .name = DRIVER_NAME, 3718 .pm = &fec_pm_ops, 3719 .of_match_table = fec_dt_ids, 3720 }, 3721 .id_table = fec_devtype, 3722 .probe = fec_probe, 3723 .remove = fec_drv_remove, 3724 }; 3725 3726 module_platform_driver(fec_driver); 3727 3728 MODULE_ALIAS("platform:"DRIVER_NAME); 3729 MODULE_LICENSE("GPL"); 3730
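/*
 * Illustrative usage of the ethtool interfaces implemented above (the
 * interface name and values below are examples only):
 *
 *   ethtool -C eth0 rx-usecs 1000 rx-frames 200   # interrupt coalescing
 *   ethtool -s eth0 wol g                         # magic packet wake-on-LAN
 *   ethtool --set-tunable eth0 rx-copybreak 256   # rx copybreak threshold
 */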