// SPDX-License-Identifier: GPL-2.0+
/*
 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
 *
 * Right now, I am very wasteful with the buffers. I allocate memory
 * pages and then divide them into 2K frame buffers. This way I know I
 * have buffers large enough to hold one frame within one buffer descriptor.
 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
 * will be much more memory efficient and will easily handle lots of
 * small packets.
 *
 * Much better multiple PHY support by Magnus Damm.
 * Copyright (c) 2000 Ericsson Radio Systems AB.
 *
 * Support for FEC controller of ColdFire processors.
 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
 *
 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
 * Copyright (c) 2004-2006 Macq Electronique SA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/pm_runtime.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <net/selftests.h>
#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/platform_device.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <linux/fec.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/gpio/consumer.h>
#include <linux/prefetch.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <soc/imx/cpuidle.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <asm/cacheflush.h>

#include "fec.h"

static void set_multicast_list(struct net_device *ndev);
static void fec_enet_itr_coal_set(struct net_device *ndev);

#define DRIVER_NAME	"fec"

static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};

/* Pause frame field and FIFO threshold */
#define FEC_ENET_FCE	(1 << 5)
#define FEC_ENET_RSEM_V	0x84
#define FEC_ENET_RSFL_V	16
#define FEC_ENET_RAEM_V	0x8
#define FEC_ENET_RAFL_V	0x8
#define FEC_ENET_OPD_V	0xFFF0
#define FEC_MDIO_PM_TIMEOUT  100 /* ms */

#define FEC_ENET_XDP_PASS          0
#define FEC_ENET_XDP_CONSUMED      BIT(0)
#define FEC_ENET_XDP_TX            BIT(1)
#define FEC_ENET_XDP_REDIR         BIT(2)

struct fec_devinfo {
	u32 quirks;
};

static const struct fec_devinfo fec_imx25_info = {
	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx27_info = {
	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
		  FEC_QUIRK_HAS_MDIO_C45,
};

static const struct fec_devinfo fec_imx28_info = {
	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | 114 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII | 115 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45, 116 }; 117 118 static const struct fec_devinfo fec_imx6q_info = { 119 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 120 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 121 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 122 FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII | 123 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45, 124 }; 125 126 static const struct fec_devinfo fec_mvf600_info = { 127 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC | 128 FEC_QUIRK_HAS_MDIO_C45, 129 }; 130 131 static const struct fec_devinfo fec_imx6x_info = { 132 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 133 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 134 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 135 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 136 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 137 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 138 FEC_QUIRK_HAS_MDIO_C45, 139 }; 140 141 static const struct fec_devinfo fec_imx6ul_info = { 142 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 143 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 144 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 | 145 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC | 146 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII | 147 FEC_QUIRK_HAS_MDIO_C45, 148 }; 149 150 static const struct fec_devinfo fec_imx8mq_info = { 151 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 152 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 153 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 154 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 155 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 156 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 157 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 | 158 FEC_QUIRK_HAS_MDIO_C45, 159 }; 160 161 static const struct fec_devinfo fec_imx8qm_info = { 162 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 163 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 164 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 165 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 166 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | 167 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | 168 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45, 169 }; 170 171 static const struct fec_devinfo fec_s32v234_info = { 172 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 173 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 174 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 175 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 176 FEC_QUIRK_HAS_MDIO_C45, 177 }; 178 179 static struct platform_device_id fec_devtype[] = { 180 { 181 /* keep it for coldfire */ 182 .name = DRIVER_NAME, 183 .driver_data = 0, 184 }, { 185 .name = "imx25-fec", 186 .driver_data = (kernel_ulong_t)&fec_imx25_info, 187 }, { 188 .name = "imx27-fec", 189 .driver_data = (kernel_ulong_t)&fec_imx27_info, 190 }, { 191 .name = "imx28-fec", 192 .driver_data = (kernel_ulong_t)&fec_imx28_info, 193 }, { 194 .name = "imx6q-fec", 195 .driver_data = (kernel_ulong_t)&fec_imx6q_info, 196 }, { 197 .name = "mvf600-fec", 198 .driver_data = (kernel_ulong_t)&fec_mvf600_info, 199 }, { 200 .name = "imx6sx-fec", 201 .driver_data = (kernel_ulong_t)&fec_imx6x_info, 202 }, { 203 .name = "imx6ul-fec", 204 .driver_data = (kernel_ulong_t)&fec_imx6ul_info, 205 }, { 206 .name = "imx8mq-fec", 207 .driver_data = (kernel_ulong_t)&fec_imx8mq_info, 208 }, { 209 .name = "imx8qm-fec", 210 .driver_data = (kernel_ulong_t)&fec_imx8qm_info, 211 }, { 212 .name = 
"s32v234-fec", 213 .driver_data = (kernel_ulong_t)&fec_s32v234_info, 214 }, { 215 /* sentinel */ 216 } 217 }; 218 MODULE_DEVICE_TABLE(platform, fec_devtype); 219 220 enum imx_fec_type { 221 IMX25_FEC = 1, /* runs on i.mx25/50/53 */ 222 IMX27_FEC, /* runs on i.mx27/35/51 */ 223 IMX28_FEC, 224 IMX6Q_FEC, 225 MVF600_FEC, 226 IMX6SX_FEC, 227 IMX6UL_FEC, 228 IMX8MQ_FEC, 229 IMX8QM_FEC, 230 S32V234_FEC, 231 }; 232 233 static const struct of_device_id fec_dt_ids[] = { 234 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], }, 235 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, 236 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, 237 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 238 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, 239 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, 240 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, 241 { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, 242 { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, 243 { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], }, 244 { /* sentinel */ } 245 }; 246 MODULE_DEVICE_TABLE(of, fec_dt_ids); 247 248 static unsigned char macaddr[ETH_ALEN]; 249 module_param_array(macaddr, byte, NULL, 0); 250 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address"); 251 252 #if defined(CONFIG_M5272) 253 /* 254 * Some hardware gets it MAC address out of local flash memory. 255 * if this is non-zero then assume it is the address to get MAC from. 256 */ 257 #if defined(CONFIG_NETtel) 258 #define FEC_FLASHMAC 0xf0006006 259 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES) 260 #define FEC_FLASHMAC 0xf0006000 261 #elif defined(CONFIG_CANCam) 262 #define FEC_FLASHMAC 0xf0020000 263 #elif defined (CONFIG_M5272C3) 264 #define FEC_FLASHMAC (0xffe04000 + 4) 265 #elif defined(CONFIG_MOD5272) 266 #define FEC_FLASHMAC 0xffc0406b 267 #else 268 #define FEC_FLASHMAC 0 269 #endif 270 #endif /* CONFIG_M5272 */ 271 272 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets. 273 * 274 * 2048 byte skbufs are allocated. However, alignment requirements 275 * varies between FEC variants. Worst case is 64, so round down by 64. 276 */ 277 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64)) 278 #define PKT_MINBUF_SIZE 64 279 280 /* FEC receive acceleration */ 281 #define FEC_RACC_IPDIS (1 << 1) 282 #define FEC_RACC_PRODIS (1 << 2) 283 #define FEC_RACC_SHIFT16 BIT(7) 284 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 285 286 /* MIB Control Register */ 287 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31) 288 289 /* 290 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame 291 * size bits. Other FEC hardware does not, so we need to take that into 292 * account when setting it. 
293 */ 294 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 295 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 296 defined(CONFIG_ARM64) 297 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 298 #else 299 #define OPT_FRAME_SIZE 0 300 #endif 301 302 /* FEC MII MMFR bits definition */ 303 #define FEC_MMFR_ST (1 << 30) 304 #define FEC_MMFR_ST_C45 (0) 305 #define FEC_MMFR_OP_READ (2 << 28) 306 #define FEC_MMFR_OP_READ_C45 (3 << 28) 307 #define FEC_MMFR_OP_WRITE (1 << 28) 308 #define FEC_MMFR_OP_ADDR_WRITE (0) 309 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23) 310 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18) 311 #define FEC_MMFR_TA (2 << 16) 312 #define FEC_MMFR_DATA(v) (v & 0xffff) 313 /* FEC ECR bits definition */ 314 #define FEC_ECR_MAGICEN (1 << 2) 315 #define FEC_ECR_SLEEP (1 << 3) 316 317 #define FEC_MII_TIMEOUT 30000 /* us */ 318 319 /* Transmitter timeout */ 320 #define TX_TIMEOUT (2 * HZ) 321 322 #define FEC_PAUSE_FLAG_AUTONEG 0x1 323 #define FEC_PAUSE_FLAG_ENABLE 0x2 324 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0) 325 #define FEC_WOL_FLAG_ENABLE (0x1 << 1) 326 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2) 327 328 #define COPYBREAK_DEFAULT 256 329 330 /* Max number of allowed TCP segments for software TSO */ 331 #define FEC_MAX_TSO_SEGS 100 332 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS) 333 334 #define IS_TSO_HEADER(txq, addr) \ 335 ((addr >= txq->tso_hdrs_dma) && \ 336 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE)) 337 338 static int mii_cnt; 339 340 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, 341 struct bufdesc_prop *bd) 342 { 343 return (bdp >= bd->last) ? bd->base 344 : (struct bufdesc *)(((void *)bdp) + bd->dsize); 345 } 346 347 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, 348 struct bufdesc_prop *bd) 349 { 350 return (bdp <= bd->base) ? bd->last 351 : (struct bufdesc *)(((void *)bdp) - bd->dsize); 352 } 353 354 static int fec_enet_get_bd_index(struct bufdesc *bdp, 355 struct bufdesc_prop *bd) 356 { 357 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2; 358 } 359 360 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq) 361 { 362 int entries; 363 364 entries = (((const char *)txq->dirty_tx - 365 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1; 366 367 return entries >= 0 ? entries : entries + txq->bd.ring_size; 368 } 369 370 static void swap_buffer(void *bufaddr, int len) 371 { 372 int i; 373 unsigned int *buf = bufaddr; 374 375 for (i = 0; i < len; i += 4, buf++) 376 swab32s(buf); 377 } 378 379 static void fec_dump(struct net_device *ndev) 380 { 381 struct fec_enet_private *fep = netdev_priv(ndev); 382 struct bufdesc *bdp; 383 struct fec_enet_priv_tx_q *txq; 384 int index = 0; 385 386 netdev_info(ndev, "TX ring dump\n"); 387 pr_info("Nr SC addr len SKB\n"); 388 389 txq = fep->tx_queue[0]; 390 bdp = txq->bd.base; 391 392 do { 393 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n", 394 index, 395 bdp == txq->bd.cur ? 'S' : ' ', 396 bdp == txq->dirty_tx ? 
'H' : ' ', 397 fec16_to_cpu(bdp->cbd_sc), 398 fec32_to_cpu(bdp->cbd_bufaddr), 399 fec16_to_cpu(bdp->cbd_datlen), 400 txq->tx_buf[index].skb); 401 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 402 index++; 403 } while (bdp != txq->bd.base); 404 } 405 406 static inline bool is_ipv4_pkt(struct sk_buff *skb) 407 { 408 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4; 409 } 410 411 static int 412 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 413 { 414 /* Only run for packets requiring a checksum. */ 415 if (skb->ip_summed != CHECKSUM_PARTIAL) 416 return 0; 417 418 if (unlikely(skb_cow_head(skb, 0))) 419 return -1; 420 421 if (is_ipv4_pkt(skb)) 422 ip_hdr(skb)->check = 0; 423 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 424 425 return 0; 426 } 427 428 static int 429 fec_enet_create_page_pool(struct fec_enet_private *fep, 430 struct fec_enet_priv_rx_q *rxq, int size) 431 { 432 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog); 433 struct page_pool_params pp_params = { 434 .order = 0, 435 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, 436 .pool_size = size, 437 .nid = dev_to_node(&fep->pdev->dev), 438 .dev = &fep->pdev->dev, 439 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE, 440 .offset = FEC_ENET_XDP_HEADROOM, 441 .max_len = FEC_ENET_RX_FRSIZE, 442 }; 443 int err; 444 445 rxq->page_pool = page_pool_create(&pp_params); 446 if (IS_ERR(rxq->page_pool)) { 447 err = PTR_ERR(rxq->page_pool); 448 rxq->page_pool = NULL; 449 return err; 450 } 451 452 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0); 453 if (err < 0) 454 goto err_free_pp; 455 456 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL, 457 rxq->page_pool); 458 if (err) 459 goto err_unregister_rxq; 460 461 return 0; 462 463 err_unregister_rxq: 464 xdp_rxq_info_unreg(&rxq->xdp_rxq); 465 err_free_pp: 466 page_pool_destroy(rxq->page_pool); 467 rxq->page_pool = NULL; 468 return err; 469 } 470 471 static struct bufdesc * 472 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq, 473 struct sk_buff *skb, 474 struct net_device *ndev) 475 { 476 struct fec_enet_private *fep = netdev_priv(ndev); 477 struct bufdesc *bdp = txq->bd.cur; 478 struct bufdesc_ex *ebdp; 479 int nr_frags = skb_shinfo(skb)->nr_frags; 480 int frag, frag_len; 481 unsigned short status; 482 unsigned int estatus = 0; 483 skb_frag_t *this_frag; 484 unsigned int index; 485 void *bufaddr; 486 dma_addr_t addr; 487 int i; 488 489 for (frag = 0; frag < nr_frags; frag++) { 490 this_frag = &skb_shinfo(skb)->frags[frag]; 491 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 492 ebdp = (struct bufdesc_ex *)bdp; 493 494 status = fec16_to_cpu(bdp->cbd_sc); 495 status &= ~BD_ENET_TX_STATS; 496 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 497 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]); 498 499 /* Handle the last BD specially */ 500 if (frag == nr_frags - 1) { 501 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 502 if (fep->bufdesc_ex) { 503 estatus |= BD_ENET_TX_INT; 504 if (unlikely(skb_shinfo(skb)->tx_flags & 505 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 506 estatus |= BD_ENET_TX_TS; 507 } 508 } 509 510 if (fep->bufdesc_ex) { 511 if (fep->quirks & FEC_QUIRK_HAS_AVB) 512 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 513 if (skb->ip_summed == CHECKSUM_PARTIAL) 514 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 515 516 ebdp->cbd_bdu = 0; 517 ebdp->cbd_esc = cpu_to_fec32(estatus); 518 } 519 520 bufaddr = skb_frag_address(this_frag); 521 522 index = fec_enet_get_bd_index(bdp, &txq->bd); 523 if 
(((unsigned long) bufaddr) & fep->tx_align || 524 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 525 memcpy(txq->tx_bounce[index], bufaddr, frag_len); 526 bufaddr = txq->tx_bounce[index]; 527 528 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 529 swap_buffer(bufaddr, frag_len); 530 } 531 532 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len, 533 DMA_TO_DEVICE); 534 if (dma_mapping_error(&fep->pdev->dev, addr)) { 535 if (net_ratelimit()) 536 netdev_err(ndev, "Tx DMA memory map failed\n"); 537 goto dma_mapping_error; 538 } 539 540 bdp->cbd_bufaddr = cpu_to_fec32(addr); 541 bdp->cbd_datlen = cpu_to_fec16(frag_len); 542 /* Make sure the updates to rest of the descriptor are 543 * performed before transferring ownership. 544 */ 545 wmb(); 546 bdp->cbd_sc = cpu_to_fec16(status); 547 } 548 549 return bdp; 550 dma_mapping_error: 551 bdp = txq->bd.cur; 552 for (i = 0; i < frag; i++) { 553 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 554 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr), 555 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE); 556 } 557 return ERR_PTR(-ENOMEM); 558 } 559 560 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq, 561 struct sk_buff *skb, struct net_device *ndev) 562 { 563 struct fec_enet_private *fep = netdev_priv(ndev); 564 int nr_frags = skb_shinfo(skb)->nr_frags; 565 struct bufdesc *bdp, *last_bdp; 566 void *bufaddr; 567 dma_addr_t addr; 568 unsigned short status; 569 unsigned short buflen; 570 unsigned int estatus = 0; 571 unsigned int index; 572 int entries_free; 573 574 entries_free = fec_enet_get_free_txdesc_num(txq); 575 if (entries_free < MAX_SKB_FRAGS + 1) { 576 dev_kfree_skb_any(skb); 577 if (net_ratelimit()) 578 netdev_err(ndev, "NOT enough BD for SG!\n"); 579 return NETDEV_TX_OK; 580 } 581 582 /* Protocol checksum off-load for TCP and UDP. */ 583 if (fec_enet_clear_csum(skb, ndev)) { 584 dev_kfree_skb_any(skb); 585 return NETDEV_TX_OK; 586 } 587 588 /* Fill in a Tx ring entry */ 589 bdp = txq->bd.cur; 590 last_bdp = bdp; 591 status = fec16_to_cpu(bdp->cbd_sc); 592 status &= ~BD_ENET_TX_STATS; 593 594 /* Set buffer length and buffer pointer */ 595 bufaddr = skb->data; 596 buflen = skb_headlen(skb); 597 598 index = fec_enet_get_bd_index(bdp, &txq->bd); 599 if (((unsigned long) bufaddr) & fep->tx_align || 600 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 601 memcpy(txq->tx_bounce[index], skb->data, buflen); 602 bufaddr = txq->tx_bounce[index]; 603 604 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 605 swap_buffer(bufaddr, buflen); 606 } 607 608 /* Push the data cache so the CPM does not get stale memory data. 
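 * On non-coherent platforms the dma_map_single() below performs the
 * required cache write-back for the DMA_TO_DEVICE direction.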
*/ 609 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE); 610 if (dma_mapping_error(&fep->pdev->dev, addr)) { 611 dev_kfree_skb_any(skb); 612 if (net_ratelimit()) 613 netdev_err(ndev, "Tx DMA memory map failed\n"); 614 return NETDEV_TX_OK; 615 } 616 617 if (nr_frags) { 618 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev); 619 if (IS_ERR(last_bdp)) { 620 dma_unmap_single(&fep->pdev->dev, addr, 621 buflen, DMA_TO_DEVICE); 622 dev_kfree_skb_any(skb); 623 return NETDEV_TX_OK; 624 } 625 } else { 626 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 627 if (fep->bufdesc_ex) { 628 estatus = BD_ENET_TX_INT; 629 if (unlikely(skb_shinfo(skb)->tx_flags & 630 SKBTX_HW_TSTAMP && fep->hwts_tx_en)) 631 estatus |= BD_ENET_TX_TS; 632 } 633 } 634 bdp->cbd_bufaddr = cpu_to_fec32(addr); 635 bdp->cbd_datlen = cpu_to_fec16(buflen); 636 637 if (fep->bufdesc_ex) { 638 639 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 640 641 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && 642 fep->hwts_tx_en)) 643 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 644 645 if (fep->quirks & FEC_QUIRK_HAS_AVB) 646 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 647 648 if (skb->ip_summed == CHECKSUM_PARTIAL) 649 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 650 651 ebdp->cbd_bdu = 0; 652 ebdp->cbd_esc = cpu_to_fec32(estatus); 653 } 654 655 index = fec_enet_get_bd_index(last_bdp, &txq->bd); 656 /* Save skb pointer */ 657 txq->tx_buf[index].skb = skb; 658 659 /* Make sure the updates to rest of the descriptor are performed before 660 * transferring ownership. 661 */ 662 wmb(); 663 664 /* Send it on its way. Tell FEC it's ready, interrupt when done, 665 * it's the last BD of the frame, and to put the CRC on the end. 666 */ 667 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 668 bdp->cbd_sc = cpu_to_fec16(status); 669 670 /* If this was the last BD in the ring, start at the beginning again. */ 671 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd); 672 673 skb_tx_timestamp(skb); 674 675 /* Make sure the update to bdp is performed before txq->bd.cur. 
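 * This wmb() pairs with the rmb() in fec_enet_tx_queue(), which orders
 * the read of bd.cur against the subsequent read of cbd_sc.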
*/ 676 wmb(); 677 txq->bd.cur = bdp; 678 679 /* Trigger transmission start */ 680 writel(0, txq->bd.reg_desc_active); 681 682 return 0; 683 } 684 685 static int 686 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, 687 struct net_device *ndev, 688 struct bufdesc *bdp, int index, char *data, 689 int size, bool last_tcp, bool is_last) 690 { 691 struct fec_enet_private *fep = netdev_priv(ndev); 692 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 693 unsigned short status; 694 unsigned int estatus = 0; 695 dma_addr_t addr; 696 697 status = fec16_to_cpu(bdp->cbd_sc); 698 status &= ~BD_ENET_TX_STATS; 699 700 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 701 702 if (((unsigned long) data) & fep->tx_align || 703 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 704 memcpy(txq->tx_bounce[index], data, size); 705 data = txq->tx_bounce[index]; 706 707 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 708 swap_buffer(data, size); 709 } 710 711 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE); 712 if (dma_mapping_error(&fep->pdev->dev, addr)) { 713 dev_kfree_skb_any(skb); 714 if (net_ratelimit()) 715 netdev_err(ndev, "Tx DMA memory map failed\n"); 716 return NETDEV_TX_OK; 717 } 718 719 bdp->cbd_datlen = cpu_to_fec16(size); 720 bdp->cbd_bufaddr = cpu_to_fec32(addr); 721 722 if (fep->bufdesc_ex) { 723 if (fep->quirks & FEC_QUIRK_HAS_AVB) 724 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 725 if (skb->ip_summed == CHECKSUM_PARTIAL) 726 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 727 ebdp->cbd_bdu = 0; 728 ebdp->cbd_esc = cpu_to_fec32(estatus); 729 } 730 731 /* Handle the last BD specially */ 732 if (last_tcp) 733 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC); 734 if (is_last) { 735 status |= BD_ENET_TX_INTR; 736 if (fep->bufdesc_ex) 737 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT); 738 } 739 740 bdp->cbd_sc = cpu_to_fec16(status); 741 742 return 0; 743 } 744 745 static int 746 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, 747 struct sk_buff *skb, struct net_device *ndev, 748 struct bufdesc *bdp, int index) 749 { 750 struct fec_enet_private *fep = netdev_priv(ndev); 751 int hdr_len = skb_tcp_all_headers(skb); 752 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc); 753 void *bufaddr; 754 unsigned long dmabuf; 755 unsigned short status; 756 unsigned int estatus = 0; 757 758 status = fec16_to_cpu(bdp->cbd_sc); 759 status &= ~BD_ENET_TX_STATS; 760 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY); 761 762 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 763 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE; 764 if (((unsigned long)bufaddr) & fep->tx_align || 765 fep->quirks & FEC_QUIRK_SWAP_FRAME) { 766 memcpy(txq->tx_bounce[index], skb->data, hdr_len); 767 bufaddr = txq->tx_bounce[index]; 768 769 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 770 swap_buffer(bufaddr, hdr_len); 771 772 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr, 773 hdr_len, DMA_TO_DEVICE); 774 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) { 775 dev_kfree_skb_any(skb); 776 if (net_ratelimit()) 777 netdev_err(ndev, "Tx DMA memory map failed\n"); 778 return NETDEV_TX_OK; 779 } 780 } 781 782 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf); 783 bdp->cbd_datlen = cpu_to_fec16(hdr_len); 784 785 if (fep->bufdesc_ex) { 786 if (fep->quirks & FEC_QUIRK_HAS_AVB) 787 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 788 if (skb->ip_summed == CHECKSUM_PARTIAL) 789 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS; 790 ebdp->cbd_bdu = 0; 791 ebdp->cbd_esc = cpu_to_fec32(estatus); 792 } 793 794 bdp->cbd_sc = 
cpu_to_fec16(status); 795 796 return 0; 797 } 798 799 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq, 800 struct sk_buff *skb, 801 struct net_device *ndev) 802 { 803 struct fec_enet_private *fep = netdev_priv(ndev); 804 int hdr_len, total_len, data_left; 805 struct bufdesc *bdp = txq->bd.cur; 806 struct tso_t tso; 807 unsigned int index = 0; 808 int ret; 809 810 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) { 811 dev_kfree_skb_any(skb); 812 if (net_ratelimit()) 813 netdev_err(ndev, "NOT enough BD for TSO!\n"); 814 return NETDEV_TX_OK; 815 } 816 817 /* Protocol checksum off-load for TCP and UDP. */ 818 if (fec_enet_clear_csum(skb, ndev)) { 819 dev_kfree_skb_any(skb); 820 return NETDEV_TX_OK; 821 } 822 823 /* Initialize the TSO handler, and prepare the first payload */ 824 hdr_len = tso_start(skb, &tso); 825 826 total_len = skb->len - hdr_len; 827 while (total_len > 0) { 828 char *hdr; 829 830 index = fec_enet_get_bd_index(bdp, &txq->bd); 831 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len); 832 total_len -= data_left; 833 834 /* prepare packet headers: MAC + IP + TCP */ 835 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE; 836 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0); 837 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index); 838 if (ret) 839 goto err_release; 840 841 while (data_left > 0) { 842 int size; 843 844 size = min_t(int, tso.size, data_left); 845 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 846 index = fec_enet_get_bd_index(bdp, &txq->bd); 847 ret = fec_enet_txq_put_data_tso(txq, skb, ndev, 848 bdp, index, 849 tso.data, size, 850 size == data_left, 851 total_len == 0); 852 if (ret) 853 goto err_release; 854 855 data_left -= size; 856 tso_build_data(skb, &tso, size); 857 } 858 859 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 860 } 861 862 /* Save skb pointer */ 863 txq->tx_buf[index].skb = skb; 864 865 skb_tx_timestamp(skb); 866 txq->bd.cur = bdp; 867 868 /* Trigger transmission start */ 869 if (!(fep->quirks & FEC_QUIRK_ERR007885) || 870 !readl(txq->bd.reg_desc_active) || 871 !readl(txq->bd.reg_desc_active) || 872 !readl(txq->bd.reg_desc_active) || 873 !readl(txq->bd.reg_desc_active)) 874 writel(0, txq->bd.reg_desc_active); 875 876 return 0; 877 878 err_release: 879 /* TODO: Release all used data descriptors for TSO */ 880 return ret; 881 } 882 883 static netdev_tx_t 884 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 885 { 886 struct fec_enet_private *fep = netdev_priv(ndev); 887 int entries_free; 888 unsigned short queue; 889 struct fec_enet_priv_tx_q *txq; 890 struct netdev_queue *nq; 891 int ret; 892 893 queue = skb_get_queue_mapping(skb); 894 txq = fep->tx_queue[queue]; 895 nq = netdev_get_tx_queue(ndev, queue); 896 897 if (skb_is_gso(skb)) 898 ret = fec_enet_txq_submit_tso(txq, skb, ndev); 899 else 900 ret = fec_enet_txq_submit_skb(txq, skb, ndev); 901 if (ret) 902 return ret; 903 904 entries_free = fec_enet_get_free_txdesc_num(txq); 905 if (entries_free <= txq->tx_stop_threshold) 906 netif_tx_stop_queue(nq); 907 908 return NETDEV_TX_OK; 909 } 910 911 /* Init RX & TX buffer descriptors 912 */ 913 static void fec_enet_bd_init(struct net_device *dev) 914 { 915 struct fec_enet_private *fep = netdev_priv(dev); 916 struct fec_enet_priv_tx_q *txq; 917 struct fec_enet_priv_rx_q *rxq; 918 struct bufdesc *bdp; 919 unsigned int i; 920 unsigned int q; 921 922 for (q = 0; q < fep->num_rx_queues; q++) { 923 /* Initialize the receive buffer descriptors. 
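 * Descriptors that already have a buffer attached are handed back to the
 * hardware by setting BD_ENET_RX_EMPTY; descriptors without a buffer are
 * left with a status of 0 (owned by software).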
*/ 924 rxq = fep->rx_queue[q]; 925 bdp = rxq->bd.base; 926 927 for (i = 0; i < rxq->bd.ring_size; i++) { 928 929 /* Initialize the BD for every fragment in the page. */ 930 if (bdp->cbd_bufaddr) 931 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 932 else 933 bdp->cbd_sc = cpu_to_fec16(0); 934 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 935 } 936 937 /* Set the last buffer to wrap */ 938 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 939 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 940 941 rxq->bd.cur = rxq->bd.base; 942 } 943 944 for (q = 0; q < fep->num_tx_queues; q++) { 945 /* ...and the same for transmit */ 946 txq = fep->tx_queue[q]; 947 bdp = txq->bd.base; 948 txq->bd.cur = bdp; 949 950 for (i = 0; i < txq->bd.ring_size; i++) { 951 /* Initialize the BD for every fragment in the page. */ 952 bdp->cbd_sc = cpu_to_fec16(0); 953 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 954 if (bdp->cbd_bufaddr && 955 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 956 dma_unmap_single(&fep->pdev->dev, 957 fec32_to_cpu(bdp->cbd_bufaddr), 958 fec16_to_cpu(bdp->cbd_datlen), 959 DMA_TO_DEVICE); 960 if (txq->tx_buf[i].skb) { 961 dev_kfree_skb_any(txq->tx_buf[i].skb); 962 txq->tx_buf[i].skb = NULL; 963 } 964 } else { 965 if (bdp->cbd_bufaddr) 966 dma_unmap_single(&fep->pdev->dev, 967 fec32_to_cpu(bdp->cbd_bufaddr), 968 fec16_to_cpu(bdp->cbd_datlen), 969 DMA_TO_DEVICE); 970 971 if (txq->tx_buf[i].xdp) { 972 xdp_return_frame(txq->tx_buf[i].xdp); 973 txq->tx_buf[i].xdp = NULL; 974 } 975 976 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 977 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 978 } 979 980 bdp->cbd_bufaddr = cpu_to_fec32(0); 981 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 982 } 983 984 /* Set the last buffer to wrap */ 985 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 986 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 987 txq->dirty_tx = bdp; 988 } 989 } 990 991 static void fec_enet_active_rxring(struct net_device *ndev) 992 { 993 struct fec_enet_private *fep = netdev_priv(ndev); 994 int i; 995 996 for (i = 0; i < fep->num_rx_queues; i++) 997 writel(0, fep->rx_queue[i]->bd.reg_desc_active); 998 } 999 1000 static void fec_enet_enable_ring(struct net_device *ndev) 1001 { 1002 struct fec_enet_private *fep = netdev_priv(ndev); 1003 struct fec_enet_priv_tx_q *txq; 1004 struct fec_enet_priv_rx_q *rxq; 1005 int i; 1006 1007 for (i = 0; i < fep->num_rx_queues; i++) { 1008 rxq = fep->rx_queue[i]; 1009 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i)); 1010 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i)); 1011 1012 /* enable DMA1/2 */ 1013 if (i) 1014 writel(RCMR_MATCHEN | RCMR_CMP(i), 1015 fep->hwp + FEC_RCMR(i)); 1016 } 1017 1018 for (i = 0; i < fep->num_tx_queues; i++) { 1019 txq = fep->tx_queue[i]; 1020 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i)); 1021 1022 /* enable DMA1/2 */ 1023 if (i) 1024 writel(DMA_CLASS_EN | IDLE_SLOPE(i), 1025 fep->hwp + FEC_DMA_CFG(i)); 1026 } 1027 } 1028 1029 /* 1030 * This function is called to start or restart the FEC during a link 1031 * change, transmit timeout, or to reconfigure the FEC. The network 1032 * packet processing for this device must be stopped before this call. 1033 */ 1034 static void 1035 fec_restart(struct net_device *ndev) 1036 { 1037 struct fec_enet_private *fep = netdev_priv(ndev); 1038 u32 temp_mac[2]; 1039 u32 rcntl = OPT_FRAME_SIZE | 0x04; 1040 u32 ecntl = 0x2; /* ETHEREN */ 1041 1042 /* Whack a reset. We should wait for this. 1043 * For i.MX6SX SOC, enet use AXI bus, we use disable MAC 1044 * instead of reset MAC itself. 
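 * (On SoCs with FEC_QUIRK_HAS_MULTI_QUEUES, or with FEC_QUIRK_NO_HARD_RESET
 * while the link is up, ECR is cleared instead of writing the reset bit.)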
1045 */ 1046 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES || 1047 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { 1048 writel(0, fep->hwp + FEC_ECNTRL); 1049 } else { 1050 writel(1, fep->hwp + FEC_ECNTRL); 1051 udelay(10); 1052 } 1053 1054 /* 1055 * enet-mac reset will reset mac address registers too, 1056 * so need to reconfigure it. 1057 */ 1058 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); 1059 writel((__force u32)cpu_to_be32(temp_mac[0]), 1060 fep->hwp + FEC_ADDR_LOW); 1061 writel((__force u32)cpu_to_be32(temp_mac[1]), 1062 fep->hwp + FEC_ADDR_HIGH); 1063 1064 /* Clear any outstanding interrupt, except MDIO. */ 1065 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT); 1066 1067 fec_enet_bd_init(ndev); 1068 1069 fec_enet_enable_ring(ndev); 1070 1071 /* Enable MII mode */ 1072 if (fep->full_duplex == DUPLEX_FULL) { 1073 /* FD enable */ 1074 writel(0x04, fep->hwp + FEC_X_CNTRL); 1075 } else { 1076 /* No Rcv on Xmit */ 1077 rcntl |= 0x02; 1078 writel(0x0, fep->hwp + FEC_X_CNTRL); 1079 } 1080 1081 /* Set MII speed */ 1082 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1083 1084 #if !defined(CONFIG_M5272) 1085 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1086 u32 val = readl(fep->hwp + FEC_RACC); 1087 1088 /* align IP header */ 1089 val |= FEC_RACC_SHIFT16; 1090 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 1091 /* set RX checksum */ 1092 val |= FEC_RACC_OPTIONS; 1093 else 1094 val &= ~FEC_RACC_OPTIONS; 1095 writel(val, fep->hwp + FEC_RACC); 1096 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL); 1097 } 1098 #endif 1099 1100 /* 1101 * The phy interface and speed need to get configured 1102 * differently on enet-mac. 1103 */ 1104 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1105 /* Enable flow control and length check */ 1106 rcntl |= 0x40000000 | 0x00000020; 1107 1108 /* RGMII, RMII or MII */ 1109 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII || 1110 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || 1111 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID || 1112 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) 1113 rcntl |= (1 << 6); 1114 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1115 rcntl |= (1 << 8); 1116 else 1117 rcntl &= ~(1 << 8); 1118 1119 /* 1G, 100M or 10M */ 1120 if (ndev->phydev) { 1121 if (ndev->phydev->speed == SPEED_1000) 1122 ecntl |= (1 << 5); 1123 else if (ndev->phydev->speed == SPEED_100) 1124 rcntl &= ~(1 << 9); 1125 else 1126 rcntl |= (1 << 9); 1127 } 1128 } else { 1129 #ifdef FEC_MIIGSK_ENR 1130 if (fep->quirks & FEC_QUIRK_USE_GASKET) { 1131 u32 cfgr; 1132 /* disable the gasket and wait */ 1133 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1134 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1135 udelay(1); 1136 1137 /* 1138 * configure the gasket: 1139 * RMII, 50 MHz, no loopback, no echo 1140 * MII, 25 MHz, no loopback, no echo 1141 */ 1142 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII) 1143 ? 
BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII; 1144 if (ndev->phydev && ndev->phydev->speed == SPEED_10) 1145 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M; 1146 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR); 1147 1148 /* re-enable the gasket */ 1149 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1150 } 1151 #endif 1152 } 1153 1154 #if !defined(CONFIG_M5272) 1155 /* enable pause frame*/ 1156 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || 1157 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && 1158 ndev->phydev && ndev->phydev->pause)) { 1159 rcntl |= FEC_ENET_FCE; 1160 1161 /* set FIFO threshold parameter to reduce overrun */ 1162 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); 1163 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); 1164 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); 1165 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); 1166 1167 /* OPD */ 1168 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); 1169 } else { 1170 rcntl &= ~FEC_ENET_FCE; 1171 } 1172 #endif /* !defined(CONFIG_M5272) */ 1173 1174 writel(rcntl, fep->hwp + FEC_R_CNTRL); 1175 1176 /* Setup multicast filter. */ 1177 set_multicast_list(ndev); 1178 #ifndef CONFIG_M5272 1179 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); 1180 writel(0, fep->hwp + FEC_HASH_TABLE_LOW); 1181 #endif 1182 1183 if (fep->quirks & FEC_QUIRK_ENET_MAC) { 1184 /* enable ENET endian swap */ 1185 ecntl |= (1 << 8); 1186 /* enable ENET store and forward mode */ 1187 writel(1 << 8, fep->hwp + FEC_X_WMRK); 1188 } 1189 1190 if (fep->bufdesc_ex) 1191 ecntl |= (1 << 4); 1192 1193 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1194 fep->rgmii_txc_dly) 1195 ecntl |= FEC_ENET_TXC_DLY; 1196 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && 1197 fep->rgmii_rxc_dly) 1198 ecntl |= FEC_ENET_RXC_DLY; 1199 1200 #ifndef CONFIG_M5272 1201 /* Enable the MIB statistic event counters */ 1202 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); 1203 #endif 1204 1205 /* And last, enable the transmit and receive processing */ 1206 writel(ecntl, fep->hwp + FEC_ECNTRL); 1207 fec_enet_active_rxring(ndev); 1208 1209 if (fep->bufdesc_ex) 1210 fec_ptp_start_cyclecounter(ndev); 1211 1212 /* Enable interrupts we wish to service */ 1213 if (fep->link) 1214 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1215 else 1216 writel(0, fep->hwp + FEC_IMASK); 1217 1218 /* Init the interrupt coalescing */ 1219 if (fep->quirks & FEC_QUIRK_HAS_COALESCE) 1220 fec_enet_itr_coal_set(ndev); 1221 } 1222 1223 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep) 1224 { 1225 if (!(of_machine_is_compatible("fsl,imx8qm") || 1226 of_machine_is_compatible("fsl,imx8qxp") || 1227 of_machine_is_compatible("fsl,imx8dxl"))) 1228 return 0; 1229 1230 return imx_scu_get_handle(&fep->ipc_handle); 1231 } 1232 1233 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled) 1234 { 1235 struct device_node *np = fep->pdev->dev.of_node; 1236 u32 rsrc_id, val; 1237 int idx; 1238 1239 if (!np || !fep->ipc_handle) 1240 return; 1241 1242 idx = of_alias_get_id(np, "ethernet"); 1243 if (idx < 0) 1244 idx = 0; 1245 rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0; 1246 1247 val = enabled ? 
		  1 : 0;
	imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
}

static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;

	if (stop_gpr->gpr) {
		if (enabled)
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit),
					   BIT(stop_gpr->bit));
		else
			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
					   BIT(stop_gpr->bit), 0);
	} else if (pdata && pdata->sleep_mode_enable) {
		pdata->sleep_mode_enable(enabled);
	} else {
		fec_enet_ipg_stop_set(fep, enabled);
	}
}

static void fec_irqs_disable(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
}

static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	writel(0, fep->hwp + FEC_IMASK);
	writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
}

static void
fec_stop(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
	u32 val;

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
	}

	/* Whack a reset. We should wait for this.
	 * On the i.MX6SX SoC the ENET block is on the AXI bus, so we disable
	 * the MAC instead of resetting it.
1304 */ 1305 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1306 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 1307 writel(0, fep->hwp + FEC_ECNTRL); 1308 } else { 1309 writel(1, fep->hwp + FEC_ECNTRL); 1310 udelay(10); 1311 } 1312 } else { 1313 val = readl(fep->hwp + FEC_ECNTRL); 1314 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 1315 writel(val, fep->hwp + FEC_ECNTRL); 1316 } 1317 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1318 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1319 1320 /* We have to keep ENET enabled to have MII interrupt stay working */ 1321 if (fep->quirks & FEC_QUIRK_ENET_MAC && 1322 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) { 1323 writel(2, fep->hwp + FEC_ECNTRL); 1324 writel(rmii_mode, fep->hwp + FEC_R_CNTRL); 1325 } 1326 } 1327 1328 1329 static void 1330 fec_timeout(struct net_device *ndev, unsigned int txqueue) 1331 { 1332 struct fec_enet_private *fep = netdev_priv(ndev); 1333 1334 fec_dump(ndev); 1335 1336 ndev->stats.tx_errors++; 1337 1338 schedule_work(&fep->tx_timeout_work); 1339 } 1340 1341 static void fec_enet_timeout_work(struct work_struct *work) 1342 { 1343 struct fec_enet_private *fep = 1344 container_of(work, struct fec_enet_private, tx_timeout_work); 1345 struct net_device *ndev = fep->netdev; 1346 1347 rtnl_lock(); 1348 if (netif_device_present(ndev) || netif_running(ndev)) { 1349 napi_disable(&fep->napi); 1350 netif_tx_lock_bh(ndev); 1351 fec_restart(ndev); 1352 netif_tx_wake_all_queues(ndev); 1353 netif_tx_unlock_bh(ndev); 1354 napi_enable(&fep->napi); 1355 } 1356 rtnl_unlock(); 1357 } 1358 1359 static void 1360 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, 1361 struct skb_shared_hwtstamps *hwtstamps) 1362 { 1363 unsigned long flags; 1364 u64 ns; 1365 1366 spin_lock_irqsave(&fep->tmreg_lock, flags); 1367 ns = timecounter_cyc2time(&fep->tc, ts); 1368 spin_unlock_irqrestore(&fep->tmreg_lock, flags); 1369 1370 memset(hwtstamps, 0, sizeof(*hwtstamps)); 1371 hwtstamps->hwtstamp = ns_to_ktime(ns); 1372 } 1373 1374 static void 1375 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget) 1376 { 1377 struct fec_enet_private *fep; 1378 struct xdp_frame *xdpf; 1379 struct bufdesc *bdp; 1380 unsigned short status; 1381 struct sk_buff *skb; 1382 struct fec_enet_priv_tx_q *txq; 1383 struct netdev_queue *nq; 1384 int index = 0; 1385 int entries_free; 1386 1387 fep = netdev_priv(ndev); 1388 1389 txq = fep->tx_queue[queue_id]; 1390 /* get next bdp of dirty_tx */ 1391 nq = netdev_get_tx_queue(ndev, queue_id); 1392 bdp = txq->dirty_tx; 1393 1394 /* get next bdp of dirty_tx */ 1395 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1396 1397 while (bdp != READ_ONCE(txq->bd.cur)) { 1398 /* Order the load of bd.cur and cbd_sc */ 1399 rmb(); 1400 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc)); 1401 if (status & BD_ENET_TX_READY) 1402 break; 1403 1404 index = fec_enet_get_bd_index(bdp, &txq->bd); 1405 1406 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1407 skb = txq->tx_buf[index].skb; 1408 txq->tx_buf[index].skb = NULL; 1409 if (bdp->cbd_bufaddr && 1410 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr))) 1411 dma_unmap_single(&fep->pdev->dev, 1412 fec32_to_cpu(bdp->cbd_bufaddr), 1413 fec16_to_cpu(bdp->cbd_datlen), 1414 DMA_TO_DEVICE); 1415 bdp->cbd_bufaddr = cpu_to_fec32(0); 1416 if (!skb) 1417 goto tx_buf_done; 1418 } else { 1419 /* Tx processing cannot call any XDP (or page pool) APIs if 1420 * the "budget" is 0. 
Because NAPI is called with budget of 1421 * 0 (such as netpoll) indicates we may be in an IRQ context, 1422 * however, we can't use the page pool from IRQ context. 1423 */ 1424 if (unlikely(!budget)) 1425 break; 1426 1427 xdpf = txq->tx_buf[index].xdp; 1428 if (bdp->cbd_bufaddr) 1429 dma_unmap_single(&fep->pdev->dev, 1430 fec32_to_cpu(bdp->cbd_bufaddr), 1431 fec16_to_cpu(bdp->cbd_datlen), 1432 DMA_TO_DEVICE); 1433 bdp->cbd_bufaddr = cpu_to_fec32(0); 1434 if (!xdpf) { 1435 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1436 goto tx_buf_done; 1437 } 1438 } 1439 1440 /* Check for errors. */ 1441 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | 1442 BD_ENET_TX_RL | BD_ENET_TX_UN | 1443 BD_ENET_TX_CSL)) { 1444 ndev->stats.tx_errors++; 1445 if (status & BD_ENET_TX_HB) /* No heartbeat */ 1446 ndev->stats.tx_heartbeat_errors++; 1447 if (status & BD_ENET_TX_LC) /* Late collision */ 1448 ndev->stats.tx_window_errors++; 1449 if (status & BD_ENET_TX_RL) /* Retrans limit */ 1450 ndev->stats.tx_aborted_errors++; 1451 if (status & BD_ENET_TX_UN) /* Underrun */ 1452 ndev->stats.tx_fifo_errors++; 1453 if (status & BD_ENET_TX_CSL) /* Carrier lost */ 1454 ndev->stats.tx_carrier_errors++; 1455 } else { 1456 ndev->stats.tx_packets++; 1457 1458 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) 1459 ndev->stats.tx_bytes += skb->len; 1460 else 1461 ndev->stats.tx_bytes += xdpf->len; 1462 } 1463 1464 /* Deferred means some collisions occurred during transmit, 1465 * but we eventually sent the packet OK. 1466 */ 1467 if (status & BD_ENET_TX_DEF) 1468 ndev->stats.collisions++; 1469 1470 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) { 1471 /* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who 1472 * are to time stamp the packet, so we still need to check time 1473 * stamping enabled flag. 1474 */ 1475 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS && 1476 fep->hwts_tx_en) && fep->bufdesc_ex) { 1477 struct skb_shared_hwtstamps shhwtstamps; 1478 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1479 1480 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps); 1481 skb_tstamp_tx(skb, &shhwtstamps); 1482 } 1483 1484 /* Free the sk buffer associated with this last transmit */ 1485 dev_kfree_skb_any(skb); 1486 } else { 1487 xdp_return_frame(xdpf); 1488 1489 txq->tx_buf[index].xdp = NULL; 1490 /* restore default tx buffer type: FEC_TXBUF_T_SKB */ 1491 txq->tx_buf[index].type = FEC_TXBUF_T_SKB; 1492 } 1493 1494 tx_buf_done: 1495 /* Make sure the update to bdp and tx_buf are performed 1496 * before dirty_tx 1497 */ 1498 wmb(); 1499 txq->dirty_tx = bdp; 1500 1501 /* Update pointer to next buffer descriptor to be transmitted */ 1502 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 1503 1504 /* Since we have freed up a buffer, the ring is no longer full 1505 */ 1506 if (netif_tx_queue_stopped(nq)) { 1507 entries_free = fec_enet_get_free_txdesc_num(txq); 1508 if (entries_free >= txq->tx_wake_threshold) 1509 netif_tx_wake_queue(nq); 1510 } 1511 } 1512 1513 /* ERR006358: Keep the transmitter going */ 1514 if (bdp != txq->bd.cur && 1515 readl(txq->bd.reg_desc_active) == 0) 1516 writel(0, txq->bd.reg_desc_active); 1517 } 1518 1519 static void fec_enet_tx(struct net_device *ndev, int budget) 1520 { 1521 struct fec_enet_private *fep = netdev_priv(ndev); 1522 int i; 1523 1524 /* Make sure that AVB queues are processed first. 
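 * Ring 0 is the best-effort ring; rings 1 and 2 are the AVB rings (their
 * idle slope is configured in fec_enet_enable_ring()), hence the reverse
 * iteration below.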
 */
	for (i = fep->num_tx_queues - 1; i >= 0; i--)
		fec_enet_tx_queue(ndev, i, budget);
}

static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
				struct bufdesc *bdp, int index)
{
	struct page *new_page;
	dma_addr_t phys_addr;

	new_page = page_pool_dev_alloc_pages(rxq->page_pool);
	WARN_ON(!new_page);
	rxq->rx_skb_info[index].page = new_page;

	rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
	phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
	bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
}

static u32
fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
		 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int index)
{
	unsigned int sync, len = xdp->data_end - xdp->data;
	u32 ret = FEC_ENET_XDP_PASS;
	struct page *page;
	int err;
	u32 act;

	act = bpf_prog_run_xdp(prog, xdp);

	/* Due to xdp_adjust_tail, the DMA sync length for the device must
	 * cover the maximum length the CPU may have touched.
	 */
	sync = xdp->data_end - xdp->data_hard_start - FEC_ENET_XDP_HEADROOM;
	sync = max(sync, len);

	switch (act) {
	case XDP_PASS:
		rxq->stats[RX_XDP_PASS]++;
		ret = FEC_ENET_XDP_PASS;
		break;

	case XDP_REDIRECT:
		rxq->stats[RX_XDP_REDIRECT]++;
		err = xdp_do_redirect(fep->netdev, xdp, prog);
		if (!err) {
			ret = FEC_ENET_XDP_REDIR;
		} else {
			ret = FEC_ENET_XDP_CONSUMED;
			page = virt_to_head_page(xdp->data);
			page_pool_put_page(rxq->page_pool, page, sync, true);
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
		fallthrough;

	case XDP_TX:
		bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
		fallthrough;

	case XDP_ABORTED:
		fallthrough;    /* handle aborts by dropping packet */

	case XDP_DROP:
		rxq->stats[RX_XDP_DROP]++;
		ret = FEC_ENET_XDP_CONSUMED;
		page = virt_to_head_page(xdp->data);
		page_pool_put_page(rxq->page_pool, page, sync, true);
		break;
	}

	return ret;
}

/* During a receive, the bd_rx.cur points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct fec_enet_priv_rx_q *rxq;
	struct bufdesc *bdp;
	unsigned short status;
	struct sk_buff *skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;
	struct bufdesc_ex *ebdp = NULL;
	bool	vlan_packet_rcvd = false;
	u16	vlan_tag;
	int	index = 0;
	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
	struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
	u32 ret, xdp_result = FEC_ENET_XDP_PASS;
	u32 data_start = FEC_ENET_XDP_HEADROOM;
	struct xdp_buff xdp;
	struct page *page;
	u32 sub_len = 4;

#if !defined(CONFIG_M5272)
	/* If the FEC_QUIRK_HAS_RACC quirk is set, the FEC_RACC_SHIFT16 bit
	 * is enabled by default in the probe function.
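	 * SHIFT16 makes the MAC prepend two bytes of padding to each received
	 * frame so that the IP header ends up 4-byte aligned; data_start and
	 * sub_len are bumped by 2 below to account for it.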
1631 */ 1632 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 1633 data_start += 2; 1634 sub_len += 2; 1635 } 1636 #endif 1637 1638 #ifdef CONFIG_M532x 1639 flush_cache_all(); 1640 #endif 1641 rxq = fep->rx_queue[queue_id]; 1642 1643 /* First, grab all of the stats for the incoming packet. 1644 * These get messed up if we get called due to a busy condition. 1645 */ 1646 bdp = rxq->bd.cur; 1647 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq); 1648 1649 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) { 1650 1651 if (pkt_received >= budget) 1652 break; 1653 pkt_received++; 1654 1655 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT); 1656 1657 /* Check for errors. */ 1658 status ^= BD_ENET_RX_LAST; 1659 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | 1660 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST | 1661 BD_ENET_RX_CL)) { 1662 ndev->stats.rx_errors++; 1663 if (status & BD_ENET_RX_OV) { 1664 /* FIFO overrun */ 1665 ndev->stats.rx_fifo_errors++; 1666 goto rx_processing_done; 1667 } 1668 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH 1669 | BD_ENET_RX_LAST)) { 1670 /* Frame too long or too short. */ 1671 ndev->stats.rx_length_errors++; 1672 if (status & BD_ENET_RX_LAST) 1673 netdev_err(ndev, "rcv is not +last\n"); 1674 } 1675 if (status & BD_ENET_RX_CR) /* CRC Error */ 1676 ndev->stats.rx_crc_errors++; 1677 /* Report late collisions as a frame error. */ 1678 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL)) 1679 ndev->stats.rx_frame_errors++; 1680 goto rx_processing_done; 1681 } 1682 1683 /* Process the incoming frame. */ 1684 ndev->stats.rx_packets++; 1685 pkt_len = fec16_to_cpu(bdp->cbd_datlen); 1686 ndev->stats.rx_bytes += pkt_len; 1687 1688 index = fec_enet_get_bd_index(bdp, &rxq->bd); 1689 page = rxq->rx_skb_info[index].page; 1690 dma_sync_single_for_cpu(&fep->pdev->dev, 1691 fec32_to_cpu(bdp->cbd_bufaddr), 1692 pkt_len, 1693 DMA_FROM_DEVICE); 1694 prefetch(page_address(page)); 1695 fec_enet_update_cbd(rxq, bdp, index); 1696 1697 if (xdp_prog) { 1698 xdp_buff_clear_frags_flag(&xdp); 1699 /* subtract 16bit shift and FCS */ 1700 xdp_prepare_buff(&xdp, page_address(page), 1701 data_start, pkt_len - sub_len, false); 1702 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, index); 1703 xdp_result |= ret; 1704 if (ret != FEC_ENET_XDP_PASS) 1705 goto rx_processing_done; 1706 } 1707 1708 /* The packet length includes FCS, but we don't want to 1709 * include that when passing upstream as it messes up 1710 * bridging applications. 
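 * That is why sub_len (the 4 FCS bytes, plus the two SHIFT16 padding bytes
 * when RACC is in use) is subtracted from the length passed to skb_put()
 * below.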
1711 */ 1712 skb = build_skb(page_address(page), PAGE_SIZE); 1713 if (unlikely(!skb)) { 1714 page_pool_recycle_direct(rxq->page_pool, page); 1715 ndev->stats.rx_dropped++; 1716 1717 netdev_err_once(ndev, "build_skb failed!\n"); 1718 goto rx_processing_done; 1719 } 1720 1721 skb_reserve(skb, data_start); 1722 skb_put(skb, pkt_len - sub_len); 1723 skb_mark_for_recycle(skb); 1724 1725 if (unlikely(need_swap)) { 1726 data = page_address(page) + FEC_ENET_XDP_HEADROOM; 1727 swap_buffer(data, pkt_len); 1728 } 1729 data = skb->data; 1730 1731 /* Extract the enhanced buffer descriptor */ 1732 ebdp = NULL; 1733 if (fep->bufdesc_ex) 1734 ebdp = (struct bufdesc_ex *)bdp; 1735 1736 /* If this is a VLAN packet remove the VLAN Tag */ 1737 vlan_packet_rcvd = false; 1738 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && 1739 fep->bufdesc_ex && 1740 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) { 1741 /* Push and remove the vlan tag */ 1742 struct vlan_hdr *vlan_header = 1743 (struct vlan_hdr *) (data + ETH_HLEN); 1744 vlan_tag = ntohs(vlan_header->h_vlan_TCI); 1745 1746 vlan_packet_rcvd = true; 1747 1748 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2); 1749 skb_pull(skb, VLAN_HLEN); 1750 } 1751 1752 skb->protocol = eth_type_trans(skb, ndev); 1753 1754 /* Get receive timestamp from the skb */ 1755 if (fep->hwts_rx_en && fep->bufdesc_ex) 1756 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), 1757 skb_hwtstamps(skb)); 1758 1759 if (fep->bufdesc_ex && 1760 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { 1761 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) { 1762 /* don't check it */ 1763 skb->ip_summed = CHECKSUM_UNNECESSARY; 1764 } else { 1765 skb_checksum_none_assert(skb); 1766 } 1767 } 1768 1769 /* Handle received VLAN packets */ 1770 if (vlan_packet_rcvd) 1771 __vlan_hwaccel_put_tag(skb, 1772 htons(ETH_P_8021Q), 1773 vlan_tag); 1774 1775 skb_record_rx_queue(skb, queue_id); 1776 napi_gro_receive(&fep->napi, skb); 1777 1778 rx_processing_done: 1779 /* Clear the status flags for this buffer */ 1780 status &= ~BD_ENET_RX_STATS; 1781 1782 /* Mark the buffer empty */ 1783 status |= BD_ENET_RX_EMPTY; 1784 1785 if (fep->bufdesc_ex) { 1786 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 1787 1788 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 1789 ebdp->cbd_prot = 0; 1790 ebdp->cbd_bdu = 0; 1791 } 1792 /* Make sure the updates to rest of the descriptor are 1793 * performed before transferring ownership. 1794 */ 1795 wmb(); 1796 bdp->cbd_sc = cpu_to_fec16(status); 1797 1798 /* Update BD pointer to next entry */ 1799 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 1800 1801 /* Doing this here will keep the FEC running while we process 1802 * incoming frames. On a heavily loaded network, we should be 1803 * able to keep up at the expense of system resources. 1804 */ 1805 writel(0, rxq->bd.reg_desc_active); 1806 } 1807 rxq->bd.cur = bdp; 1808 1809 if (xdp_result & FEC_ENET_XDP_REDIR) 1810 xdp_do_flush_map(); 1811 1812 return pkt_received; 1813 } 1814 1815 static int fec_enet_rx(struct net_device *ndev, int budget) 1816 { 1817 struct fec_enet_private *fep = netdev_priv(ndev); 1818 int i, done = 0; 1819 1820 /* Make sure that AVB queues are processed first. 
*/ 1821 for (i = fep->num_rx_queues - 1; i >= 0; i--) 1822 done += fec_enet_rx_queue(ndev, budget - done, i); 1823 1824 return done; 1825 } 1826 1827 static bool fec_enet_collect_events(struct fec_enet_private *fep) 1828 { 1829 uint int_events; 1830 1831 int_events = readl(fep->hwp + FEC_IEVENT); 1832 1833 /* Don't clear MDIO events, we poll for those */ 1834 int_events &= ~FEC_ENET_MII; 1835 1836 writel(int_events, fep->hwp + FEC_IEVENT); 1837 1838 return int_events != 0; 1839 } 1840 1841 static irqreturn_t 1842 fec_enet_interrupt(int irq, void *dev_id) 1843 { 1844 struct net_device *ndev = dev_id; 1845 struct fec_enet_private *fep = netdev_priv(ndev); 1846 irqreturn_t ret = IRQ_NONE; 1847 1848 if (fec_enet_collect_events(fep) && fep->link) { 1849 ret = IRQ_HANDLED; 1850 1851 if (napi_schedule_prep(&fep->napi)) { 1852 /* Disable interrupts */ 1853 writel(0, fep->hwp + FEC_IMASK); 1854 __napi_schedule(&fep->napi); 1855 } 1856 } 1857 1858 return ret; 1859 } 1860 1861 static int fec_enet_rx_napi(struct napi_struct *napi, int budget) 1862 { 1863 struct net_device *ndev = napi->dev; 1864 struct fec_enet_private *fep = netdev_priv(ndev); 1865 int done = 0; 1866 1867 do { 1868 done += fec_enet_rx(ndev, budget - done); 1869 fec_enet_tx(ndev, budget); 1870 } while ((done < budget) && fec_enet_collect_events(fep)); 1871 1872 if (done < budget) { 1873 napi_complete_done(napi, done); 1874 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 1875 } 1876 1877 return done; 1878 } 1879 1880 /* ------------------------------------------------------------------------- */ 1881 static int fec_get_mac(struct net_device *ndev) 1882 { 1883 struct fec_enet_private *fep = netdev_priv(ndev); 1884 unsigned char *iap, tmpaddr[ETH_ALEN]; 1885 int ret; 1886 1887 /* 1888 * try to get mac address in following order: 1889 * 1890 * 1) module parameter via kernel command line in form 1891 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0 1892 */ 1893 iap = macaddr; 1894 1895 /* 1896 * 2) from device tree data 1897 */ 1898 if (!is_valid_ether_addr(iap)) { 1899 struct device_node *np = fep->pdev->dev.of_node; 1900 if (np) { 1901 ret = of_get_mac_address(np, tmpaddr); 1902 if (!ret) 1903 iap = tmpaddr; 1904 else if (ret == -EPROBE_DEFER) 1905 return ret; 1906 } 1907 } 1908 1909 /* 1910 * 3) from flash or fuse (via platform data) 1911 */ 1912 if (!is_valid_ether_addr(iap)) { 1913 #ifdef CONFIG_M5272 1914 if (FEC_FLASHMAC) 1915 iap = (unsigned char *)FEC_FLASHMAC; 1916 #else 1917 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev); 1918 1919 if (pdata) 1920 iap = (unsigned char *)&pdata->mac; 1921 #endif 1922 } 1923 1924 /* 1925 * 4) FEC mac registers set by bootloader 1926 */ 1927 if (!is_valid_ether_addr(iap)) { 1928 *((__be32 *) &tmpaddr[0]) = 1929 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW)); 1930 *((__be16 *) &tmpaddr[4]) = 1931 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 1932 iap = &tmpaddr[0]; 1933 } 1934 1935 /* 1936 * 5) random mac address 1937 */ 1938 if (!is_valid_ether_addr(iap)) { 1939 /* Report it and use a random ethernet address instead */ 1940 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap); 1941 eth_hw_addr_random(ndev); 1942 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n", 1943 ndev->dev_addr); 1944 return 0; 1945 } 1946 1947 /* Adjust MAC if using macaddr */ 1948 eth_hw_addr_gen(ndev, iap, iap == macaddr ? 
fep->dev_id : 0); 1949 1950 return 0; 1951 } 1952 1953 /* ------------------------------------------------------------------------- */ 1954 1955 /* 1956 * Phy section 1957 */ 1958 static void fec_enet_adjust_link(struct net_device *ndev) 1959 { 1960 struct fec_enet_private *fep = netdev_priv(ndev); 1961 struct phy_device *phy_dev = ndev->phydev; 1962 int status_change = 0; 1963 1964 /* 1965 * If the netdev is down, or is going down, we're not interested 1966 * in link state events, so just mark our idea of the link as down 1967 * and ignore the event. 1968 */ 1969 if (!netif_running(ndev) || !netif_device_present(ndev)) { 1970 fep->link = 0; 1971 } else if (phy_dev->link) { 1972 if (!fep->link) { 1973 fep->link = phy_dev->link; 1974 status_change = 1; 1975 } 1976 1977 if (fep->full_duplex != phy_dev->duplex) { 1978 fep->full_duplex = phy_dev->duplex; 1979 status_change = 1; 1980 } 1981 1982 if (phy_dev->speed != fep->speed) { 1983 fep->speed = phy_dev->speed; 1984 status_change = 1; 1985 } 1986 1987 /* if any of the above changed restart the FEC */ 1988 if (status_change) { 1989 napi_disable(&fep->napi); 1990 netif_tx_lock_bh(ndev); 1991 fec_restart(ndev); 1992 netif_tx_wake_all_queues(ndev); 1993 netif_tx_unlock_bh(ndev); 1994 napi_enable(&fep->napi); 1995 } 1996 } else { 1997 if (fep->link) { 1998 napi_disable(&fep->napi); 1999 netif_tx_lock_bh(ndev); 2000 fec_stop(ndev); 2001 netif_tx_unlock_bh(ndev); 2002 napi_enable(&fep->napi); 2003 fep->link = phy_dev->link; 2004 status_change = 1; 2005 } 2006 } 2007 2008 if (status_change) 2009 phy_print_status(phy_dev); 2010 } 2011 2012 static int fec_enet_mdio_wait(struct fec_enet_private *fep) 2013 { 2014 uint ievent; 2015 int ret; 2016 2017 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent, 2018 ievent & FEC_ENET_MII, 2, 30000); 2019 2020 if (!ret) 2021 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2022 2023 return ret; 2024 } 2025 2026 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) 2027 { 2028 struct fec_enet_private *fep = bus->priv; 2029 struct device *dev = &fep->pdev->dev; 2030 int ret = 0, frame_start, frame_addr, frame_op; 2031 2032 ret = pm_runtime_resume_and_get(dev); 2033 if (ret < 0) 2034 return ret; 2035 2036 /* C22 read */ 2037 frame_op = FEC_MMFR_OP_READ; 2038 frame_start = FEC_MMFR_ST; 2039 frame_addr = regnum; 2040 2041 /* start a read op */ 2042 writel(frame_start | frame_op | 2043 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2044 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2045 2046 /* wait for end of transfer */ 2047 ret = fec_enet_mdio_wait(fep); 2048 if (ret) { 2049 netdev_err(fep->netdev, "MDIO read timeout\n"); 2050 goto out; 2051 } 2052 2053 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2054 2055 out: 2056 pm_runtime_mark_last_busy(dev); 2057 pm_runtime_put_autosuspend(dev); 2058 2059 return ret; 2060 } 2061 2062 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id, 2063 int devad, int regnum) 2064 { 2065 struct fec_enet_private *fep = bus->priv; 2066 struct device *dev = &fep->pdev->dev; 2067 int ret = 0, frame_start, frame_op; 2068 2069 ret = pm_runtime_resume_and_get(dev); 2070 if (ret < 0) 2071 return ret; 2072 2073 frame_start = FEC_MMFR_ST_C45; 2074 2075 /* write address */ 2076 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2077 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2078 FEC_MMFR_TA | (regnum & 0xFFFF), 2079 fep->hwp + FEC_MII_DATA); 2080 2081 /* wait for end of transfer */ 2082 ret = fec_enet_mdio_wait(fep); 2083 if (ret) { 2084 
netdev_err(fep->netdev, "MDIO address write timeout\n"); 2085 goto out; 2086 } 2087 2088 frame_op = FEC_MMFR_OP_READ_C45; 2089 2090 /* start a read op */ 2091 writel(frame_start | frame_op | 2092 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2093 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); 2094 2095 /* wait for end of transfer */ 2096 ret = fec_enet_mdio_wait(fep); 2097 if (ret) { 2098 netdev_err(fep->netdev, "MDIO read timeout\n"); 2099 goto out; 2100 } 2101 2102 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); 2103 2104 out: 2105 pm_runtime_mark_last_busy(dev); 2106 pm_runtime_put_autosuspend(dev); 2107 2108 return ret; 2109 } 2110 2111 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, 2112 u16 value) 2113 { 2114 struct fec_enet_private *fep = bus->priv; 2115 struct device *dev = &fep->pdev->dev; 2116 int ret, frame_start, frame_addr; 2117 2118 ret = pm_runtime_resume_and_get(dev); 2119 if (ret < 0) 2120 return ret; 2121 2122 /* C22 write */ 2123 frame_start = FEC_MMFR_ST; 2124 frame_addr = regnum; 2125 2126 /* start a write op */ 2127 writel(frame_start | FEC_MMFR_OP_WRITE | 2128 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) | 2129 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2130 fep->hwp + FEC_MII_DATA); 2131 2132 /* wait for end of transfer */ 2133 ret = fec_enet_mdio_wait(fep); 2134 if (ret) 2135 netdev_err(fep->netdev, "MDIO write timeout\n"); 2136 2137 pm_runtime_mark_last_busy(dev); 2138 pm_runtime_put_autosuspend(dev); 2139 2140 return ret; 2141 } 2142 2143 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id, 2144 int devad, int regnum, u16 value) 2145 { 2146 struct fec_enet_private *fep = bus->priv; 2147 struct device *dev = &fep->pdev->dev; 2148 int ret, frame_start; 2149 2150 ret = pm_runtime_resume_and_get(dev); 2151 if (ret < 0) 2152 return ret; 2153 2154 frame_start = FEC_MMFR_ST_C45; 2155 2156 /* write address */ 2157 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE | 2158 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2159 FEC_MMFR_TA | (regnum & 0xFFFF), 2160 fep->hwp + FEC_MII_DATA); 2161 2162 /* wait for end of transfer */ 2163 ret = fec_enet_mdio_wait(fep); 2164 if (ret) { 2165 netdev_err(fep->netdev, "MDIO address write timeout\n"); 2166 goto out; 2167 } 2168 2169 /* start a write op */ 2170 writel(frame_start | FEC_MMFR_OP_WRITE | 2171 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) | 2172 FEC_MMFR_TA | FEC_MMFR_DATA(value), 2173 fep->hwp + FEC_MII_DATA); 2174 2175 /* wait for end of transfer */ 2176 ret = fec_enet_mdio_wait(fep); 2177 if (ret) 2178 netdev_err(fep->netdev, "MDIO write timeout\n"); 2179 2180 out: 2181 pm_runtime_mark_last_busy(dev); 2182 pm_runtime_put_autosuspend(dev); 2183 2184 return ret; 2185 } 2186 2187 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev) 2188 { 2189 struct fec_enet_private *fep = netdev_priv(ndev); 2190 struct phy_device *phy_dev = ndev->phydev; 2191 2192 if (phy_dev) { 2193 phy_reset_after_clk_enable(phy_dev); 2194 } else if (fep->phy_node) { 2195 /* 2196 * If the PHY still is not bound to the MAC, but there is 2197 * OF PHY node and a matching PHY device instance already, 2198 * use the OF PHY node to obtain the PHY device instance, 2199 * and then use that PHY device instance when triggering 2200 * the PHY reset. 
2201 */ 2202 phy_dev = of_phy_find_device(fep->phy_node); 2203 phy_reset_after_clk_enable(phy_dev); 2204 put_device(&phy_dev->mdio.dev); 2205 } 2206 } 2207 2208 static int fec_enet_clk_enable(struct net_device *ndev, bool enable) 2209 { 2210 struct fec_enet_private *fep = netdev_priv(ndev); 2211 int ret; 2212 2213 if (enable) { 2214 ret = clk_prepare_enable(fep->clk_enet_out); 2215 if (ret) 2216 return ret; 2217 2218 if (fep->clk_ptp) { 2219 mutex_lock(&fep->ptp_clk_mutex); 2220 ret = clk_prepare_enable(fep->clk_ptp); 2221 if (ret) { 2222 mutex_unlock(&fep->ptp_clk_mutex); 2223 goto failed_clk_ptp; 2224 } else { 2225 fep->ptp_clk_on = true; 2226 } 2227 mutex_unlock(&fep->ptp_clk_mutex); 2228 } 2229 2230 ret = clk_prepare_enable(fep->clk_ref); 2231 if (ret) 2232 goto failed_clk_ref; 2233 2234 ret = clk_prepare_enable(fep->clk_2x_txclk); 2235 if (ret) 2236 goto failed_clk_2x_txclk; 2237 2238 fec_enet_phy_reset_after_clk_enable(ndev); 2239 } else { 2240 clk_disable_unprepare(fep->clk_enet_out); 2241 if (fep->clk_ptp) { 2242 mutex_lock(&fep->ptp_clk_mutex); 2243 clk_disable_unprepare(fep->clk_ptp); 2244 fep->ptp_clk_on = false; 2245 mutex_unlock(&fep->ptp_clk_mutex); 2246 } 2247 clk_disable_unprepare(fep->clk_ref); 2248 clk_disable_unprepare(fep->clk_2x_txclk); 2249 } 2250 2251 return 0; 2252 2253 failed_clk_2x_txclk: 2254 if (fep->clk_ref) 2255 clk_disable_unprepare(fep->clk_ref); 2256 failed_clk_ref: 2257 if (fep->clk_ptp) { 2258 mutex_lock(&fep->ptp_clk_mutex); 2259 clk_disable_unprepare(fep->clk_ptp); 2260 fep->ptp_clk_on = false; 2261 mutex_unlock(&fep->ptp_clk_mutex); 2262 } 2263 failed_clk_ptp: 2264 clk_disable_unprepare(fep->clk_enet_out); 2265 2266 return ret; 2267 } 2268 2269 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, 2270 struct device_node *np) 2271 { 2272 u32 rgmii_tx_delay, rgmii_rx_delay; 2273 2274 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ 2275 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { 2276 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { 2277 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); 2278 return -EINVAL; 2279 } else if (rgmii_tx_delay == 2000) { 2280 fep->rgmii_txc_dly = true; 2281 } 2282 } 2283 2284 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ 2285 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { 2286 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { 2287 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); 2288 return -EINVAL; 2289 } else if (rgmii_rx_delay == 2000) { 2290 fep->rgmii_rxc_dly = true; 2291 } 2292 } 2293 2294 return 0; 2295 } 2296 2297 static int fec_enet_mii_probe(struct net_device *ndev) 2298 { 2299 struct fec_enet_private *fep = netdev_priv(ndev); 2300 struct phy_device *phy_dev = NULL; 2301 char mdio_bus_id[MII_BUS_ID_SIZE]; 2302 char phy_name[MII_BUS_ID_SIZE + 3]; 2303 int phy_id; 2304 int dev_id = fep->dev_id; 2305 2306 if (fep->phy_node) { 2307 phy_dev = of_phy_connect(ndev, fep->phy_node, 2308 &fec_enet_adjust_link, 0, 2309 fep->phy_interface); 2310 if (!phy_dev) { 2311 netdev_err(ndev, "Unable to connect to phy\n"); 2312 return -ENODEV; 2313 } 2314 } else { 2315 /* check for attached phy */ 2316 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) { 2317 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id)) 2318 continue; 2319 if (dev_id--) 2320 continue; 2321 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 2322 break; 2323 } 2324 2325 
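/* If the scan above did not find a usable PHY on the MDIO bus, fall back
 * to the software "fixed-0" bus so the MAC can still be brought up when it
 * is wired directly to a switch port.
 */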
if (phy_id >= PHY_MAX_ADDR) { 2326 netdev_info(ndev, "no PHY, assuming direct connection to switch\n"); 2327 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); 2328 phy_id = 0; 2329 } 2330 2331 snprintf(phy_name, sizeof(phy_name), 2332 PHY_ID_FMT, mdio_bus_id, phy_id); 2333 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 2334 fep->phy_interface); 2335 } 2336 2337 if (IS_ERR(phy_dev)) { 2338 netdev_err(ndev, "could not attach to PHY\n"); 2339 return PTR_ERR(phy_dev); 2340 } 2341 2342 /* mask with MAC supported features */ 2343 if (fep->quirks & FEC_QUIRK_HAS_GBIT) { 2344 phy_set_max_speed(phy_dev, 1000); 2345 phy_remove_link_mode(phy_dev, 2346 ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 2347 #if !defined(CONFIG_M5272) 2348 phy_support_sym_pause(phy_dev); 2349 #endif 2350 } 2351 else 2352 phy_set_max_speed(phy_dev, 100); 2353 2354 fep->link = 0; 2355 fep->full_duplex = 0; 2356 2357 phy_dev->mac_managed_pm = true; 2358 2359 phy_attached_info(phy_dev); 2360 2361 return 0; 2362 } 2363 2364 static int fec_enet_mii_init(struct platform_device *pdev) 2365 { 2366 static struct mii_bus *fec0_mii_bus; 2367 struct net_device *ndev = platform_get_drvdata(pdev); 2368 struct fec_enet_private *fep = netdev_priv(ndev); 2369 bool suppress_preamble = false; 2370 struct device_node *node; 2371 int err = -ENXIO; 2372 u32 mii_speed, holdtime; 2373 u32 bus_freq; 2374 2375 /* 2376 * The i.MX28 dual fec interfaces are not equal. 2377 * Here are the differences: 2378 * 2379 * - fec0 supports MII & RMII modes while fec1 only supports RMII 2380 * - fec0 acts as the 1588 time master while fec1 is slave 2381 * - external phys can only be configured by fec0 2382 * 2383 * That is to say, fec1 cannot work independently. It only works 2384 * when fec0 is working. The reason behind this design is that the 2385 * second interface is added primarily for Switch mode. 2386 * 2387 * Because of the last point above, both phys are attached on fec0 2388 * mdio interface in board design, and need to be configured by 2389 * fec0 mii_bus. 2390 */ 2391 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { 2392 /* fec1 uses fec0 mii_bus */ 2393 if (mii_cnt && fec0_mii_bus) { 2394 fep->mii_bus = fec0_mii_bus; 2395 mii_cnt++; 2396 return 0; 2397 } 2398 return -ENOENT; 2399 } 2400 2401 bus_freq = 2500000; /* 2.5MHz by default */ 2402 node = of_get_child_by_name(pdev->dev.of_node, "mdio"); 2403 if (node) { 2404 of_property_read_u32(node, "clock-frequency", &bus_freq); 2405 suppress_preamble = of_property_read_bool(node, 2406 "suppress-preamble"); 2407 } 2408 2409 /* 2410 * Set MII speed (= clk_get_rate() / 2 * phy_speed) 2411 * 2412 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while 2413 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28 2414 * Reference Manual has an error here, which is corrected in the i.MX6Q 2415 * documentation. 2416 */ 2417 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2); 2418 if (fep->quirks & FEC_QUIRK_ENET_MAC) 2419 mii_speed--; 2420 if (mii_speed > 63) { 2421 dev_err(&pdev->dev, 2422 "fec clock (%lu) too fast to get right mii speed\n", 2423 clk_get_rate(fep->clk_ipg)); 2424 err = -EINVAL; 2425 goto err_out; 2426 } 2427 2428 /* 2429 * The i.MX28 and i.MX6 types have another field in the MSCR (aka 2430 * MII_SPEED) register that defines the MDIO output hold time. Earlier 2431 * versions are RAZ there, so just ignore the difference and write the 2432 * register always. 2433 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2434 * HOLDTIME + 1 is the number of clk cycles the fec is holding the 2435 * output. 2436 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive). 2437 * Given that ceil(clkrate / 5000000) <= 64, the calculation for 2438 * holdtime cannot result in a value greater than 3. 2439 */ 2440 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1; 2441 2442 fep->phy_speed = mii_speed << 1 | holdtime << 8; 2443 2444 if (suppress_preamble) 2445 fep->phy_speed |= BIT(7); 2446 2447 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) { 2448 /* Clear MMFR to avoid generating an MII event when writing MSCR. 2449 * MII event generation condition: 2450 * - writing MSCR: 2451 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero & 2452 * mscr_reg_data_in[7:0] != 0 2453 * - writing MMFR: 2454 * - mscr[7:0]_not_zero 2455 */ 2456 writel(0, fep->hwp + FEC_MII_DATA); 2457 } 2458 2459 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 2460 2461 /* Clear any pending transaction complete indication */ 2462 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 2463 2464 fep->mii_bus = mdiobus_alloc(); 2465 if (fep->mii_bus == NULL) { 2466 err = -ENOMEM; 2467 goto err_out; 2468 } 2469 2470 fep->mii_bus->name = "fec_enet_mii_bus"; 2471 fep->mii_bus->read = fec_enet_mdio_read_c22; 2472 fep->mii_bus->write = fec_enet_mdio_write_c22; 2473 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) { 2474 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45; 2475 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45; 2476 } 2477 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", 2478 pdev->name, fep->dev_id + 1); 2479 fep->mii_bus->priv = fep; 2480 fep->mii_bus->parent = &pdev->dev; 2481 2482 err = of_mdiobus_register(fep->mii_bus, node); 2483 if (err) 2484 goto err_out_free_mdiobus; 2485 of_node_put(node); 2486 2487 mii_cnt++; 2488 2489 /* save fec0 mii_bus */ 2490 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) 2491 fec0_mii_bus = fep->mii_bus; 2492 2493 return 0; 2494 2495 err_out_free_mdiobus: 2496 mdiobus_free(fep->mii_bus); 2497 err_out: 2498 of_node_put(node); 2499 return err; 2500 } 2501 2502 static void fec_enet_mii_remove(struct fec_enet_private *fep) 2503 { 2504 if (--mii_cnt == 0) { 2505 mdiobus_unregister(fep->mii_bus); 2506 mdiobus_free(fep->mii_bus); 2507 } 2508 } 2509 2510 static void fec_enet_get_drvinfo(struct net_device *ndev, 2511 struct ethtool_drvinfo *info) 2512 { 2513 struct fec_enet_private *fep = netdev_priv(ndev); 2514 2515 strscpy(info->driver, fep->pdev->dev.driver->name, 2516 sizeof(info->driver)); 2517 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info)); 2518 } 2519 2520 static int fec_enet_get_regs_len(struct net_device *ndev) 2521 { 2522 struct fec_enet_private *fep = netdev_priv(ndev); 2523 struct resource *r; 2524 int s = 0; 2525 2526 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0); 2527 if (r) 2528 s = resource_size(r); 2529 2530 return s; 2531 } 2532 2533 /* List of registers that can safely be read to dump them with ethtool */ 2534 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2535 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2536 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2537 static __u32 fec_enet_register_version = 2; 2538 static u32 fec_enet_register_offset[] = { 2539 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2540 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2541 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1, 2542 FEC_TXIC2, FEC_RXIC0,
FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH, 2543 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, 2544 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1, 2545 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2, 2546 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0, 2547 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2548 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2, 2549 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1, 2550 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME, 2551 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2552 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2553 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2554 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2555 RMON_T_P_GTE2048, RMON_T_OCTETS, 2556 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2557 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2558 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2559 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2560 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2561 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2562 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2563 RMON_R_P_GTE2048, RMON_R_OCTETS, 2564 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2565 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2566 }; 2567 /* for i.MX6ul */ 2568 static u32 fec_enet_register_offset_6ul[] = { 2569 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, 2570 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, 2571 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, 2572 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, 2573 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, 2574 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, 2575 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, 2576 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, 2577 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, 2578 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, 2579 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, 2580 RMON_T_P_GTE2048, RMON_T_OCTETS, 2581 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, 2582 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, 2583 IEEE_T_FDXFC, IEEE_T_OCTETS_OK, 2584 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, 2585 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, 2586 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, 2587 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, 2588 RMON_R_P_GTE2048, RMON_R_OCTETS, 2589 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, 2590 IEEE_R_FDXFC, IEEE_R_OCTETS_OK 2591 }; 2592 #else 2593 static __u32 fec_enet_register_version = 1; 2594 static u32 fec_enet_register_offset[] = { 2595 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, 2596 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, 2597 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED, 2598 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL, 2599 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, 2600 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0, 2601 FEC_R_DES_START_1, FEC_R_DES_START_2, 
FEC_X_DES_START_0, 2602 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0, 2603 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2 2604 }; 2605 #endif 2606 2607 static void fec_enet_get_regs(struct net_device *ndev, 2608 struct ethtool_regs *regs, void *regbuf) 2609 { 2610 struct fec_enet_private *fep = netdev_priv(ndev); 2611 u32 __iomem *theregs = (u32 __iomem *)fep->hwp; 2612 struct device *dev = &fep->pdev->dev; 2613 u32 *buf = (u32 *)regbuf; 2614 u32 i, off; 2615 int ret; 2616 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 2617 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ 2618 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) 2619 u32 *reg_list; 2620 u32 reg_cnt; 2621 2622 if (!of_machine_is_compatible("fsl,imx6ul")) { 2623 reg_list = fec_enet_register_offset; 2624 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2625 } else { 2626 reg_list = fec_enet_register_offset_6ul; 2627 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); 2628 } 2629 #else 2630 /* coldfire */ 2631 static u32 *reg_list = fec_enet_register_offset; 2632 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); 2633 #endif 2634 ret = pm_runtime_resume_and_get(dev); 2635 if (ret < 0) 2636 return; 2637 2638 regs->version = fec_enet_register_version; 2639 2640 memset(buf, 0, regs->len); 2641 2642 for (i = 0; i < reg_cnt; i++) { 2643 off = reg_list[i]; 2644 2645 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && 2646 !(fep->quirks & FEC_QUIRK_HAS_FRREG)) 2647 continue; 2648 2649 off >>= 2; 2650 buf[off] = readl(&theregs[off]); 2651 } 2652 2653 pm_runtime_mark_last_busy(dev); 2654 pm_runtime_put_autosuspend(dev); 2655 } 2656 2657 static int fec_enet_get_ts_info(struct net_device *ndev, 2658 struct ethtool_ts_info *info) 2659 { 2660 struct fec_enet_private *fep = netdev_priv(ndev); 2661 2662 if (fep->bufdesc_ex) { 2663 2664 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 2665 SOF_TIMESTAMPING_RX_SOFTWARE | 2666 SOF_TIMESTAMPING_SOFTWARE | 2667 SOF_TIMESTAMPING_TX_HARDWARE | 2668 SOF_TIMESTAMPING_RX_HARDWARE | 2669 SOF_TIMESTAMPING_RAW_HARDWARE; 2670 if (fep->ptp_clock) 2671 info->phc_index = ptp_clock_index(fep->ptp_clock); 2672 else 2673 info->phc_index = -1; 2674 2675 info->tx_types = (1 << HWTSTAMP_TX_OFF) | 2676 (1 << HWTSTAMP_TX_ON); 2677 2678 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) | 2679 (1 << HWTSTAMP_FILTER_ALL); 2680 return 0; 2681 } else { 2682 return ethtool_op_get_ts_info(ndev, info); 2683 } 2684 } 2685 2686 #if !defined(CONFIG_M5272) 2687 2688 static void fec_enet_get_pauseparam(struct net_device *ndev, 2689 struct ethtool_pauseparam *pause) 2690 { 2691 struct fec_enet_private *fep = netdev_priv(ndev); 2692 2693 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; 2694 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; 2695 pause->rx_pause = pause->tx_pause; 2696 } 2697 2698 static int fec_enet_set_pauseparam(struct net_device *ndev, 2699 struct ethtool_pauseparam *pause) 2700 { 2701 struct fec_enet_private *fep = netdev_priv(ndev); 2702 2703 if (!ndev->phydev) 2704 return -ENODEV; 2705 2706 if (pause->tx_pause != pause->rx_pause) { 2707 netdev_info(ndev, 2708 "hardware only support enable/disable both tx and rx"); 2709 return -EINVAL; 2710 } 2711 2712 fep->pause_flag = 0; 2713 2714 /* tx pause must be same as rx pause */ 2715 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; 2716 fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; 2717 2718 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause, 2719 pause->autoneg); 2720 2721 if (pause->autoneg) { 2722 if (netif_running(ndev)) 2723 fec_stop(ndev); 2724 phy_start_aneg(ndev->phydev); 2725 } 2726 if (netif_running(ndev)) { 2727 napi_disable(&fep->napi); 2728 netif_tx_lock_bh(ndev); 2729 fec_restart(ndev); 2730 netif_tx_wake_all_queues(ndev); 2731 netif_tx_unlock_bh(ndev); 2732 napi_enable(&fep->napi); 2733 } 2734 2735 return 0; 2736 } 2737 2738 static const struct fec_stat { 2739 char name[ETH_GSTRING_LEN]; 2740 u16 offset; 2741 } fec_stats[] = { 2742 /* RMON TX */ 2743 { "tx_dropped", RMON_T_DROP }, 2744 { "tx_packets", RMON_T_PACKETS }, 2745 { "tx_broadcast", RMON_T_BC_PKT }, 2746 { "tx_multicast", RMON_T_MC_PKT }, 2747 { "tx_crc_errors", RMON_T_CRC_ALIGN }, 2748 { "tx_undersize", RMON_T_UNDERSIZE }, 2749 { "tx_oversize", RMON_T_OVERSIZE }, 2750 { "tx_fragment", RMON_T_FRAG }, 2751 { "tx_jabber", RMON_T_JAB }, 2752 { "tx_collision", RMON_T_COL }, 2753 { "tx_64byte", RMON_T_P64 }, 2754 { "tx_65to127byte", RMON_T_P65TO127 }, 2755 { "tx_128to255byte", RMON_T_P128TO255 }, 2756 { "tx_256to511byte", RMON_T_P256TO511 }, 2757 { "tx_512to1023byte", RMON_T_P512TO1023 }, 2758 { "tx_1024to2047byte", RMON_T_P1024TO2047 }, 2759 { "tx_GTE2048byte", RMON_T_P_GTE2048 }, 2760 { "tx_octets", RMON_T_OCTETS }, 2761 2762 /* IEEE TX */ 2763 { "IEEE_tx_drop", IEEE_T_DROP }, 2764 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK }, 2765 { "IEEE_tx_1col", IEEE_T_1COL }, 2766 { "IEEE_tx_mcol", IEEE_T_MCOL }, 2767 { "IEEE_tx_def", IEEE_T_DEF }, 2768 { "IEEE_tx_lcol", IEEE_T_LCOL }, 2769 { "IEEE_tx_excol", IEEE_T_EXCOL }, 2770 { "IEEE_tx_macerr", IEEE_T_MACERR }, 2771 { "IEEE_tx_cserr", IEEE_T_CSERR }, 2772 { "IEEE_tx_sqe", IEEE_T_SQE }, 2773 { "IEEE_tx_fdxfc", IEEE_T_FDXFC }, 2774 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK }, 2775 2776 /* RMON RX */ 2777 { "rx_packets", RMON_R_PACKETS }, 2778 { "rx_broadcast", RMON_R_BC_PKT }, 2779 { "rx_multicast", RMON_R_MC_PKT }, 2780 { "rx_crc_errors", RMON_R_CRC_ALIGN }, 2781 { "rx_undersize", RMON_R_UNDERSIZE }, 2782 { "rx_oversize", RMON_R_OVERSIZE }, 2783 { "rx_fragment", RMON_R_FRAG }, 2784 { "rx_jabber", RMON_R_JAB }, 2785 { "rx_64byte", RMON_R_P64 }, 2786 { "rx_65to127byte", RMON_R_P65TO127 }, 2787 { "rx_128to255byte", RMON_R_P128TO255 }, 2788 { "rx_256to511byte", RMON_R_P256TO511 }, 2789 { "rx_512to1023byte", RMON_R_P512TO1023 }, 2790 { "rx_1024to2047byte", RMON_R_P1024TO2047 }, 2791 { "rx_GTE2048byte", RMON_R_P_GTE2048 }, 2792 { "rx_octets", RMON_R_OCTETS }, 2793 2794 /* IEEE RX */ 2795 { "IEEE_rx_drop", IEEE_R_DROP }, 2796 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK }, 2797 { "IEEE_rx_crc", IEEE_R_CRC }, 2798 { "IEEE_rx_align", IEEE_R_ALIGN }, 2799 { "IEEE_rx_macerr", IEEE_R_MACERR }, 2800 { "IEEE_rx_fdxfc", IEEE_R_FDXFC }, 2801 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK }, 2802 }; 2803 2804 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64)) 2805 2806 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = { 2807 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */ 2808 "rx_xdp_pass", /* RX_XDP_PASS, */ 2809 "rx_xdp_drop", /* RX_XDP_DROP, */ 2810 "rx_xdp_tx", /* RX_XDP_TX, */ 2811 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */ 2812 "tx_xdp_xmit", /* TX_XDP_XMIT, */ 2813 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */ 2814 }; 2815 2816 static void fec_enet_update_ethtool_stats(struct net_device *dev) 2817 { 2818 struct fec_enet_private *fep = netdev_priv(dev); 2819 int i; 2820 2821 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2822 
fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset); 2823 } 2824 2825 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data) 2826 { 2827 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 }; 2828 struct fec_enet_priv_rx_q *rxq; 2829 int i, j; 2830 2831 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 2832 rxq = fep->rx_queue[i]; 2833 2834 for (j = 0; j < XDP_STATS_TOTAL; j++) 2835 xdp_stats[j] += rxq->stats[j]; 2836 } 2837 2838 memcpy(data, xdp_stats, sizeof(xdp_stats)); 2839 } 2840 2841 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data) 2842 { 2843 #ifdef CONFIG_PAGE_POOL_STATS 2844 struct page_pool_stats stats = {}; 2845 struct fec_enet_priv_rx_q *rxq; 2846 int i; 2847 2848 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 2849 rxq = fep->rx_queue[i]; 2850 2851 if (!rxq->page_pool) 2852 continue; 2853 2854 page_pool_get_stats(rxq->page_pool, &stats); 2855 } 2856 2857 page_pool_ethtool_stats_get(data, &stats); 2858 #endif 2859 } 2860 2861 static void fec_enet_get_ethtool_stats(struct net_device *dev, 2862 struct ethtool_stats *stats, u64 *data) 2863 { 2864 struct fec_enet_private *fep = netdev_priv(dev); 2865 2866 if (netif_running(dev)) 2867 fec_enet_update_ethtool_stats(dev); 2868 2869 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE); 2870 data += FEC_STATS_SIZE / sizeof(u64); 2871 2872 fec_enet_get_xdp_stats(fep, data); 2873 data += XDP_STATS_TOTAL; 2874 2875 fec_enet_page_pool_stats(fep, data); 2876 } 2877 2878 static void fec_enet_get_strings(struct net_device *netdev, 2879 u32 stringset, u8 *data) 2880 { 2881 int i; 2882 switch (stringset) { 2883 case ETH_SS_STATS: 2884 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) { 2885 memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN); 2886 data += ETH_GSTRING_LEN; 2887 } 2888 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) { 2889 strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN); 2890 data += ETH_GSTRING_LEN; 2891 } 2892 page_pool_ethtool_stats_get_strings(data); 2893 2894 break; 2895 case ETH_SS_TEST: 2896 net_selftest_get_strings(data); 2897 break; 2898 } 2899 } 2900 2901 static int fec_enet_get_sset_count(struct net_device *dev, int sset) 2902 { 2903 int count; 2904 2905 switch (sset) { 2906 case ETH_SS_STATS: 2907 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL; 2908 count += page_pool_ethtool_stats_get_count(); 2909 return count; 2910 2911 case ETH_SS_TEST: 2912 return net_selftest_get_count(); 2913 default: 2914 return -EOPNOTSUPP; 2915 } 2916 } 2917 2918 static void fec_enet_clear_ethtool_stats(struct net_device *dev) 2919 { 2920 struct fec_enet_private *fep = netdev_priv(dev); 2921 struct fec_enet_priv_rx_q *rxq; 2922 int i, j; 2923 2924 /* Disable MIB statistics counters */ 2925 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT); 2926 2927 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) 2928 writel(0, fep->hwp + fec_stats[i].offset); 2929 2930 for (i = fep->num_rx_queues - 1; i >= 0; i--) { 2931 rxq = fep->rx_queue[i]; 2932 for (j = 0; j < XDP_STATS_TOTAL; j++) 2933 rxq->stats[j] = 0; 2934 } 2935 2936 /* Don't disable MIB statistics counters */ 2937 writel(0, fep->hwp + FEC_MIB_CTRLSTAT); 2938 } 2939 2940 #else /* !defined(CONFIG_M5272) */ 2941 #define FEC_STATS_SIZE 0 2942 static inline void fec_enet_update_ethtool_stats(struct net_device *dev) 2943 { 2944 } 2945 2946 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev) 2947 { 2948 } 2949 #endif /* !defined(CONFIG_M5272) */ 2950 2951 /* ITR clock source is enet system clock (clk_ahb). 
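 * (cycle_ns is one clk_ahb period in nanoseconds; as a purely illustrative
 * example, not a rate this driver assumes, a 66 MHz AHB clock gives roughly
 * 15 ns per cycle, i.e. about 0.97 us per 64-cycle unit.)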
2952 * TCTT unit is cycle_ns * 64 cycle 2953 * So, the ICTT value = X us / (cycle_ns * 64) 2954 */ 2955 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us) 2956 { 2957 struct fec_enet_private *fep = netdev_priv(ndev); 2958 2959 return us * (fep->itr_clk_rate / 64000) / 1000; 2960 } 2961 2962 /* Set threshold for interrupt coalescing */ 2963 static void fec_enet_itr_coal_set(struct net_device *ndev) 2964 { 2965 struct fec_enet_private *fep = netdev_priv(ndev); 2966 int rx_itr, tx_itr; 2967 2968 /* Must be greater than zero to avoid unpredictable behavior */ 2969 if (!fep->rx_time_itr || !fep->rx_pkts_itr || 2970 !fep->tx_time_itr || !fep->tx_pkts_itr) 2971 return; 2972 2973 /* Select enet system clock as Interrupt Coalescing 2974 * timer Clock Source 2975 */ 2976 rx_itr = FEC_ITR_CLK_SEL; 2977 tx_itr = FEC_ITR_CLK_SEL; 2978 2979 /* set ICFT and ICTT */ 2980 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr); 2981 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr)); 2982 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr); 2983 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr)); 2984 2985 rx_itr |= FEC_ITR_EN; 2986 tx_itr |= FEC_ITR_EN; 2987 2988 writel(tx_itr, fep->hwp + FEC_TXIC0); 2989 writel(rx_itr, fep->hwp + FEC_RXIC0); 2990 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 2991 writel(tx_itr, fep->hwp + FEC_TXIC1); 2992 writel(rx_itr, fep->hwp + FEC_RXIC1); 2993 writel(tx_itr, fep->hwp + FEC_TXIC2); 2994 writel(rx_itr, fep->hwp + FEC_RXIC2); 2995 } 2996 } 2997 2998 static int fec_enet_get_coalesce(struct net_device *ndev, 2999 struct ethtool_coalesce *ec, 3000 struct kernel_ethtool_coalesce *kernel_coal, 3001 struct netlink_ext_ack *extack) 3002 { 3003 struct fec_enet_private *fep = netdev_priv(ndev); 3004 3005 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3006 return -EOPNOTSUPP; 3007 3008 ec->rx_coalesce_usecs = fep->rx_time_itr; 3009 ec->rx_max_coalesced_frames = fep->rx_pkts_itr; 3010 3011 ec->tx_coalesce_usecs = fep->tx_time_itr; 3012 ec->tx_max_coalesced_frames = fep->tx_pkts_itr; 3013 3014 return 0; 3015 } 3016 3017 static int fec_enet_set_coalesce(struct net_device *ndev, 3018 struct ethtool_coalesce *ec, 3019 struct kernel_ethtool_coalesce *kernel_coal, 3020 struct netlink_ext_ack *extack) 3021 { 3022 struct fec_enet_private *fep = netdev_priv(ndev); 3023 struct device *dev = &fep->pdev->dev; 3024 unsigned int cycle; 3025 3026 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE)) 3027 return -EOPNOTSUPP; 3028 3029 if (ec->rx_max_coalesced_frames > 255) { 3030 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n"); 3031 return -EINVAL; 3032 } 3033 3034 if (ec->tx_max_coalesced_frames > 255) { 3035 dev_err(dev, "Tx coalesced frame exceed hardware limitation\n"); 3036 return -EINVAL; 3037 } 3038 3039 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs); 3040 if (cycle > 0xFFFF) { 3041 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n"); 3042 return -EINVAL; 3043 } 3044 3045 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs); 3046 if (cycle > 0xFFFF) { 3047 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n"); 3048 return -EINVAL; 3049 } 3050 3051 fep->rx_time_itr = ec->rx_coalesce_usecs; 3052 fep->rx_pkts_itr = ec->rx_max_coalesced_frames; 3053 3054 fep->tx_time_itr = ec->tx_coalesce_usecs; 3055 fep->tx_pkts_itr = ec->tx_max_coalesced_frames; 3056 3057 fec_enet_itr_coal_set(ndev); 3058 3059 return 0; 3060 } 3061 3062 static int fec_enet_get_tunable(struct net_device *netdev, 3063 const struct 
ethtool_tunable *tuna, 3064 void *data) 3065 { 3066 struct fec_enet_private *fep = netdev_priv(netdev); 3067 int ret = 0; 3068 3069 switch (tuna->id) { 3070 case ETHTOOL_RX_COPYBREAK: 3071 *(u32 *)data = fep->rx_copybreak; 3072 break; 3073 default: 3074 ret = -EINVAL; 3075 break; 3076 } 3077 3078 return ret; 3079 } 3080 3081 static int fec_enet_set_tunable(struct net_device *netdev, 3082 const struct ethtool_tunable *tuna, 3083 const void *data) 3084 { 3085 struct fec_enet_private *fep = netdev_priv(netdev); 3086 int ret = 0; 3087 3088 switch (tuna->id) { 3089 case ETHTOOL_RX_COPYBREAK: 3090 fep->rx_copybreak = *(u32 *)data; 3091 break; 3092 default: 3093 ret = -EINVAL; 3094 break; 3095 } 3096 3097 return ret; 3098 } 3099 3100 /* LPI Sleep Ts count base on tx clk (clk_ref). 3101 * The lpi sleep cnt value = X us / (cycle_ns). 3102 */ 3103 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) 3104 { 3105 struct fec_enet_private *fep = netdev_priv(ndev); 3106 3107 return us * (fep->clk_ref_rate / 1000) / 1000; 3108 } 3109 3110 static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) 3111 { 3112 struct fec_enet_private *fep = netdev_priv(ndev); 3113 struct ethtool_eee *p = &fep->eee; 3114 unsigned int sleep_cycle, wake_cycle; 3115 int ret = 0; 3116 3117 if (enable) { 3118 ret = phy_init_eee(ndev->phydev, false); 3119 if (ret) 3120 return ret; 3121 3122 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); 3123 wake_cycle = sleep_cycle; 3124 } else { 3125 sleep_cycle = 0; 3126 wake_cycle = 0; 3127 } 3128 3129 p->tx_lpi_enabled = enable; 3130 p->eee_enabled = enable; 3131 p->eee_active = enable; 3132 3133 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); 3134 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); 3135 3136 return 0; 3137 } 3138 3139 static int 3140 fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) 3141 { 3142 struct fec_enet_private *fep = netdev_priv(ndev); 3143 struct ethtool_eee *p = &fep->eee; 3144 3145 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3146 return -EOPNOTSUPP; 3147 3148 if (!netif_running(ndev)) 3149 return -ENETDOWN; 3150 3151 edata->eee_enabled = p->eee_enabled; 3152 edata->eee_active = p->eee_active; 3153 edata->tx_lpi_timer = p->tx_lpi_timer; 3154 edata->tx_lpi_enabled = p->tx_lpi_enabled; 3155 3156 return phy_ethtool_get_eee(ndev->phydev, edata); 3157 } 3158 3159 static int 3160 fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata) 3161 { 3162 struct fec_enet_private *fep = netdev_priv(ndev); 3163 struct ethtool_eee *p = &fep->eee; 3164 int ret = 0; 3165 3166 if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) 3167 return -EOPNOTSUPP; 3168 3169 if (!netif_running(ndev)) 3170 return -ENETDOWN; 3171 3172 p->tx_lpi_timer = edata->tx_lpi_timer; 3173 3174 if (!edata->eee_enabled || !edata->tx_lpi_enabled || 3175 !edata->tx_lpi_timer) 3176 ret = fec_enet_eee_mode_set(ndev, false); 3177 else 3178 ret = fec_enet_eee_mode_set(ndev, true); 3179 3180 if (ret) 3181 return ret; 3182 3183 return phy_ethtool_set_eee(ndev->phydev, edata); 3184 } 3185 3186 static void 3187 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3188 { 3189 struct fec_enet_private *fep = netdev_priv(ndev); 3190 3191 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) { 3192 wol->supported = WAKE_MAGIC; 3193 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? 
WAKE_MAGIC : 0; 3194 } else { 3195 wol->supported = wol->wolopts = 0; 3196 } 3197 } 3198 3199 static int 3200 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) 3201 { 3202 struct fec_enet_private *fep = netdev_priv(ndev); 3203 3204 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET)) 3205 return -EINVAL; 3206 3207 if (wol->wolopts & ~WAKE_MAGIC) 3208 return -EINVAL; 3209 3210 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); 3211 if (device_may_wakeup(&ndev->dev)) 3212 fep->wol_flag |= FEC_WOL_FLAG_ENABLE; 3213 else 3214 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); 3215 3216 return 0; 3217 } 3218 3219 static const struct ethtool_ops fec_enet_ethtool_ops = { 3220 .supported_coalesce_params = ETHTOOL_COALESCE_USECS | 3221 ETHTOOL_COALESCE_MAX_FRAMES, 3222 .get_drvinfo = fec_enet_get_drvinfo, 3223 .get_regs_len = fec_enet_get_regs_len, 3224 .get_regs = fec_enet_get_regs, 3225 .nway_reset = phy_ethtool_nway_reset, 3226 .get_link = ethtool_op_get_link, 3227 .get_coalesce = fec_enet_get_coalesce, 3228 .set_coalesce = fec_enet_set_coalesce, 3229 #ifndef CONFIG_M5272 3230 .get_pauseparam = fec_enet_get_pauseparam, 3231 .set_pauseparam = fec_enet_set_pauseparam, 3232 .get_strings = fec_enet_get_strings, 3233 .get_ethtool_stats = fec_enet_get_ethtool_stats, 3234 .get_sset_count = fec_enet_get_sset_count, 3235 #endif 3236 .get_ts_info = fec_enet_get_ts_info, 3237 .get_tunable = fec_enet_get_tunable, 3238 .set_tunable = fec_enet_set_tunable, 3239 .get_wol = fec_enet_get_wol, 3240 .set_wol = fec_enet_set_wol, 3241 .get_eee = fec_enet_get_eee, 3242 .set_eee = fec_enet_set_eee, 3243 .get_link_ksettings = phy_ethtool_get_link_ksettings, 3244 .set_link_ksettings = phy_ethtool_set_link_ksettings, 3245 .self_test = net_selftest, 3246 }; 3247 3248 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 3249 { 3250 struct fec_enet_private *fep = netdev_priv(ndev); 3251 struct phy_device *phydev = ndev->phydev; 3252 3253 if (!netif_running(ndev)) 3254 return -EINVAL; 3255 3256 if (!phydev) 3257 return -ENODEV; 3258 3259 if (fep->bufdesc_ex) { 3260 bool use_fec_hwts = !phy_has_hwtstamp(phydev); 3261 3262 if (cmd == SIOCSHWTSTAMP) { 3263 if (use_fec_hwts) 3264 return fec_ptp_set(ndev, rq); 3265 fec_ptp_disable_hwts(ndev); 3266 } else if (cmd == SIOCGHWTSTAMP) { 3267 if (use_fec_hwts) 3268 return fec_ptp_get(ndev, rq); 3269 } 3270 } 3271 3272 return phy_mii_ioctl(phydev, rq, cmd); 3273 } 3274 3275 static void fec_enet_free_buffers(struct net_device *ndev) 3276 { 3277 struct fec_enet_private *fep = netdev_priv(ndev); 3278 unsigned int i; 3279 struct sk_buff *skb; 3280 struct fec_enet_priv_tx_q *txq; 3281 struct fec_enet_priv_rx_q *rxq; 3282 unsigned int q; 3283 3284 for (q = 0; q < fep->num_rx_queues; q++) { 3285 rxq = fep->rx_queue[q]; 3286 for (i = 0; i < rxq->bd.ring_size; i++) 3287 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false); 3288 3289 for (i = 0; i < XDP_STATS_TOTAL; i++) 3290 rxq->stats[i] = 0; 3291 3292 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq)) 3293 xdp_rxq_info_unreg(&rxq->xdp_rxq); 3294 page_pool_destroy(rxq->page_pool); 3295 rxq->page_pool = NULL; 3296 } 3297 3298 for (q = 0; q < fep->num_tx_queues; q++) { 3299 txq = fep->tx_queue[q]; 3300 for (i = 0; i < txq->bd.ring_size; i++) { 3301 kfree(txq->tx_bounce[i]); 3302 txq->tx_bounce[i] = NULL; 3303 3304 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) { 3305 skb = txq->tx_buf[i].skb; 3306 txq->tx_buf[i].skb = NULL; 3307 dev_kfree_skb(skb); 3308 } else { 3309 if (txq->tx_buf[i].xdp) { 
3310 xdp_return_frame(txq->tx_buf[i].xdp); 3311 txq->tx_buf[i].xdp = NULL; 3312 } 3313 3314 txq->tx_buf[i].type = FEC_TXBUF_T_SKB; 3315 } 3316 } 3317 } 3318 } 3319 3320 static void fec_enet_free_queue(struct net_device *ndev) 3321 { 3322 struct fec_enet_private *fep = netdev_priv(ndev); 3323 int i; 3324 struct fec_enet_priv_tx_q *txq; 3325 3326 for (i = 0; i < fep->num_tx_queues; i++) 3327 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) { 3328 txq = fep->tx_queue[i]; 3329 dma_free_coherent(&fep->pdev->dev, 3330 txq->bd.ring_size * TSO_HEADER_SIZE, 3331 txq->tso_hdrs, 3332 txq->tso_hdrs_dma); 3333 } 3334 3335 for (i = 0; i < fep->num_rx_queues; i++) 3336 kfree(fep->rx_queue[i]); 3337 for (i = 0; i < fep->num_tx_queues; i++) 3338 kfree(fep->tx_queue[i]); 3339 } 3340 3341 static int fec_enet_alloc_queue(struct net_device *ndev) 3342 { 3343 struct fec_enet_private *fep = netdev_priv(ndev); 3344 int i; 3345 int ret = 0; 3346 struct fec_enet_priv_tx_q *txq; 3347 3348 for (i = 0; i < fep->num_tx_queues; i++) { 3349 txq = kzalloc(sizeof(*txq), GFP_KERNEL); 3350 if (!txq) { 3351 ret = -ENOMEM; 3352 goto alloc_failed; 3353 } 3354 3355 fep->tx_queue[i] = txq; 3356 txq->bd.ring_size = TX_RING_SIZE; 3357 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size; 3358 3359 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS; 3360 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS; 3361 3362 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev, 3363 txq->bd.ring_size * TSO_HEADER_SIZE, 3364 &txq->tso_hdrs_dma, 3365 GFP_KERNEL); 3366 if (!txq->tso_hdrs) { 3367 ret = -ENOMEM; 3368 goto alloc_failed; 3369 } 3370 } 3371 3372 for (i = 0; i < fep->num_rx_queues; i++) { 3373 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]), 3374 GFP_KERNEL); 3375 if (!fep->rx_queue[i]) { 3376 ret = -ENOMEM; 3377 goto alloc_failed; 3378 } 3379 3380 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE; 3381 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size; 3382 } 3383 return ret; 3384 3385 alloc_failed: 3386 fec_enet_free_queue(ndev); 3387 return ret; 3388 } 3389 3390 static int 3391 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue) 3392 { 3393 struct fec_enet_private *fep = netdev_priv(ndev); 3394 struct fec_enet_priv_rx_q *rxq; 3395 dma_addr_t phys_addr; 3396 struct bufdesc *bdp; 3397 struct page *page; 3398 int i, err; 3399 3400 rxq = fep->rx_queue[queue]; 3401 bdp = rxq->bd.base; 3402 3403 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size); 3404 if (err < 0) { 3405 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err); 3406 return err; 3407 } 3408 3409 for (i = 0; i < rxq->bd.ring_size; i++) { 3410 page = page_pool_dev_alloc_pages(rxq->page_pool); 3411 if (!page) 3412 goto err_alloc; 3413 3414 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM; 3415 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr); 3416 3417 rxq->rx_skb_info[i].page = page; 3418 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM; 3419 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY); 3420 3421 if (fep->bufdesc_ex) { 3422 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3423 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT); 3424 } 3425 3426 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd); 3427 } 3428 3429 /* Set the last buffer to wrap. 
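 * BD_SC_WRAP tells the controller to return to the first descriptor after
 * this one, so the descriptor ring behaves as a circular buffer.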
*/ 3430 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd); 3431 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3432 return 0; 3433 3434 err_alloc: 3435 fec_enet_free_buffers(ndev); 3436 return -ENOMEM; 3437 } 3438 3439 static int 3440 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue) 3441 { 3442 struct fec_enet_private *fep = netdev_priv(ndev); 3443 unsigned int i; 3444 struct bufdesc *bdp; 3445 struct fec_enet_priv_tx_q *txq; 3446 3447 txq = fep->tx_queue[queue]; 3448 bdp = txq->bd.base; 3449 for (i = 0; i < txq->bd.ring_size; i++) { 3450 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); 3451 if (!txq->tx_bounce[i]) 3452 goto err_alloc; 3453 3454 bdp->cbd_sc = cpu_to_fec16(0); 3455 bdp->cbd_bufaddr = cpu_to_fec32(0); 3456 3457 if (fep->bufdesc_ex) { 3458 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3459 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT); 3460 } 3461 3462 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3463 } 3464 3465 /* Set the last buffer to wrap. */ 3466 bdp = fec_enet_get_prevdesc(bdp, &txq->bd); 3467 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP); 3468 3469 return 0; 3470 3471 err_alloc: 3472 fec_enet_free_buffers(ndev); 3473 return -ENOMEM; 3474 } 3475 3476 static int fec_enet_alloc_buffers(struct net_device *ndev) 3477 { 3478 struct fec_enet_private *fep = netdev_priv(ndev); 3479 unsigned int i; 3480 3481 for (i = 0; i < fep->num_rx_queues; i++) 3482 if (fec_enet_alloc_rxq_buffers(ndev, i)) 3483 return -ENOMEM; 3484 3485 for (i = 0; i < fep->num_tx_queues; i++) 3486 if (fec_enet_alloc_txq_buffers(ndev, i)) 3487 return -ENOMEM; 3488 return 0; 3489 } 3490 3491 static int 3492 fec_enet_open(struct net_device *ndev) 3493 { 3494 struct fec_enet_private *fep = netdev_priv(ndev); 3495 int ret; 3496 bool reset_again; 3497 3498 ret = pm_runtime_resume_and_get(&fep->pdev->dev); 3499 if (ret < 0) 3500 return ret; 3501 3502 pinctrl_pm_select_default_state(&fep->pdev->dev); 3503 ret = fec_enet_clk_enable(ndev, true); 3504 if (ret) 3505 goto clk_enable; 3506 3507 /* During the first fec_enet_open call the PHY isn't probed at this 3508 * point. Therefore the phy_reset_after_clk_enable() call within 3509 * fec_enet_clk_enable() fails. As we need this reset in order to be 3510 * sure the PHY is working correctly we check if we need to reset again 3511 * later when the PHY is probed 3512 */ 3513 if (ndev->phydev && ndev->phydev->drv) 3514 reset_again = false; 3515 else 3516 reset_again = true; 3517 3518 /* I should reset the ring buffers here, but I don't yet know 3519 * a simple way to do that. 3520 */ 3521 3522 ret = fec_enet_alloc_buffers(ndev); 3523 if (ret) 3524 goto err_enet_alloc; 3525 3526 /* Init MAC prior to mii bus probe */ 3527 fec_restart(ndev); 3528 3529 /* Call phy_reset_after_clk_enable() again if it failed during 3530 * phy_reset_after_clk_enable() before because the PHY wasn't probed. 
3531 */ 3532 if (reset_again) 3533 fec_enet_phy_reset_after_clk_enable(ndev); 3534 3535 /* Probe and connect to PHY when open the interface */ 3536 ret = fec_enet_mii_probe(ndev); 3537 if (ret) 3538 goto err_enet_mii_probe; 3539 3540 if (fep->quirks & FEC_QUIRK_ERR006687) 3541 imx6q_cpuidle_fec_irqs_used(); 3542 3543 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3544 cpu_latency_qos_add_request(&fep->pm_qos_req, 0); 3545 3546 napi_enable(&fep->napi); 3547 phy_start(ndev->phydev); 3548 netif_tx_start_all_queues(ndev); 3549 3550 device_set_wakeup_enable(&ndev->dev, fep->wol_flag & 3551 FEC_WOL_FLAG_ENABLE); 3552 3553 return 0; 3554 3555 err_enet_mii_probe: 3556 fec_enet_free_buffers(ndev); 3557 err_enet_alloc: 3558 fec_enet_clk_enable(ndev, false); 3559 clk_enable: 3560 pm_runtime_mark_last_busy(&fep->pdev->dev); 3561 pm_runtime_put_autosuspend(&fep->pdev->dev); 3562 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3563 return ret; 3564 } 3565 3566 static int 3567 fec_enet_close(struct net_device *ndev) 3568 { 3569 struct fec_enet_private *fep = netdev_priv(ndev); 3570 3571 phy_stop(ndev->phydev); 3572 3573 if (netif_device_present(ndev)) { 3574 napi_disable(&fep->napi); 3575 netif_tx_disable(ndev); 3576 fec_stop(ndev); 3577 } 3578 3579 phy_disconnect(ndev->phydev); 3580 3581 if (fep->quirks & FEC_QUIRK_ERR006687) 3582 imx6q_cpuidle_fec_irqs_unused(); 3583 3584 fec_enet_update_ethtool_stats(ndev); 3585 3586 fec_enet_clk_enable(ndev, false); 3587 if (fep->quirks & FEC_QUIRK_HAS_PMQOS) 3588 cpu_latency_qos_remove_request(&fep->pm_qos_req); 3589 3590 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 3591 pm_runtime_mark_last_busy(&fep->pdev->dev); 3592 pm_runtime_put_autosuspend(&fep->pdev->dev); 3593 3594 fec_enet_free_buffers(ndev); 3595 3596 return 0; 3597 } 3598 3599 /* Set or clear the multicast filter for this adaptor. 3600 * Skeleton taken from sunlance driver. 3601 * The CPM Ethernet implementation allows Multicast as well as individual 3602 * MAC address filtering. Some of the drivers check to make sure it is 3603 * a group multicast address, and discard those that are not. I guess I 3604 * will do the same for now, but just remove the test if you want 3605 * individual filtering as well (do the upper net layers want or support 3606 * this kind of feature?). 
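 * The hardware filter itself hashes each multicast address with CRC-32 and
 * uses the top FEC_HASH_BITS (6) bits to select one of the 64 bits spread
 * across the two group hash registers; see set_multicast_list() below.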
3607 */ 3608 3609 #define FEC_HASH_BITS 6 /* #bits in hash */ 3610 3611 static void set_multicast_list(struct net_device *ndev) 3612 { 3613 struct fec_enet_private *fep = netdev_priv(ndev); 3614 struct netdev_hw_addr *ha; 3615 unsigned int crc, tmp; 3616 unsigned char hash; 3617 unsigned int hash_high = 0, hash_low = 0; 3618 3619 if (ndev->flags & IFF_PROMISC) { 3620 tmp = readl(fep->hwp + FEC_R_CNTRL); 3621 tmp |= 0x8; 3622 writel(tmp, fep->hwp + FEC_R_CNTRL); 3623 return; 3624 } 3625 3626 tmp = readl(fep->hwp + FEC_R_CNTRL); 3627 tmp &= ~0x8; 3628 writel(tmp, fep->hwp + FEC_R_CNTRL); 3629 3630 if (ndev->flags & IFF_ALLMULTI) { 3631 /* Catch all multicast addresses, so set the 3632 * filter to all 1's 3633 */ 3634 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3635 writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3636 3637 return; 3638 } 3639 3640 /* Add the addresses in hash register */ 3641 netdev_for_each_mc_addr(ha, ndev) { 3642 /* calculate crc32 value of mac address */ 3643 crc = ether_crc_le(ndev->addr_len, ha->addr); 3644 3645 /* only upper 6 bits (FEC_HASH_BITS) are used 3646 * which point to specific bit in the hash registers 3647 */ 3648 hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f; 3649 3650 if (hash > 31) 3651 hash_high |= 1 << (hash - 32); 3652 else 3653 hash_low |= 1 << hash; 3654 } 3655 3656 writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 3657 writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 3658 } 3659 3660 /* Set a MAC change in hardware. */ 3661 static int 3662 fec_set_mac_address(struct net_device *ndev, void *p) 3663 { 3664 struct fec_enet_private *fep = netdev_priv(ndev); 3665 struct sockaddr *addr = p; 3666 3667 if (addr) { 3668 if (!is_valid_ether_addr(addr->sa_data)) 3669 return -EADDRNOTAVAIL; 3670 eth_hw_addr_set(ndev, addr->sa_data); 3671 } 3672 3673 /* Add netif status check here to avoid system hang in below case: 3674 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx; 3675 * After ethx down, fec all clocks are gated off and then register 3676 * access causes system hang. 
3677 */ 3678 if (!netif_running(ndev)) 3679 return 0; 3680 3681 writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) | 3682 (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24), 3683 fep->hwp + FEC_ADDR_LOW); 3684 writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24), 3685 fep->hwp + FEC_ADDR_HIGH); 3686 return 0; 3687 } 3688 3689 #ifdef CONFIG_NET_POLL_CONTROLLER 3690 /** 3691 * fec_poll_controller - FEC Poll controller function 3692 * @dev: The FEC network adapter 3693 * 3694 * Polled functionality used by netconsole and others in non interrupt mode 3695 * 3696 */ 3697 static void fec_poll_controller(struct net_device *dev) 3698 { 3699 int i; 3700 struct fec_enet_private *fep = netdev_priv(dev); 3701 3702 for (i = 0; i < FEC_IRQ_NUM; i++) { 3703 if (fep->irq[i] > 0) { 3704 disable_irq(fep->irq[i]); 3705 fec_enet_interrupt(fep->irq[i], dev); 3706 enable_irq(fep->irq[i]); 3707 } 3708 } 3709 } 3710 #endif 3711 3712 static inline void fec_enet_set_netdev_features(struct net_device *netdev, 3713 netdev_features_t features) 3714 { 3715 struct fec_enet_private *fep = netdev_priv(netdev); 3716 netdev_features_t changed = features ^ netdev->features; 3717 3718 netdev->features = features; 3719 3720 /* Receive checksum has been changed */ 3721 if (changed & NETIF_F_RXCSUM) { 3722 if (features & NETIF_F_RXCSUM) 3723 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 3724 else 3725 fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; 3726 } 3727 } 3728 3729 static int fec_set_features(struct net_device *netdev, 3730 netdev_features_t features) 3731 { 3732 struct fec_enet_private *fep = netdev_priv(netdev); 3733 netdev_features_t changed = features ^ netdev->features; 3734 3735 if (netif_running(netdev) && changed & NETIF_F_RXCSUM) { 3736 napi_disable(&fep->napi); 3737 netif_tx_lock_bh(netdev); 3738 fec_stop(netdev); 3739 fec_enet_set_netdev_features(netdev, features); 3740 fec_restart(netdev); 3741 netif_tx_wake_all_queues(netdev); 3742 netif_tx_unlock_bh(netdev); 3743 napi_enable(&fep->napi); 3744 } else { 3745 fec_enet_set_netdev_features(netdev, features); 3746 } 3747 3748 return 0; 3749 } 3750 3751 static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb) 3752 { 3753 struct vlan_ethhdr *vhdr; 3754 unsigned short vlan_TCI = 0; 3755 3756 if (skb->protocol == htons(ETH_P_ALL)) { 3757 vhdr = (struct vlan_ethhdr *)(skb->data); 3758 vlan_TCI = ntohs(vhdr->h_vlan_TCI); 3759 } 3760 3761 return vlan_TCI; 3762 } 3763 3764 static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb, 3765 struct net_device *sb_dev) 3766 { 3767 struct fec_enet_private *fep = netdev_priv(ndev); 3768 u16 vlan_tag; 3769 3770 if (!(fep->quirks & FEC_QUIRK_HAS_AVB)) 3771 return netdev_pick_tx(ndev, skb, NULL); 3772 3773 vlan_tag = fec_enet_get_raw_vlan_tci(skb); 3774 if (!vlan_tag) 3775 return vlan_tag; 3776 3777 return fec_enet_vlan_pri_to_queue[vlan_tag >> 13]; 3778 } 3779 3780 static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf) 3781 { 3782 struct fec_enet_private *fep = netdev_priv(dev); 3783 bool is_run = netif_running(dev); 3784 struct bpf_prog *old_prog; 3785 3786 switch (bpf->command) { 3787 case XDP_SETUP_PROG: 3788 /* No need to support the SoCs that require to 3789 * do the frame swap because the performance wouldn't be 3790 * better than the skb mode. 
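 * (Those are the SoCs with FEC_QUIRK_SWAP_FRAME set, where every frame
 * already has to be byte-swapped by the CPU.)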
3791 */ 3792 if (fep->quirks & FEC_QUIRK_SWAP_FRAME) 3793 return -EOPNOTSUPP; 3794 3795 if (!bpf->prog) 3796 xdp_features_clear_redirect_target(dev); 3797 3798 if (is_run) { 3799 napi_disable(&fep->napi); 3800 netif_tx_disable(dev); 3801 } 3802 3803 old_prog = xchg(&fep->xdp_prog, bpf->prog); 3804 if (old_prog) 3805 bpf_prog_put(old_prog); 3806 3807 fec_restart(dev); 3808 3809 if (is_run) { 3810 napi_enable(&fep->napi); 3811 netif_tx_start_all_queues(dev); 3812 } 3813 3814 if (bpf->prog) 3815 xdp_features_set_redirect_target(dev, false); 3816 3817 return 0; 3818 3819 case XDP_SETUP_XSK_POOL: 3820 return -EOPNOTSUPP; 3821 3822 default: 3823 return -EOPNOTSUPP; 3824 } 3825 } 3826 3827 static int 3828 fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index) 3829 { 3830 if (unlikely(index < 0)) 3831 return 0; 3832 3833 return (index % fep->num_tx_queues); 3834 } 3835 3836 static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep, 3837 struct fec_enet_priv_tx_q *txq, 3838 struct xdp_frame *frame) 3839 { 3840 unsigned int index, status, estatus; 3841 struct bufdesc *bdp; 3842 dma_addr_t dma_addr; 3843 int entries_free; 3844 3845 entries_free = fec_enet_get_free_txdesc_num(txq); 3846 if (entries_free < MAX_SKB_FRAGS + 1) { 3847 netdev_err_once(fep->netdev, "NOT enough BD for SG!\n"); 3848 return -EBUSY; 3849 } 3850 3851 /* Fill in a Tx ring entry */ 3852 bdp = txq->bd.cur; 3853 status = fec16_to_cpu(bdp->cbd_sc); 3854 status &= ~BD_ENET_TX_STATS; 3855 3856 index = fec_enet_get_bd_index(bdp, &txq->bd); 3857 3858 dma_addr = dma_map_single(&fep->pdev->dev, frame->data, 3859 frame->len, DMA_TO_DEVICE); 3860 if (dma_mapping_error(&fep->pdev->dev, dma_addr)) 3861 return -ENOMEM; 3862 3863 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST); 3864 if (fep->bufdesc_ex) 3865 estatus = BD_ENET_TX_INT; 3866 3867 bdp->cbd_bufaddr = cpu_to_fec32(dma_addr); 3868 bdp->cbd_datlen = cpu_to_fec16(frame->len); 3869 3870 if (fep->bufdesc_ex) { 3871 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; 3872 3873 if (fep->quirks & FEC_QUIRK_HAS_AVB) 3874 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid); 3875 3876 ebdp->cbd_bdu = 0; 3877 ebdp->cbd_esc = cpu_to_fec32(estatus); 3878 } 3879 3880 txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO; 3881 txq->tx_buf[index].xdp = frame; 3882 3883 /* Make sure the updates to rest of the descriptor are performed before 3884 * transferring ownership. 3885 */ 3886 dma_wmb(); 3887 3888 /* Send it on its way. Tell FEC it's ready, interrupt when done, 3889 * it's the last BD of the frame, and to put the CRC on the end. 3890 */ 3891 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC); 3892 bdp->cbd_sc = cpu_to_fec16(status); 3893 3894 /* If this was the last BD in the ring, start at the beginning again. */ 3895 bdp = fec_enet_get_nextdesc(bdp, &txq->bd); 3896 3897 /* Make sure the update to bdp are performed before txq->bd.cur. 
*/ 3898 dma_wmb(); 3899 3900 txq->bd.cur = bdp; 3901 3902 /* Trigger transmission start */ 3903 writel(0, txq->bd.reg_desc_active); 3904 3905 return 0; 3906 } 3907 3908 static int fec_enet_xdp_xmit(struct net_device *dev, 3909 int num_frames, 3910 struct xdp_frame **frames, 3911 u32 flags) 3912 { 3913 struct fec_enet_private *fep = netdev_priv(dev); 3914 struct fec_enet_priv_tx_q *txq; 3915 int cpu = smp_processor_id(); 3916 unsigned int sent_frames = 0; 3917 struct netdev_queue *nq; 3918 unsigned int queue; 3919 int i; 3920 3921 queue = fec_enet_xdp_get_tx_queue(fep, cpu); 3922 txq = fep->tx_queue[queue]; 3923 nq = netdev_get_tx_queue(fep->netdev, queue); 3924 3925 __netif_tx_lock(nq, cpu); 3926 3927 /* Avoid tx timeout as XDP shares the queue with kernel stack */ 3928 txq_trans_cond_update(nq); 3929 for (i = 0; i < num_frames; i++) { 3930 if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0) 3931 break; 3932 sent_frames++; 3933 } 3934 3935 __netif_tx_unlock(nq); 3936 3937 return sent_frames; 3938 } 3939 3940 static const struct net_device_ops fec_netdev_ops = { 3941 .ndo_open = fec_enet_open, 3942 .ndo_stop = fec_enet_close, 3943 .ndo_start_xmit = fec_enet_start_xmit, 3944 .ndo_select_queue = fec_enet_select_queue, 3945 .ndo_set_rx_mode = set_multicast_list, 3946 .ndo_validate_addr = eth_validate_addr, 3947 .ndo_tx_timeout = fec_timeout, 3948 .ndo_set_mac_address = fec_set_mac_address, 3949 .ndo_eth_ioctl = fec_enet_ioctl, 3950 #ifdef CONFIG_NET_POLL_CONTROLLER 3951 .ndo_poll_controller = fec_poll_controller, 3952 #endif 3953 .ndo_set_features = fec_set_features, 3954 .ndo_bpf = fec_enet_bpf, 3955 .ndo_xdp_xmit = fec_enet_xdp_xmit, 3956 }; 3957 3958 static const unsigned short offset_des_active_rxq[] = { 3959 FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2 3960 }; 3961 3962 static const unsigned short offset_des_active_txq[] = { 3963 FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2 3964 }; 3965 3966 /* 3967 * XXX: We need to clean up on failure exits here. 3968 * 3969 */ 3970 static int fec_enet_init(struct net_device *ndev) 3971 { 3972 struct fec_enet_private *fep = netdev_priv(ndev); 3973 struct bufdesc *cbd_base; 3974 dma_addr_t bd_dma; 3975 int bd_size; 3976 unsigned int i; 3977 unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) : 3978 sizeof(struct bufdesc); 3979 unsigned dsize_log2 = __fls(dsize); 3980 int ret; 3981 3982 WARN_ON(dsize != (1 << dsize_log2)); 3983 #if defined(CONFIG_ARM) || defined(CONFIG_ARM64) 3984 fep->rx_align = 0xf; 3985 fep->tx_align = 0xf; 3986 #else 3987 fep->rx_align = 0x3; 3988 fep->tx_align = 0x3; 3989 #endif 3990 fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 3991 fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT; 3992 fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT; 3993 fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT; 3994 3995 /* Check mask of the streaming and coherent API */ 3996 ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32)); 3997 if (ret < 0) { 3998 dev_warn(&fep->pdev->dev, "No suitable DMA available\n"); 3999 return ret; 4000 } 4001 4002 ret = fec_enet_alloc_queue(ndev); 4003 if (ret) 4004 return ret; 4005 4006 bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize; 4007 4008 /* Allocate memory for buffer descriptors. 
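	 * All Tx and Rx rings share this single coherent allocation; the
	 * per-queue loops below carve it up by advancing cbd_base and bd_dma
	 * one ring at a time.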
*/ 4009 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma, 4010 GFP_KERNEL); 4011 if (!cbd_base) { 4012 ret = -ENOMEM; 4013 goto free_queue_mem; 4014 } 4015 4016 /* Get the Ethernet address */ 4017 ret = fec_get_mac(ndev); 4018 if (ret) 4019 goto free_queue_mem; 4020 4021 /* make sure MAC we just acquired is programmed into the hw */ 4022 fec_set_mac_address(ndev, NULL); 4023 4024 /* Set receive and transmit descriptor base. */ 4025 for (i = 0; i < fep->num_rx_queues; i++) { 4026 struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i]; 4027 unsigned size = dsize * rxq->bd.ring_size; 4028 4029 rxq->bd.qid = i; 4030 rxq->bd.base = cbd_base; 4031 rxq->bd.cur = cbd_base; 4032 rxq->bd.dma = bd_dma; 4033 rxq->bd.dsize = dsize; 4034 rxq->bd.dsize_log2 = dsize_log2; 4035 rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i]; 4036 bd_dma += size; 4037 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4038 rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4039 } 4040 4041 for (i = 0; i < fep->num_tx_queues; i++) { 4042 struct fec_enet_priv_tx_q *txq = fep->tx_queue[i]; 4043 unsigned size = dsize * txq->bd.ring_size; 4044 4045 txq->bd.qid = i; 4046 txq->bd.base = cbd_base; 4047 txq->bd.cur = cbd_base; 4048 txq->bd.dma = bd_dma; 4049 txq->bd.dsize = dsize; 4050 txq->bd.dsize_log2 = dsize_log2; 4051 txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i]; 4052 bd_dma += size; 4053 cbd_base = (struct bufdesc *)(((void *)cbd_base) + size); 4054 txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize); 4055 } 4056 4057 4058 /* The FEC Ethernet specific entries in the device structure */ 4059 ndev->watchdog_timeo = TX_TIMEOUT; 4060 ndev->netdev_ops = &fec_netdev_ops; 4061 ndev->ethtool_ops = &fec_enet_ethtool_ops; 4062 4063 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 4064 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi); 4065 4066 if (fep->quirks & FEC_QUIRK_HAS_VLAN) 4067 /* enable hw VLAN support */ 4068 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; 4069 4070 if (fep->quirks & FEC_QUIRK_HAS_CSUM) { 4071 netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS); 4072 4073 /* enable hw accelerator */ 4074 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 4075 | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO); 4076 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 4077 } 4078 4079 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) { 4080 fep->tx_align = 0; 4081 fep->rx_align = 0x3f; 4082 } 4083 4084 ndev->hw_features = ndev->features; 4085 4086 if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) 4087 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | 4088 NETDEV_XDP_ACT_REDIRECT; 4089 4090 fec_restart(ndev); 4091 4092 if (fep->quirks & FEC_QUIRK_MIB_CLEAR) 4093 fec_enet_clear_ethtool_stats(ndev); 4094 else 4095 fec_enet_update_ethtool_stats(ndev); 4096 4097 return 0; 4098 4099 free_queue_mem: 4100 fec_enet_free_queue(ndev); 4101 return ret; 4102 } 4103 4104 #ifdef CONFIG_OF 4105 static int fec_reset_phy(struct platform_device *pdev) 4106 { 4107 struct gpio_desc *phy_reset; 4108 int msec = 1, phy_post_delay = 0; 4109 struct device_node *np = pdev->dev.of_node; 4110 int err; 4111 4112 if (!np) 4113 return 0; 4114 4115 err = of_property_read_u32(np, "phy-reset-duration", &msec); 4116 /* A sane reset duration should not be longer than 1s */ 4117 if (!err && msec > 1000) 4118 msec = 1; 4119 4120 err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay); 4121 /* valid reset duration should be less than 1s */ 4122 if (!err && phy_post_delay > 1000) 4123 return -EINVAL; 4124 4125 
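	/* Request the reset line already asserted (GPIOD_OUT_HIGH == active),
	 * hold it for the requested reset duration, then deassert it below.
	 */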
phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset", 4126 GPIOD_OUT_HIGH); 4127 if (IS_ERR(phy_reset)) 4128 return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset), 4129 "failed to get phy-reset-gpios\n"); 4130 4131 if (!phy_reset) 4132 return 0; 4133 4134 if (msec > 20) 4135 msleep(msec); 4136 else 4137 usleep_range(msec * 1000, msec * 1000 + 1000); 4138 4139 gpiod_set_value_cansleep(phy_reset, 0); 4140 4141 if (!phy_post_delay) 4142 return 0; 4143 4144 if (phy_post_delay > 20) 4145 msleep(phy_post_delay); 4146 else 4147 usleep_range(phy_post_delay * 1000, 4148 phy_post_delay * 1000 + 1000); 4149 4150 return 0; 4151 } 4152 #else /* CONFIG_OF */ 4153 static int fec_reset_phy(struct platform_device *pdev) 4154 { 4155 /* 4156 * In case of platform probe, the reset has been done 4157 * by machine code. 4158 */ 4159 return 0; 4160 } 4161 #endif /* CONFIG_OF */ 4162 4163 static void 4164 fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx) 4165 { 4166 struct device_node *np = pdev->dev.of_node; 4167 4168 *num_tx = *num_rx = 1; 4169 4170 if (!np || !of_device_is_available(np)) 4171 return; 4172 4173 /* parse the num of tx and rx queues */ 4174 of_property_read_u32(np, "fsl,num-tx-queues", num_tx); 4175 4176 of_property_read_u32(np, "fsl,num-rx-queues", num_rx); 4177 4178 if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) { 4179 dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n", 4180 *num_tx); 4181 *num_tx = 1; 4182 return; 4183 } 4184 4185 if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) { 4186 dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n", 4187 *num_rx); 4188 *num_rx = 1; 4189 return; 4190 } 4191 4192 } 4193 4194 static int fec_enet_get_irq_cnt(struct platform_device *pdev) 4195 { 4196 int irq_cnt = platform_irq_count(pdev); 4197 4198 if (irq_cnt > FEC_IRQ_NUM) 4199 irq_cnt = FEC_IRQ_NUM; /* last for pps */ 4200 else if (irq_cnt == 2) 4201 irq_cnt = 1; /* last for pps */ 4202 else if (irq_cnt <= 0) 4203 irq_cnt = 1; /* At least 1 irq is needed */ 4204 return irq_cnt; 4205 } 4206 4207 static void fec_enet_get_wakeup_irq(struct platform_device *pdev) 4208 { 4209 struct net_device *ndev = platform_get_drvdata(pdev); 4210 struct fec_enet_private *fep = netdev_priv(ndev); 4211 4212 if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) 4213 fep->wake_irq = fep->irq[2]; 4214 else 4215 fep->wake_irq = fep->irq[0]; 4216 } 4217 4218 static int fec_enet_init_stop_mode(struct fec_enet_private *fep, 4219 struct device_node *np) 4220 { 4221 struct device_node *gpr_np; 4222 u32 out_val[3]; 4223 int ret = 0; 4224 4225 gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0); 4226 if (!gpr_np) 4227 return 0; 4228 4229 ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, 4230 ARRAY_SIZE(out_val)); 4231 if (ret) { 4232 dev_dbg(&fep->pdev->dev, "no stop mode property\n"); 4233 goto out; 4234 } 4235 4236 fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np); 4237 if (IS_ERR(fep->stop_gpr.gpr)) { 4238 dev_err(&fep->pdev->dev, "could not find gpr regmap\n"); 4239 ret = PTR_ERR(fep->stop_gpr.gpr); 4240 fep->stop_gpr.gpr = NULL; 4241 goto out; 4242 } 4243 4244 fep->stop_gpr.reg = out_val[1]; 4245 fep->stop_gpr.bit = out_val[2]; 4246 4247 out: 4248 of_node_put(gpr_np); 4249 4250 return ret; 4251 } 4252 4253 static int 4254 fec_probe(struct platform_device *pdev) 4255 { 4256 struct fec_enet_private *fep; 4257 struct fec_platform_data *pdata; 4258 phy_interface_t interface; 4259 struct net_device *ndev; 4260 int i, irq, ret = 0; 4261 const struct of_device_id *of_id; 4262 
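	/* Numbers successive FEC instances across probes; fep->dev_id takes
	 * the next value, and it is decremented again on probe failure (see
	 * the error path at the end of this function).
	 */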
static int dev_id; 4263 struct device_node *np = pdev->dev.of_node, *phy_node; 4264 int num_tx_qs; 4265 int num_rx_qs; 4266 char irq_name[8]; 4267 int irq_cnt; 4268 struct fec_devinfo *dev_info; 4269 4270 fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs); 4271 4272 /* Init network device */ 4273 ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) + 4274 FEC_STATS_SIZE, num_tx_qs, num_rx_qs); 4275 if (!ndev) 4276 return -ENOMEM; 4277 4278 SET_NETDEV_DEV(ndev, &pdev->dev); 4279 4280 /* setup board info structure */ 4281 fep = netdev_priv(ndev); 4282 4283 of_id = of_match_device(fec_dt_ids, &pdev->dev); 4284 if (of_id) 4285 pdev->id_entry = of_id->data; 4286 dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data; 4287 if (dev_info) 4288 fep->quirks = dev_info->quirks; 4289 4290 fep->netdev = ndev; 4291 fep->num_rx_queues = num_rx_qs; 4292 fep->num_tx_queues = num_tx_qs; 4293 4294 #if !defined(CONFIG_M5272) 4295 /* default enable pause frame auto negotiation */ 4296 if (fep->quirks & FEC_QUIRK_HAS_GBIT) 4297 fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; 4298 #endif 4299 4300 /* Select default pin state */ 4301 pinctrl_pm_select_default_state(&pdev->dev); 4302 4303 fep->hwp = devm_platform_ioremap_resource(pdev, 0); 4304 if (IS_ERR(fep->hwp)) { 4305 ret = PTR_ERR(fep->hwp); 4306 goto failed_ioremap; 4307 } 4308 4309 fep->pdev = pdev; 4310 fep->dev_id = dev_id++; 4311 4312 platform_set_drvdata(pdev, ndev); 4313 4314 if ((of_machine_is_compatible("fsl,imx6q") || 4315 of_machine_is_compatible("fsl,imx6dl")) && 4316 !of_property_read_bool(np, "fsl,err006687-workaround-present")) 4317 fep->quirks |= FEC_QUIRK_ERR006687; 4318 4319 ret = fec_enet_ipc_handle_init(fep); 4320 if (ret) 4321 goto failed_ipc_init; 4322 4323 if (of_property_read_bool(np, "fsl,magic-packet")) 4324 fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; 4325 4326 ret = fec_enet_init_stop_mode(fep, np); 4327 if (ret) 4328 goto failed_stop_mode; 4329 4330 phy_node = of_parse_phandle(np, "phy-handle", 0); 4331 if (!phy_node && of_phy_is_fixed_link(np)) { 4332 ret = of_phy_register_fixed_link(np); 4333 if (ret < 0) { 4334 dev_err(&pdev->dev, 4335 "broken fixed-link specification\n"); 4336 goto failed_phy; 4337 } 4338 phy_node = of_node_get(np); 4339 } 4340 fep->phy_node = phy_node; 4341 4342 ret = of_get_phy_mode(pdev->dev.of_node, &interface); 4343 if (ret) { 4344 pdata = dev_get_platdata(&pdev->dev); 4345 if (pdata) 4346 fep->phy_interface = pdata->phy; 4347 else 4348 fep->phy_interface = PHY_INTERFACE_MODE_MII; 4349 } else { 4350 fep->phy_interface = interface; 4351 } 4352 4353 ret = fec_enet_parse_rgmii_delay(fep, np); 4354 if (ret) 4355 goto failed_rgmii_delay; 4356 4357 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); 4358 if (IS_ERR(fep->clk_ipg)) { 4359 ret = PTR_ERR(fep->clk_ipg); 4360 goto failed_clk; 4361 } 4362 4363 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); 4364 if (IS_ERR(fep->clk_ahb)) { 4365 ret = PTR_ERR(fep->clk_ahb); 4366 goto failed_clk; 4367 } 4368 4369 fep->itr_clk_rate = clk_get_rate(fep->clk_ahb); 4370 4371 /* enet_out is optional, depends on board */ 4372 fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out"); 4373 if (IS_ERR(fep->clk_enet_out)) { 4374 ret = PTR_ERR(fep->clk_enet_out); 4375 goto failed_clk; 4376 } 4377 4378 fep->ptp_clk_on = false; 4379 mutex_init(&fep->ptp_clk_mutex); 4380 4381 /* clk_ref is optional, depends on board */ 4382 fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref"); 4383 if (IS_ERR(fep->clk_ref)) { 4384 ret = PTR_ERR(fep->clk_ref); 4385 goto 
failed_clk; 4386 } 4387 fep->clk_ref_rate = clk_get_rate(fep->clk_ref); 4388 4389 /* clk_2x_txclk is optional, depends on board */ 4390 if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { 4391 fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); 4392 if (IS_ERR(fep->clk_2x_txclk)) 4393 fep->clk_2x_txclk = NULL; 4394 } 4395 4396 fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; 4397 fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); 4398 if (IS_ERR(fep->clk_ptp)) { 4399 fep->clk_ptp = NULL; 4400 fep->bufdesc_ex = false; 4401 } 4402 4403 ret = fec_enet_clk_enable(ndev, true); 4404 if (ret) 4405 goto failed_clk; 4406 4407 ret = clk_prepare_enable(fep->clk_ipg); 4408 if (ret) 4409 goto failed_clk_ipg; 4410 ret = clk_prepare_enable(fep->clk_ahb); 4411 if (ret) 4412 goto failed_clk_ahb; 4413 4414 fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy"); 4415 if (!IS_ERR(fep->reg_phy)) { 4416 ret = regulator_enable(fep->reg_phy); 4417 if (ret) { 4418 dev_err(&pdev->dev, 4419 "Failed to enable phy regulator: %d\n", ret); 4420 goto failed_regulator; 4421 } 4422 } else { 4423 if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) { 4424 ret = -EPROBE_DEFER; 4425 goto failed_regulator; 4426 } 4427 fep->reg_phy = NULL; 4428 } 4429 4430 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); 4431 pm_runtime_use_autosuspend(&pdev->dev); 4432 pm_runtime_get_noresume(&pdev->dev); 4433 pm_runtime_set_active(&pdev->dev); 4434 pm_runtime_enable(&pdev->dev); 4435 4436 ret = fec_reset_phy(pdev); 4437 if (ret) 4438 goto failed_reset; 4439 4440 irq_cnt = fec_enet_get_irq_cnt(pdev); 4441 if (fep->bufdesc_ex) 4442 fec_ptp_init(pdev, irq_cnt); 4443 4444 ret = fec_enet_init(ndev); 4445 if (ret) 4446 goto failed_init; 4447 4448 for (i = 0; i < irq_cnt; i++) { 4449 snprintf(irq_name, sizeof(irq_name), "int%d", i); 4450 irq = platform_get_irq_byname_optional(pdev, irq_name); 4451 if (irq < 0) 4452 irq = platform_get_irq(pdev, i); 4453 if (irq < 0) { 4454 ret = irq; 4455 goto failed_irq; 4456 } 4457 ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt, 4458 0, pdev->name, ndev); 4459 if (ret) 4460 goto failed_irq; 4461 4462 fep->irq[i] = irq; 4463 } 4464 4465 /* Decide which interrupt line is wakeup capable */ 4466 fec_enet_get_wakeup_irq(pdev); 4467 4468 ret = fec_enet_mii_init(pdev); 4469 if (ret) 4470 goto failed_mii_init; 4471 4472 /* Carrier starts down, phylib will bring it up */ 4473 netif_carrier_off(ndev); 4474 fec_enet_clk_enable(ndev, false); 4475 pinctrl_pm_select_sleep_state(&pdev->dev); 4476 4477 ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN; 4478 4479 ret = register_netdev(ndev); 4480 if (ret) 4481 goto failed_register; 4482 4483 device_init_wakeup(&ndev->dev, fep->wol_flag & 4484 FEC_WOL_HAS_MAGIC_PACKET); 4485 4486 if (fep->bufdesc_ex && fep->ptp_clock) 4487 netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 4488 4489 fep->rx_copybreak = COPYBREAK_DEFAULT; 4490 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 4491 4492 pm_runtime_mark_last_busy(&pdev->dev); 4493 pm_runtime_put_autosuspend(&pdev->dev); 4494 4495 return 0; 4496 4497 failed_register: 4498 fec_enet_mii_remove(fep); 4499 failed_mii_init: 4500 failed_irq: 4501 failed_init: 4502 fec_ptp_stop(pdev); 4503 failed_reset: 4504 pm_runtime_put_noidle(&pdev->dev); 4505 pm_runtime_disable(&pdev->dev); 4506 if (fep->reg_phy) 4507 regulator_disable(fep->reg_phy); 4508 failed_regulator: 4509 clk_disable_unprepare(fep->clk_ahb); 4510 failed_clk_ahb: 4511 clk_disable_unprepare(fep->clk_ipg); 4512 
failed_clk_ipg: 4513 fec_enet_clk_enable(ndev, false); 4514 failed_clk: 4515 failed_rgmii_delay: 4516 if (of_phy_is_fixed_link(np)) 4517 of_phy_deregister_fixed_link(np); 4518 of_node_put(phy_node); 4519 failed_stop_mode: 4520 failed_ipc_init: 4521 failed_phy: 4522 dev_id--; 4523 failed_ioremap: 4524 free_netdev(ndev); 4525 4526 return ret; 4527 } 4528 4529 static int 4530 fec_drv_remove(struct platform_device *pdev) 4531 { 4532 struct net_device *ndev = platform_get_drvdata(pdev); 4533 struct fec_enet_private *fep = netdev_priv(ndev); 4534 struct device_node *np = pdev->dev.of_node; 4535 int ret; 4536 4537 ret = pm_runtime_get_sync(&pdev->dev); 4538 if (ret < 0) 4539 dev_err(&pdev->dev, 4540 "Failed to resume device in remove callback (%pe)\n", 4541 ERR_PTR(ret)); 4542 4543 cancel_work_sync(&fep->tx_timeout_work); 4544 fec_ptp_stop(pdev); 4545 unregister_netdev(ndev); 4546 fec_enet_mii_remove(fep); 4547 if (fep->reg_phy) 4548 regulator_disable(fep->reg_phy); 4549 4550 if (of_phy_is_fixed_link(np)) 4551 of_phy_deregister_fixed_link(np); 4552 of_node_put(fep->phy_node); 4553 4554 /* After pm_runtime_get_sync() failed, the clks are still off, so skip 4555 * disabling them again. 4556 */ 4557 if (ret >= 0) { 4558 clk_disable_unprepare(fep->clk_ahb); 4559 clk_disable_unprepare(fep->clk_ipg); 4560 } 4561 pm_runtime_put_noidle(&pdev->dev); 4562 pm_runtime_disable(&pdev->dev); 4563 4564 free_netdev(ndev); 4565 return 0; 4566 } 4567 4568 static int __maybe_unused fec_suspend(struct device *dev) 4569 { 4570 struct net_device *ndev = dev_get_drvdata(dev); 4571 struct fec_enet_private *fep = netdev_priv(ndev); 4572 int ret; 4573 4574 rtnl_lock(); 4575 if (netif_running(ndev)) { 4576 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) 4577 fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON; 4578 phy_stop(ndev->phydev); 4579 napi_disable(&fep->napi); 4580 netif_tx_lock_bh(ndev); 4581 netif_device_detach(ndev); 4582 netif_tx_unlock_bh(ndev); 4583 fec_stop(ndev); 4584 if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4585 fec_irqs_disable(ndev); 4586 pinctrl_pm_select_sleep_state(&fep->pdev->dev); 4587 } else { 4588 fec_irqs_disable_except_wakeup(ndev); 4589 if (fep->wake_irq > 0) { 4590 disable_irq(fep->wake_irq); 4591 enable_irq_wake(fep->wake_irq); 4592 } 4593 fec_enet_stop_mode(fep, true); 4594 } 4595 /* It's safe to disable clocks since interrupts are masked */ 4596 fec_enet_clk_enable(ndev, false); 4597 4598 fep->rpm_active = !pm_runtime_status_suspended(dev); 4599 if (fep->rpm_active) { 4600 ret = pm_runtime_force_suspend(dev); 4601 if (ret < 0) { 4602 rtnl_unlock(); 4603 return ret; 4604 } 4605 } 4606 } 4607 rtnl_unlock(); 4608 4609 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) 4610 regulator_disable(fep->reg_phy); 4611 4612 /* SOC supply clock to phy, when clock is disabled, phy link down 4613 * SOC control phy regulator, when regulator is disabled, phy link down 4614 */ 4615 if (fep->clk_enet_out || fep->reg_phy) 4616 fep->link = 0; 4617 4618 return 0; 4619 } 4620 4621 static int __maybe_unused fec_resume(struct device *dev) 4622 { 4623 struct net_device *ndev = dev_get_drvdata(dev); 4624 struct fec_enet_private *fep = netdev_priv(ndev); 4625 int ret; 4626 int val; 4627 4628 if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) { 4629 ret = regulator_enable(fep->reg_phy); 4630 if (ret) 4631 return ret; 4632 } 4633 4634 rtnl_lock(); 4635 if (netif_running(ndev)) { 4636 if (fep->rpm_active) 4637 pm_runtime_force_resume(dev); 4638 4639 ret = fec_enet_clk_enable(ndev, true); 4640 if (ret) { 4641 
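			/* Re-enabling the clocks failed: drop the rtnl lock and
			 * undo the PHY regulator enable via failed_clk below.
			 */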
rtnl_unlock(); 4642 goto failed_clk; 4643 } 4644 if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) { 4645 fec_enet_stop_mode(fep, false); 4646 if (fep->wake_irq) { 4647 disable_irq_wake(fep->wake_irq); 4648 enable_irq(fep->wake_irq); 4649 } 4650 4651 val = readl(fep->hwp + FEC_ECNTRL); 4652 val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP); 4653 writel(val, fep->hwp + FEC_ECNTRL); 4654 fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON; 4655 } else { 4656 pinctrl_pm_select_default_state(&fep->pdev->dev); 4657 } 4658 fec_restart(ndev); 4659 netif_tx_lock_bh(ndev); 4660 netif_device_attach(ndev); 4661 netif_tx_unlock_bh(ndev); 4662 napi_enable(&fep->napi); 4663 phy_init_hw(ndev->phydev); 4664 phy_start(ndev->phydev); 4665 } 4666 rtnl_unlock(); 4667 4668 return 0; 4669 4670 failed_clk: 4671 if (fep->reg_phy) 4672 regulator_disable(fep->reg_phy); 4673 return ret; 4674 } 4675 4676 static int __maybe_unused fec_runtime_suspend(struct device *dev) 4677 { 4678 struct net_device *ndev = dev_get_drvdata(dev); 4679 struct fec_enet_private *fep = netdev_priv(ndev); 4680 4681 clk_disable_unprepare(fep->clk_ahb); 4682 clk_disable_unprepare(fep->clk_ipg); 4683 4684 return 0; 4685 } 4686 4687 static int __maybe_unused fec_runtime_resume(struct device *dev) 4688 { 4689 struct net_device *ndev = dev_get_drvdata(dev); 4690 struct fec_enet_private *fep = netdev_priv(ndev); 4691 int ret; 4692 4693 ret = clk_prepare_enable(fep->clk_ahb); 4694 if (ret) 4695 return ret; 4696 ret = clk_prepare_enable(fep->clk_ipg); 4697 if (ret) 4698 goto failed_clk_ipg; 4699 4700 return 0; 4701 4702 failed_clk_ipg: 4703 clk_disable_unprepare(fep->clk_ahb); 4704 return ret; 4705 } 4706 4707 static const struct dev_pm_ops fec_pm_ops = { 4708 SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) 4709 SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) 4710 }; 4711 4712 static struct platform_driver fec_driver = { 4713 .driver = { 4714 .name = DRIVER_NAME, 4715 .pm = &fec_pm_ops, 4716 .of_match_table = fec_dt_ids, 4717 .suppress_bind_attrs = true, 4718 }, 4719 .id_table = fec_devtype, 4720 .probe = fec_probe, 4721 .remove = fec_drv_remove, 4722 }; 4723 4724 module_platform_driver(fec_driver); 4725 4726 MODULE_LICENSE("GPL"); 4727
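/*
 * Purely illustrative sketch (not a binding reference) of the device tree
 * properties the probe path above consumes; the property names are taken
 * from fec_probe()/fec_reset_phy(), while the node name, values and the
 * compatible string are example placeholders only:
 *
 *	ethernet0 {
 *		compatible = "fsl,imx6q-fec";
 *		phy-mode = "rgmii-id";
 *		phy-handle = <&ethphy0>;
 *		phy-reset-gpios = <&gpio1 25 GPIO_ACTIVE_LOW>;
 *		phy-reset-duration = <10>;
 *		fsl,num-tx-queues = <3>;
 *		fsl,num-rx-queues = <3>;
 *		fsl,magic-packet;
 *	};
 */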