1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3 * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
4 * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
5 *
6 * Right now, I am very wasteful with the buffers. I allocate memory
7 * pages and then divide them into 2K frame buffers. This way I know I
8 * have buffers large enough to hold one frame within one buffer descriptor.
9 * Once I get this working, I will use 64 or 128 byte CPM buffers, which
10 * will be much more memory efficient and will easily handle lots of
11 * small packets.
12 *
13 * Much better multiple PHY support by Magnus Damm.
14 * Copyright (c) 2000 Ericsson Radio Systems AB.
15 *
16 * Support for FEC controller of ColdFire processors.
17 * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
18 *
19 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
20 * Copyright (c) 2004-2006 Macq Electronique SA.
21 *
22 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
23 */
24
25 #include <linux/module.h>
26 #include <linux/kernel.h>
27 #include <linux/string.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/ptrace.h>
30 #include <linux/errno.h>
31 #include <linux/ioport.h>
32 #include <linux/slab.h>
33 #include <linux/interrupt.h>
34 #include <linux/delay.h>
35 #include <linux/netdevice.h>
36 #include <linux/etherdevice.h>
37 #include <linux/skbuff.h>
38 #include <linux/in.h>
39 #include <linux/ip.h>
40 #include <net/ip.h>
41 #include <net/page_pool/helpers.h>
42 #include <net/selftests.h>
43 #include <net/tso.h>
44 #include <linux/tcp.h>
45 #include <linux/udp.h>
46 #include <linux/icmp.h>
47 #include <linux/spinlock.h>
48 #include <linux/workqueue.h>
49 #include <linux/bitops.h>
50 #include <linux/io.h>
51 #include <linux/irq.h>
52 #include <linux/clk.h>
53 #include <linux/crc32.h>
54 #include <linux/platform_device.h>
55 #include <linux/mdio.h>
56 #include <linux/phy.h>
57 #include <linux/fec.h>
58 #include <linux/of.h>
59 #include <linux/of_device.h>
60 #include <linux/of_mdio.h>
61 #include <linux/of_net.h>
62 #include <linux/regulator/consumer.h>
63 #include <linux/if_vlan.h>
64 #include <linux/pinctrl/consumer.h>
65 #include <linux/gpio/consumer.h>
66 #include <linux/prefetch.h>
67 #include <linux/mfd/syscon.h>
68 #include <linux/regmap.h>
69 #include <soc/imx/cpuidle.h>
70 #include <linux/filter.h>
71 #include <linux/bpf.h>
72 #include <linux/bpf_trace.h>
73
74 #include <asm/cacheflush.h>
75
76 #include "fec.h"
77
78 static void set_multicast_list(struct net_device *ndev);
79 static void fec_enet_itr_coal_set(struct net_device *ndev);
80 static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
81 int cpu, struct xdp_buff *xdp,
82 u32 dma_sync_len);
83
84 #define DRIVER_NAME "fec"
85
86 static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
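/* Illustrative reading of the table above: IEEE 802.1Q PCP values 0-1
 * select Tx queue 0, 2-4 select queue 1 and 5-7 select queue 2, so e.g.
 * fec_enet_vlan_pri_to_queue[5] == 2.
 */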
87
88 /* Pause frame field and FIFO threshold */
89 #define FEC_ENET_FCE (1 << 5)
90 #define FEC_ENET_RSEM_V 0x84
91 #define FEC_ENET_RSFL_V 16
92 #define FEC_ENET_RAEM_V 0x8
93 #define FEC_ENET_RAFL_V 0x8
94 #define FEC_ENET_OPD_V 0xFFF0
95 #define FEC_MDIO_PM_TIMEOUT 100 /* ms */
96
97 #define FEC_ENET_XDP_PASS 0
98 #define FEC_ENET_XDP_CONSUMED BIT(0)
99 #define FEC_ENET_XDP_TX BIT(1)
100 #define FEC_ENET_XDP_REDIR BIT(2)
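/* The XDP verdicts above are bit flags (except PASS == 0), so the Rx loop
 * can OR the per-packet results together and act once per NAPI poll, e.g.:
 *
 *	xdp_result |= fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
 *	...
 *	if (xdp_result & FEC_ENET_XDP_REDIR)
 *		xdp_do_flush_map();
 */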
101
102 struct fec_devinfo {
103 u32 quirks;
104 };
105
106 static const struct fec_devinfo fec_imx25_info = {
107 .quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
108 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_HAS_MDIO_C45,
109 };
110
111 static const struct fec_devinfo fec_imx27_info = {
112 .quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG |
113 FEC_QUIRK_HAS_MDIO_C45,
114 };
115
116 static const struct fec_devinfo fec_imx28_info = {
117 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
118 FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
119 FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII |
120 FEC_QUIRK_NO_HARD_RESET | FEC_QUIRK_HAS_MDIO_C45,
121 };
122
123 static const struct fec_devinfo fec_imx6q_info = {
124 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
125 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
126 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
127 FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII |
128 FEC_QUIRK_HAS_PMQOS | FEC_QUIRK_HAS_MDIO_C45,
129 };
130
131 static const struct fec_devinfo fec_mvf600_info = {
132 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC |
133 FEC_QUIRK_HAS_MDIO_C45,
134 };
135
136 static const struct fec_devinfo fec_imx6x_info = {
137 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
138 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
139 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
140 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
141 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
142 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
143 FEC_QUIRK_HAS_MDIO_C45,
144 };
145
146 static const struct fec_devinfo fec_imx6ul_info = {
147 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
148 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
149 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
150 FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
151 FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII |
152 FEC_QUIRK_HAS_MDIO_C45,
153 };
154
155 static const struct fec_devinfo fec_imx8mq_info = {
156 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
157 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
158 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
159 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
160 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
161 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
162 FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2 |
163 FEC_QUIRK_HAS_MDIO_C45,
164 };
165
166 static const struct fec_devinfo fec_imx8qm_info = {
167 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
168 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
169 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
170 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
171 FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
172 FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES |
173 FEC_QUIRK_DELAYED_CLKS_SUPPORT | FEC_QUIRK_HAS_MDIO_C45,
174 };
175
176 static const struct fec_devinfo fec_s32v234_info = {
177 .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
178 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
179 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
180 FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
181 FEC_QUIRK_HAS_MDIO_C45,
182 };
183
184 static struct platform_device_id fec_devtype[] = {
185 {
186 /* keep it for coldfire */
187 .name = DRIVER_NAME,
188 .driver_data = 0,
189 }, {
190 .name = "imx25-fec",
191 .driver_data = (kernel_ulong_t)&fec_imx25_info,
192 }, {
193 .name = "imx27-fec",
194 .driver_data = (kernel_ulong_t)&fec_imx27_info,
195 }, {
196 .name = "imx28-fec",
197 .driver_data = (kernel_ulong_t)&fec_imx28_info,
198 }, {
199 .name = "imx6q-fec",
200 .driver_data = (kernel_ulong_t)&fec_imx6q_info,
201 }, {
202 .name = "mvf600-fec",
203 .driver_data = (kernel_ulong_t)&fec_mvf600_info,
204 }, {
205 .name = "imx6sx-fec",
206 .driver_data = (kernel_ulong_t)&fec_imx6x_info,
207 }, {
208 .name = "imx6ul-fec",
209 .driver_data = (kernel_ulong_t)&fec_imx6ul_info,
210 }, {
211 .name = "imx8mq-fec",
212 .driver_data = (kernel_ulong_t)&fec_imx8mq_info,
213 }, {
214 .name = "imx8qm-fec",
215 .driver_data = (kernel_ulong_t)&fec_imx8qm_info,
216 }, {
217 .name = "s32v234-fec",
218 .driver_data = (kernel_ulong_t)&fec_s32v234_info,
219 }, {
220 /* sentinel */
221 }
222 };
223 MODULE_DEVICE_TABLE(platform, fec_devtype);
224
225 enum imx_fec_type {
226 IMX25_FEC = 1, /* runs on i.mx25/50/53 */
227 IMX27_FEC, /* runs on i.mx27/35/51 */
228 IMX28_FEC,
229 IMX6Q_FEC,
230 MVF600_FEC,
231 IMX6SX_FEC,
232 IMX6UL_FEC,
233 IMX8MQ_FEC,
234 IMX8QM_FEC,
235 S32V234_FEC,
236 };
237
238 static const struct of_device_id fec_dt_ids[] = {
239 { .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
240 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
241 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
242 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
243 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
244 { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
245 { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
246 { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
247 { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
248 { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
249 { /* sentinel */ }
250 };
251 MODULE_DEVICE_TABLE(of, fec_dt_ids);
252
253 static unsigned char macaddr[ETH_ALEN];
254 module_param_array(macaddr, byte, NULL, 0);
255 MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
256
257 #if defined(CONFIG_M5272)
258 /*
259 * Some hardware gets its MAC address out of local flash memory.
260 * If this is non-zero then assume it is the address to get the MAC from.
261 */
262 #if defined(CONFIG_NETtel)
263 #define FEC_FLASHMAC 0xf0006006
264 #elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
265 #define FEC_FLASHMAC 0xf0006000
266 #elif defined(CONFIG_CANCam)
267 #define FEC_FLASHMAC 0xf0020000
268 #elif defined (CONFIG_M5272C3)
269 #define FEC_FLASHMAC (0xffe04000 + 4)
270 #elif defined(CONFIG_MOD5272)
271 #define FEC_FLASHMAC 0xffc0406b
272 #else
273 #define FEC_FLASHMAC 0
274 #endif
275 #endif /* CONFIG_M5272 */
276
277 /* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
278 *
279 * 2048-byte skbufs are allocated. However, alignment requirements
280 * vary between FEC variants. The worst case is 64, so round down by 64.
281 */
282 #define PKT_MAXBUF_SIZE (round_down(2048 - 64, 64))
283 #define PKT_MINBUF_SIZE 64
284
285 /* FEC receive acceleration */
286 #define FEC_RACC_IPDIS BIT(1)
287 #define FEC_RACC_PRODIS BIT(2)
288 #define FEC_RACC_SHIFT16 BIT(7)
289 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
290
291 /* MIB Control Register */
292 #define FEC_MIB_CTRLSTAT_DISABLE BIT(31)
293
294 /*
295 * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
296 * size bits. Other FEC hardware does not, so we need to take that into
297 * account when setting it.
298 */
299 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
300 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
301 defined(CONFIG_ARM64)
302 #define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
303 #else
304 #define OPT_FRAME_SIZE 0
305 #endif
306
307 /* FEC MII MMFR bits definition */
308 #define FEC_MMFR_ST (1 << 30)
309 #define FEC_MMFR_ST_C45 (0)
310 #define FEC_MMFR_OP_READ (2 << 28)
311 #define FEC_MMFR_OP_READ_C45 (3 << 28)
312 #define FEC_MMFR_OP_WRITE (1 << 28)
313 #define FEC_MMFR_OP_ADDR_WRITE (0)
314 #define FEC_MMFR_PA(v) ((v & 0x1f) << 23)
315 #define FEC_MMFR_RA(v) ((v & 0x1f) << 18)
316 #define FEC_MMFR_TA (2 << 16)
317 #define FEC_MMFR_DATA(v) (v & 0xffff)
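/* A management frame is built by OR-ing the MMFR fields above together;
 * e.g. a clause-22 read of register 'reg' on PHY address 'phy' would be
 * composed as (a sketch of the frame layout, not the full MDIO helper):
 *
 *	FEC_MMFR_ST | FEC_MMFR_OP_READ | FEC_MMFR_PA(phy) |
 *	FEC_MMFR_RA(reg) | FEC_MMFR_TA
 *
 * Clause-45 accesses use FEC_MMFR_ST_C45 plus a separate address write
 * cycle (FEC_MMFR_OP_ADDR_WRITE).
 */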
318 /* FEC ECR bits definition */
319 #define FEC_ECR_RESET BIT(0)
320 #define FEC_ECR_ETHEREN BIT(1)
321 #define FEC_ECR_MAGICEN BIT(2)
322 #define FEC_ECR_SLEEP BIT(3)
323 #define FEC_ECR_EN1588 BIT(4)
324 #define FEC_ECR_BYTESWP BIT(8)
325 /* FEC RCR bits definition */
326 #define FEC_RCR_LOOP BIT(0)
327 #define FEC_RCR_HALFDPX BIT(1)
328 #define FEC_RCR_MII BIT(2)
329 #define FEC_RCR_PROMISC BIT(3)
330 #define FEC_RCR_BC_REJ BIT(4)
331 #define FEC_RCR_FLOWCTL BIT(5)
332 #define FEC_RCR_RMII BIT(8)
333 #define FEC_RCR_10BASET BIT(9)
334 /* TX WMARK bits */
335 #define FEC_TXWMRK_STRFWD BIT(8)
336
337 #define FEC_MII_TIMEOUT 30000 /* us */
338
339 /* Transmitter timeout */
340 #define TX_TIMEOUT (2 * HZ)
341
342 #define FEC_PAUSE_FLAG_AUTONEG 0x1
343 #define FEC_PAUSE_FLAG_ENABLE 0x2
344 #define FEC_WOL_HAS_MAGIC_PACKET (0x1 << 0)
345 #define FEC_WOL_FLAG_ENABLE (0x1 << 1)
346 #define FEC_WOL_FLAG_SLEEP_ON (0x1 << 2)
347
348 /* Max number of allowed TCP segments for software TSO */
349 #define FEC_MAX_TSO_SEGS 100
350 #define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
351
352 #define IS_TSO_HEADER(txq, addr) \
353 ((addr >= txq->tso_hdrs_dma) && \
354 (addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
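/* TSO headers live in a dedicated coherent DMA region (tso_hdrs_dma),
 * one TSO_HEADER_SIZE slot per ring entry. IS_TSO_HEADER() lets the
 * completion path skip dma_unmap_single() for those addresses, since
 * only streaming-mapped payload buffers need to be unmapped.
 */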
355
356 static int mii_cnt;
357
358 static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
359 struct bufdesc_prop *bd)
360 {
361 return (bdp >= bd->last) ? bd->base
362 : (struct bufdesc *)(((void *)bdp) + bd->dsize);
363 }
364
365 static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
366 struct bufdesc_prop *bd)
367 {
368 return (bdp <= bd->base) ? bd->last
369 : (struct bufdesc *)(((void *)bdp) - bd->dsize);
370 }
371
372 static int fec_enet_get_bd_index(struct bufdesc *bdp,
373 struct bufdesc_prop *bd)
374 {
375 return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
376 }
377
378 static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
379 {
380 int entries;
381
382 entries = (((const char *)txq->dirty_tx -
383 (const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
384
385 return entries >= 0 ? entries : entries + txq->bd.ring_size;
386 }
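/* Worked example of the ring arithmetic above, assuming ring_size = 512:
 * if txq->bd.cur is 4 descriptors ahead of txq->dirty_tx, the raw count
 * is (-4 - 1) = -5, which wraps to 507 free entries. One slot is always
 * kept unused so a full ring is distinguishable from an empty one.
 */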
387
388 static void swap_buffer(void *bufaddr, int len)
389 {
390 int i;
391 unsigned int *buf = bufaddr;
392
393 for (i = 0; i < len; i += 4, buf++)
394 swab32s(buf);
395 }
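/* swap_buffer() byte-swaps the frame in place, one 32-bit word at a time,
 * for controllers with FEC_QUIRK_SWAP_FRAME (e.g. i.MX28); it assumes the
 * buffer is padded to a multiple of 4 bytes.
 */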
396
397 static void fec_dump(struct net_device *ndev)
398 {
399 struct fec_enet_private *fep = netdev_priv(ndev);
400 struct bufdesc *bdp;
401 struct fec_enet_priv_tx_q *txq;
402 int index = 0;
403
404 netdev_info(ndev, "TX ring dump\n");
405 pr_info("Nr SC addr len SKB\n");
406
407 txq = fep->tx_queue[0];
408 bdp = txq->bd.base;
409
410 do {
411 pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
412 index,
413 bdp == txq->bd.cur ? 'S' : ' ',
414 bdp == txq->dirty_tx ? 'H' : ' ',
415 fec16_to_cpu(bdp->cbd_sc),
416 fec32_to_cpu(bdp->cbd_bufaddr),
417 fec16_to_cpu(bdp->cbd_datlen),
418 txq->tx_buf[index].buf_p);
419 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
420 index++;
421 } while (bdp != txq->bd.base);
422 }
423
424 static inline bool is_ipv4_pkt(struct sk_buff *skb)
425 {
426 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
427 }
428
429 static int
430 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
431 {
432 /* Only run for packets requiring a checksum. */
433 if (skb->ip_summed != CHECKSUM_PARTIAL)
434 return 0;
435
436 if (unlikely(skb_cow_head(skb, 0)))
437 return -1;
438
439 if (is_ipv4_pkt(skb))
440 ip_hdr(skb)->check = 0;
441 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
442
443 return 0;
444 }
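/* For CHECKSUM_PARTIAL the hardware inserts the L3/L4 checksums (see the
 * BD_ENET_TX_PINS | BD_ENET_TX_IINS descriptor bits used below), so the
 * stack's pseudo-header checksum placeholder must be zeroed first;
 * otherwise it would be folded into the value the hardware computes.
 */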
445
446 static int
447 fec_enet_create_page_pool(struct fec_enet_private *fep,
448 struct fec_enet_priv_rx_q *rxq, int size)
449 {
450 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
451 struct page_pool_params pp_params = {
452 .order = 0,
453 .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
454 .pool_size = size,
455 .nid = dev_to_node(&fep->pdev->dev),
456 .dev = &fep->pdev->dev,
457 .dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
458 .offset = FEC_ENET_XDP_HEADROOM,
459 .max_len = FEC_ENET_RX_FRSIZE,
460 };
461 int err;
462
463 rxq->page_pool = page_pool_create(&pp_params);
464 if (IS_ERR(rxq->page_pool)) {
465 err = PTR_ERR(rxq->page_pool);
466 rxq->page_pool = NULL;
467 return err;
468 }
469
470 err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
471 if (err < 0)
472 goto err_free_pp;
473
474 err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
475 rxq->page_pool);
476 if (err)
477 goto err_unregister_rxq;
478
479 return 0;
480
481 err_unregister_rxq:
482 xdp_rxq_info_unreg(&rxq->xdp_rxq);
483 err_free_pp:
484 page_pool_destroy(rxq->page_pool);
485 rxq->page_pool = NULL;
486 return err;
487 }
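/* Setup order above matters: the page pool must exist before the rxq is
 * registered with the XDP core, and the memory model registration ties
 * the two together so pages can be recycled back into this pool. The
 * error path unwinds in exactly the reverse order.
 */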
488
489 static struct bufdesc *
490 fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
491 struct sk_buff *skb,
492 struct net_device *ndev)
493 {
494 struct fec_enet_private *fep = netdev_priv(ndev);
495 struct bufdesc *bdp = txq->bd.cur;
496 struct bufdesc_ex *ebdp;
497 int nr_frags = skb_shinfo(skb)->nr_frags;
498 int frag, frag_len;
499 unsigned short status;
500 unsigned int estatus = 0;
501 skb_frag_t *this_frag;
502 unsigned int index;
503 void *bufaddr;
504 dma_addr_t addr;
505 int i;
506
507 for (frag = 0; frag < nr_frags; frag++) {
508 this_frag = &skb_shinfo(skb)->frags[frag];
509 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
510 ebdp = (struct bufdesc_ex *)bdp;
511
512 status = fec16_to_cpu(bdp->cbd_sc);
513 status &= ~BD_ENET_TX_STATS;
514 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
515 frag_len = skb_frag_size(&skb_shinfo(skb)->frags[frag]);
516
517 /* Handle the last BD specially */
518 if (frag == nr_frags - 1) {
519 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
520 if (fep->bufdesc_ex) {
521 estatus |= BD_ENET_TX_INT;
522 if (unlikely(skb_shinfo(skb)->tx_flags &
523 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
524 estatus |= BD_ENET_TX_TS;
525 }
526 }
527
528 if (fep->bufdesc_ex) {
529 if (fep->quirks & FEC_QUIRK_HAS_AVB)
530 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
531 if (skb->ip_summed == CHECKSUM_PARTIAL)
532 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
533
534 ebdp->cbd_bdu = 0;
535 ebdp->cbd_esc = cpu_to_fec32(estatus);
536 }
537
538 bufaddr = skb_frag_address(this_frag);
539
540 index = fec_enet_get_bd_index(bdp, &txq->bd);
541 if (((unsigned long) bufaddr) & fep->tx_align ||
542 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
543 memcpy(txq->tx_bounce[index], bufaddr, frag_len);
544 bufaddr = txq->tx_bounce[index];
545
546 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
547 swap_buffer(bufaddr, frag_len);
548 }
549
550 addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
551 DMA_TO_DEVICE);
552 if (dma_mapping_error(&fep->pdev->dev, addr)) {
553 if (net_ratelimit())
554 netdev_err(ndev, "Tx DMA memory map failed\n");
555 goto dma_mapping_error;
556 }
557
558 bdp->cbd_bufaddr = cpu_to_fec32(addr);
559 bdp->cbd_datlen = cpu_to_fec16(frag_len);
560 /* Make sure the updates to rest of the descriptor are
561 * performed before transferring ownership.
562 */
563 wmb();
564 bdp->cbd_sc = cpu_to_fec16(status);
565 }
566
567 return bdp;
568 dma_mapping_error:
569 bdp = txq->bd.cur;
570 for (i = 0; i < frag; i++) {
571 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
572 dma_unmap_single(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr),
573 fec16_to_cpu(bdp->cbd_datlen), DMA_TO_DEVICE);
574 }
575 return ERR_PTR(-ENOMEM);
576 }
577
578 static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
579 struct sk_buff *skb, struct net_device *ndev)
580 {
581 struct fec_enet_private *fep = netdev_priv(ndev);
582 int nr_frags = skb_shinfo(skb)->nr_frags;
583 struct bufdesc *bdp, *last_bdp;
584 void *bufaddr;
585 dma_addr_t addr;
586 unsigned short status;
587 unsigned short buflen;
588 unsigned int estatus = 0;
589 unsigned int index;
590 int entries_free;
591
592 entries_free = fec_enet_get_free_txdesc_num(txq);
593 if (entries_free < MAX_SKB_FRAGS + 1) {
594 dev_kfree_skb_any(skb);
595 if (net_ratelimit())
596 netdev_err(ndev, "NOT enough BD for SG!\n");
597 return NETDEV_TX_OK;
598 }
599
600 /* Protocol checksum off-load for TCP and UDP. */
601 if (fec_enet_clear_csum(skb, ndev)) {
602 dev_kfree_skb_any(skb);
603 return NETDEV_TX_OK;
604 }
605
606 /* Fill in a Tx ring entry */
607 bdp = txq->bd.cur;
608 last_bdp = bdp;
609 status = fec16_to_cpu(bdp->cbd_sc);
610 status &= ~BD_ENET_TX_STATS;
611
612 /* Set buffer length and buffer pointer */
613 bufaddr = skb->data;
614 buflen = skb_headlen(skb);
615
616 index = fec_enet_get_bd_index(bdp, &txq->bd);
617 if (((unsigned long) bufaddr) & fep->tx_align ||
618 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
619 memcpy(txq->tx_bounce[index], skb->data, buflen);
620 bufaddr = txq->tx_bounce[index];
621
622 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
623 swap_buffer(bufaddr, buflen);
624 }
625
626 /* Push the data cache so the CPM does not get stale memory data. */
627 addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
628 if (dma_mapping_error(&fep->pdev->dev, addr)) {
629 dev_kfree_skb_any(skb);
630 if (net_ratelimit())
631 netdev_err(ndev, "Tx DMA memory map failed\n");
632 return NETDEV_TX_OK;
633 }
634
635 if (nr_frags) {
636 last_bdp = fec_enet_txq_submit_frag_skb(txq, skb, ndev);
637 if (IS_ERR(last_bdp)) {
638 dma_unmap_single(&fep->pdev->dev, addr,
639 buflen, DMA_TO_DEVICE);
640 dev_kfree_skb_any(skb);
641 return NETDEV_TX_OK;
642 }
643 } else {
644 status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
645 if (fep->bufdesc_ex) {
646 estatus = BD_ENET_TX_INT;
647 if (unlikely(skb_shinfo(skb)->tx_flags &
648 SKBTX_HW_TSTAMP && fep->hwts_tx_en))
649 estatus |= BD_ENET_TX_TS;
650 }
651 }
652 bdp->cbd_bufaddr = cpu_to_fec32(addr);
653 bdp->cbd_datlen = cpu_to_fec16(buflen);
654
655 if (fep->bufdesc_ex) {
656
657 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
658
659 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
660 fep->hwts_tx_en))
661 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
662
663 if (fep->quirks & FEC_QUIRK_HAS_AVB)
664 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
665
666 if (skb->ip_summed == CHECKSUM_PARTIAL)
667 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
668
669 ebdp->cbd_bdu = 0;
670 ebdp->cbd_esc = cpu_to_fec32(estatus);
671 }
672
673 index = fec_enet_get_bd_index(last_bdp, &txq->bd);
674 /* Save skb pointer */
675 txq->tx_buf[index].buf_p = skb;
676
677 /* Make sure the updates to rest of the descriptor are performed before
678 * transferring ownership.
679 */
680 wmb();
681
682 /* Send it on its way. Tell FEC it's ready, interrupt when done,
683 * it's the last BD of the frame, and to put the CRC on the end.
684 */
685 status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
686 bdp->cbd_sc = cpu_to_fec16(status);
687
688 /* If this was the last BD in the ring, start at the beginning again. */
689 bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
690
691 skb_tx_timestamp(skb);
692
693 /* Make sure the update to bdp is performed before txq->bd.cur. */
694 wmb();
695 txq->bd.cur = bdp;
696
697 /* Trigger transmission start */
698 writel(0, txq->bd.reg_desc_active);
699
700 return 0;
701 }
702
703 static int
704 fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb,
705 struct net_device *ndev,
706 struct bufdesc *bdp, int index, char *data,
707 int size, bool last_tcp, bool is_last)
708 {
709 struct fec_enet_private *fep = netdev_priv(ndev);
710 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
711 unsigned short status;
712 unsigned int estatus = 0;
713 dma_addr_t addr;
714
715 status = fec16_to_cpu(bdp->cbd_sc);
716 status &= ~BD_ENET_TX_STATS;
717
718 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
719
720 if (((unsigned long) data) & fep->tx_align ||
721 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
722 memcpy(txq->tx_bounce[index], data, size);
723 data = txq->tx_bounce[index];
724
725 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
726 swap_buffer(data, size);
727 }
728
729 addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
730 if (dma_mapping_error(&fep->pdev->dev, addr)) {
731 dev_kfree_skb_any(skb);
732 if (net_ratelimit())
733 netdev_err(ndev, "Tx DMA memory map failed\n");
734 return NETDEV_TX_OK;
735 }
736
737 bdp->cbd_datlen = cpu_to_fec16(size);
738 bdp->cbd_bufaddr = cpu_to_fec32(addr);
739
740 if (fep->bufdesc_ex) {
741 if (fep->quirks & FEC_QUIRK_HAS_AVB)
742 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
743 if (skb->ip_summed == CHECKSUM_PARTIAL)
744 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
745 ebdp->cbd_bdu = 0;
746 ebdp->cbd_esc = cpu_to_fec32(estatus);
747 }
748
749 /* Handle the last BD specially */
750 if (last_tcp)
751 status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
752 if (is_last) {
753 status |= BD_ENET_TX_INTR;
754 if (fep->bufdesc_ex)
755 ebdp->cbd_esc |= cpu_to_fec32(BD_ENET_TX_INT);
756 }
757
758 bdp->cbd_sc = cpu_to_fec16(status);
759
760 return 0;
761 }
762
763 static int
764 fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq,
765 struct sk_buff *skb, struct net_device *ndev,
766 struct bufdesc *bdp, int index)
767 {
768 struct fec_enet_private *fep = netdev_priv(ndev);
769 int hdr_len = skb_tcp_all_headers(skb);
770 struct bufdesc_ex *ebdp = container_of(bdp, struct bufdesc_ex, desc);
771 void *bufaddr;
772 unsigned long dmabuf;
773 unsigned short status;
774 unsigned int estatus = 0;
775
776 status = fec16_to_cpu(bdp->cbd_sc);
777 status &= ~BD_ENET_TX_STATS;
778 status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
779
780 bufaddr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
781 dmabuf = txq->tso_hdrs_dma + index * TSO_HEADER_SIZE;
782 if (((unsigned long)bufaddr) & fep->tx_align ||
783 fep->quirks & FEC_QUIRK_SWAP_FRAME) {
784 memcpy(txq->tx_bounce[index], skb->data, hdr_len);
785 bufaddr = txq->tx_bounce[index];
786
787 if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
788 swap_buffer(bufaddr, hdr_len);
789
790 dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
791 hdr_len, DMA_TO_DEVICE);
792 if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
793 dev_kfree_skb_any(skb);
794 if (net_ratelimit())
795 netdev_err(ndev, "Tx DMA memory map failed\n");
796 return NETDEV_TX_OK;
797 }
798 }
799
800 bdp->cbd_bufaddr = cpu_to_fec32(dmabuf);
801 bdp->cbd_datlen = cpu_to_fec16(hdr_len);
802
803 if (fep->bufdesc_ex) {
804 if (fep->quirks & FEC_QUIRK_HAS_AVB)
805 estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
806 if (skb->ip_summed == CHECKSUM_PARTIAL)
807 estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
808 ebdp->cbd_bdu = 0;
809 ebdp->cbd_esc = cpu_to_fec32(estatus);
810 }
811
812 bdp->cbd_sc = cpu_to_fec16(status);
813
814 return 0;
815 }
816
817 static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
818 struct sk_buff *skb,
819 struct net_device *ndev)
820 {
821 struct fec_enet_private *fep = netdev_priv(ndev);
822 int hdr_len, total_len, data_left;
823 struct bufdesc *bdp = txq->bd.cur;
824 struct tso_t tso;
825 unsigned int index = 0;
826 int ret;
827
828 if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(txq)) {
829 dev_kfree_skb_any(skb);
830 if (net_ratelimit())
831 netdev_err(ndev, "NOT enough BD for TSO!\n");
832 return NETDEV_TX_OK;
833 }
834
835 /* Protocol checksum off-load for TCP and UDP. */
836 if (fec_enet_clear_csum(skb, ndev)) {
837 dev_kfree_skb_any(skb);
838 return NETDEV_TX_OK;
839 }
840
841 /* Initialize the TSO handler, and prepare the first payload */
842 hdr_len = tso_start(skb, &tso);
843
844 total_len = skb->len - hdr_len;
845 while (total_len > 0) {
846 char *hdr;
847
848 index = fec_enet_get_bd_index(bdp, &txq->bd);
849 data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
850 total_len -= data_left;
851
852 /* prepare packet headers: MAC + IP + TCP */
853 hdr = txq->tso_hdrs + index * TSO_HEADER_SIZE;
854 tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
855 ret = fec_enet_txq_put_hdr_tso(txq, skb, ndev, bdp, index);
856 if (ret)
857 goto err_release;
858
859 while (data_left > 0) {
860 int size;
861
862 size = min_t(int, tso.size, data_left);
863 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
864 index = fec_enet_get_bd_index(bdp, &txq->bd);
865 ret = fec_enet_txq_put_data_tso(txq, skb, ndev,
866 bdp, index,
867 tso.data, size,
868 size == data_left,
869 total_len == 0);
870 if (ret)
871 goto err_release;
872
873 data_left -= size;
874 tso_build_data(skb, &tso, size);
875 }
876
877 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
878 }
879
880 /* Save skb pointer */
881 txq->tx_buf[index].buf_p = skb;
882
883 skb_tx_timestamp(skb);
884 txq->bd.cur = bdp;
885
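/* ERR007885 handling below (a sketch of the intent, inferred from the
 * quirk flag): on affected SoCs a write to the TDAR trigger register can
 * be lost if it races with the hardware clearing the bit, so TDAR is
 * sampled up to four times and only re-armed once it reads as inactive.
 */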
886 /* Trigger transmission start */
887 if (!(fep->quirks & FEC_QUIRK_ERR007885) ||
888 !readl(txq->bd.reg_desc_active) ||
889 !readl(txq->bd.reg_desc_active) ||
890 !readl(txq->bd.reg_desc_active) ||
891 !readl(txq->bd.reg_desc_active))
892 writel(0, txq->bd.reg_desc_active);
893
894 return 0;
895
896 err_release:
897 /* TODO: Release all used data descriptors for TSO */
898 return ret;
899 }
900
901 static netdev_tx_t
902 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
903 {
904 struct fec_enet_private *fep = netdev_priv(ndev);
905 int entries_free;
906 unsigned short queue;
907 struct fec_enet_priv_tx_q *txq;
908 struct netdev_queue *nq;
909 int ret;
910
911 queue = skb_get_queue_mapping(skb);
912 txq = fep->tx_queue[queue];
913 nq = netdev_get_tx_queue(ndev, queue);
914
915 if (skb_is_gso(skb))
916 ret = fec_enet_txq_submit_tso(txq, skb, ndev);
917 else
918 ret = fec_enet_txq_submit_skb(txq, skb, ndev);
919 if (ret)
920 return ret;
921
922 entries_free = fec_enet_get_free_txdesc_num(txq);
923 if (entries_free <= txq->tx_stop_threshold)
924 netif_tx_stop_queue(nq);
925
926 return NETDEV_TX_OK;
927 }
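/* The stop/wake thresholds act as hysteresis: the queue is stopped here
 * once free descriptors drop to tx_stop_threshold, and is only woken from
 * the Tx completion path once tx_wake_threshold entries are free again,
 * which avoids rapid stop/wake flapping under load.
 */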
928
929 /* Init RX & TX buffer descriptors
930 */
931 static void fec_enet_bd_init(struct net_device *dev)
932 {
933 struct fec_enet_private *fep = netdev_priv(dev);
934 struct fec_enet_priv_tx_q *txq;
935 struct fec_enet_priv_rx_q *rxq;
936 struct bufdesc *bdp;
937 unsigned int i;
938 unsigned int q;
939
940 for (q = 0; q < fep->num_rx_queues; q++) {
941 /* Initialize the receive buffer descriptors. */
942 rxq = fep->rx_queue[q];
943 bdp = rxq->bd.base;
944
945 for (i = 0; i < rxq->bd.ring_size; i++) {
946
947 /* Initialize the BD for every fragment in the page. */
948 if (bdp->cbd_bufaddr)
949 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
950 else
951 bdp->cbd_sc = cpu_to_fec16(0);
952 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
953 }
954
955 /* Set the last buffer to wrap */
956 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
957 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
958
959 rxq->bd.cur = rxq->bd.base;
960 }
961
962 for (q = 0; q < fep->num_tx_queues; q++) {
963 /* ...and the same for transmit */
964 txq = fep->tx_queue[q];
965 bdp = txq->bd.base;
966 txq->bd.cur = bdp;
967
968 for (i = 0; i < txq->bd.ring_size; i++) {
969 /* Initialize the BD for every fragment in the page. */
970 bdp->cbd_sc = cpu_to_fec16(0);
971 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
972 if (bdp->cbd_bufaddr &&
973 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
974 dma_unmap_single(&fep->pdev->dev,
975 fec32_to_cpu(bdp->cbd_bufaddr),
976 fec16_to_cpu(bdp->cbd_datlen),
977 DMA_TO_DEVICE);
978 if (txq->tx_buf[i].buf_p)
979 dev_kfree_skb_any(txq->tx_buf[i].buf_p);
980 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
981 if (bdp->cbd_bufaddr)
982 dma_unmap_single(&fep->pdev->dev,
983 fec32_to_cpu(bdp->cbd_bufaddr),
984 fec16_to_cpu(bdp->cbd_datlen),
985 DMA_TO_DEVICE);
986
987 if (txq->tx_buf[i].buf_p)
988 xdp_return_frame(txq->tx_buf[i].buf_p);
989 } else {
990 struct page *page = txq->tx_buf[i].buf_p;
991
992 if (page)
993 page_pool_put_page(page->pp, page, 0, false);
994 }
995
996 txq->tx_buf[i].buf_p = NULL;
997 /* restore default tx buffer type: FEC_TXBUF_T_SKB */
998 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
999 bdp->cbd_bufaddr = cpu_to_fec32(0);
1000 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1001 }
1002
1003 /* Set the last buffer to wrap */
1004 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
1005 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
1006 txq->dirty_tx = bdp;
1007 }
1008 }
1009
1010 static void fec_enet_active_rxring(struct net_device *ndev)
1011 {
1012 struct fec_enet_private *fep = netdev_priv(ndev);
1013 int i;
1014
1015 for (i = 0; i < fep->num_rx_queues; i++)
1016 writel(0, fep->rx_queue[i]->bd.reg_desc_active);
1017 }
1018
1019 static void fec_enet_enable_ring(struct net_device *ndev)
1020 {
1021 struct fec_enet_private *fep = netdev_priv(ndev);
1022 struct fec_enet_priv_tx_q *txq;
1023 struct fec_enet_priv_rx_q *rxq;
1024 int i;
1025
1026 for (i = 0; i < fep->num_rx_queues; i++) {
1027 rxq = fep->rx_queue[i];
1028 writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
1029 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
1030
1031 /* enable DMA1/2 */
1032 if (i)
1033 writel(RCMR_MATCHEN | RCMR_CMP(i),
1034 fep->hwp + FEC_RCMR(i));
1035 }
1036
1037 for (i = 0; i < fep->num_tx_queues; i++) {
1038 txq = fep->tx_queue[i];
1039 writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
1040
1041 /* enable DMA1/2 */
1042 if (i)
1043 writel(DMA_CLASS_EN | IDLE_SLOPE(i),
1044 fep->hwp + FEC_DMA_CFG(i));
1045 }
1046 }
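/* On multi-queue controllers rings 1 and 2 are the AVB class queues:
 * RCMR_MATCHEN | RCMR_CMP(i) steers received frames to ring i by VLAN
 * priority match, and DMA_CLASS_EN | IDLE_SLOPE(i) enables the credit
 * based shaper on the corresponding Tx ring (assumed from the AVB quirk;
 * the exact slope encoding lives in fec.h).
 */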
1047
1048 /*
1049 * This function is called to start or restart the FEC during a link
1050 * change, transmit timeout, or to reconfigure the FEC. The network
1051 * packet processing for this device must be stopped before this call.
1052 */
1053 static void
1054 fec_restart(struct net_device *ndev)
1055 {
1056 struct fec_enet_private *fep = netdev_priv(ndev);
1057 u32 temp_mac[2];
1058 u32 rcntl = OPT_FRAME_SIZE | 0x04;
1059 u32 ecntl = FEC_ECR_ETHEREN;
1060
1061 /* Whack a reset. We should wait for this.
1062 * For the i.MX6SX SoC, the ENET block is on the AXI bus; we disable
1063 * the MAC instead of resetting it.
1064 */
1065 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES ||
1066 ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) {
1067 writel(0, fep->hwp + FEC_ECNTRL);
1068 } else {
1069 writel(1, fep->hwp + FEC_ECNTRL);
1070 udelay(10);
1071 }
1072
1073 /*
1074 * An enet-mac reset will reset the MAC address registers too,
1075 * so we need to reconfigure them.
1076 */
1077 memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
1078 writel((__force u32)cpu_to_be32(temp_mac[0]),
1079 fep->hwp + FEC_ADDR_LOW);
1080 writel((__force u32)cpu_to_be32(temp_mac[1]),
1081 fep->hwp + FEC_ADDR_HIGH);
1082
1083 /* Clear any outstanding interrupt, except MDIO. */
1084 writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
1085
1086 fec_enet_bd_init(ndev);
1087
1088 fec_enet_enable_ring(ndev);
1089
1090 /* Enable MII mode */
1091 if (fep->full_duplex == DUPLEX_FULL) {
1092 /* FD enable */
1093 writel(0x04, fep->hwp + FEC_X_CNTRL);
1094 } else {
1095 /* No Rcv on Xmit */
1096 rcntl |= 0x02;
1097 writel(0x0, fep->hwp + FEC_X_CNTRL);
1098 }
1099
1100 /* Set MII speed */
1101 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1102
1103 #if !defined(CONFIG_M5272)
1104 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1105 u32 val = readl(fep->hwp + FEC_RACC);
1106
1107 /* align IP header */
1108 val |= FEC_RACC_SHIFT16;
1109 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
1110 /* set RX checksum */
1111 val |= FEC_RACC_OPTIONS;
1112 else
1113 val &= ~FEC_RACC_OPTIONS;
1114 writel(val, fep->hwp + FEC_RACC);
1115 writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
1116 }
1117 #endif
1118
1119 /*
1120 * The phy interface and speed need to get configured
1121 * differently on enet-mac.
1122 */
1123 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1124 /* Enable flow control and length check */
1125 rcntl |= 0x40000000 | 0x00000020;
1126
1127 /* RGMII, RMII or MII */
1128 if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
1129 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
1130 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
1131 fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
1132 rcntl |= (1 << 6);
1133 else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1134 rcntl |= FEC_RCR_RMII;
1135 else
1136 rcntl &= ~FEC_RCR_RMII;
1137
1138 /* 1G, 100M or 10M */
1139 if (ndev->phydev) {
1140 if (ndev->phydev->speed == SPEED_1000)
1141 ecntl |= (1 << 5);
1142 else if (ndev->phydev->speed == SPEED_100)
1143 rcntl &= ~FEC_RCR_10BASET;
1144 else
1145 rcntl |= FEC_RCR_10BASET;
1146 }
1147 } else {
1148 #ifdef FEC_MIIGSK_ENR
1149 if (fep->quirks & FEC_QUIRK_USE_GASKET) {
1150 u32 cfgr;
1151 /* disable the gasket and wait */
1152 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1153 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1154 udelay(1);
1155
1156 /*
1157 * configure the gasket:
1158 * RMII, 50 MHz, no loopback, no echo
1159 * MII, 25 MHz, no loopback, no echo
1160 */
1161 cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1162 ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
1163 if (ndev->phydev && ndev->phydev->speed == SPEED_10)
1164 cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
1165 writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
1166
1167 /* re-enable the gasket */
1168 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1169 }
1170 #endif
1171 }
1172
1173 #if !defined(CONFIG_M5272)
1174 /* enable pause frame*/
1175 if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
1176 ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
1177 ndev->phydev && ndev->phydev->pause)) {
1178 rcntl |= FEC_ENET_FCE;
1179
1180 /* set FIFO threshold parameter to reduce overrun */
1181 writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
1182 writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
1183 writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
1184 writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
1185
1186 /* OPD */
1187 writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
1188 } else {
1189 rcntl &= ~FEC_ENET_FCE;
1190 }
1191 #endif /* !defined(CONFIG_M5272) */
1192
1193 writel(rcntl, fep->hwp + FEC_R_CNTRL);
1194
1195 /* Setup multicast filter. */
1196 set_multicast_list(ndev);
1197 #ifndef CONFIG_M5272
1198 writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
1199 writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
1200 #endif
1201
1202 if (fep->quirks & FEC_QUIRK_ENET_MAC) {
1203 /* enable ENET endian swap */
1204 ecntl |= FEC_ECR_BYTESWP;
1205 /* enable ENET store and forward mode */
1206 writel(FEC_TXWMRK_STRFWD, fep->hwp + FEC_X_WMRK);
1207 }
1208
1209 if (fep->bufdesc_ex)
1210 ecntl |= FEC_ECR_EN1588;
1211
1212 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1213 fep->rgmii_txc_dly)
1214 ecntl |= FEC_ENET_TXC_DLY;
1215 if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT &&
1216 fep->rgmii_rxc_dly)
1217 ecntl |= FEC_ENET_RXC_DLY;
1218
1219 #ifndef CONFIG_M5272
1220 /* Enable the MIB statistic event counters */
1221 writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
1222 #endif
1223
1224 /* And last, enable the transmit and receive processing */
1225 writel(ecntl, fep->hwp + FEC_ECNTRL);
1226 fec_enet_active_rxring(ndev);
1227
1228 if (fep->bufdesc_ex)
1229 fec_ptp_start_cyclecounter(ndev);
1230
1231 /* Enable interrupts we wish to service */
1232 if (fep->link)
1233 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1234 else
1235 writel(0, fep->hwp + FEC_IMASK);
1236
1237 /* Init the interrupt coalescing */
1238 if (fep->quirks & FEC_QUIRK_HAS_COALESCE)
1239 fec_enet_itr_coal_set(ndev);
1240 }
1241
1242 static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
1243 {
1244 if (!(of_machine_is_compatible("fsl,imx8qm") ||
1245 of_machine_is_compatible("fsl,imx8qxp") ||
1246 of_machine_is_compatible("fsl,imx8dxl")))
1247 return 0;
1248
1249 return imx_scu_get_handle(&fep->ipc_handle);
1250 }
1251
1252 static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
1253 {
1254 struct device_node *np = fep->pdev->dev.of_node;
1255 u32 rsrc_id, val;
1256 int idx;
1257
1258 if (!np || !fep->ipc_handle)
1259 return;
1260
1261 idx = of_alias_get_id(np, "ethernet");
1262 if (idx < 0)
1263 idx = 0;
1264 rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
1265
1266 val = enabled ? 1 : 0;
1267 imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
1268 }
1269
1270 static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
1271 {
1272 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
1273 struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
1274
1275 if (stop_gpr->gpr) {
1276 if (enabled)
1277 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1278 BIT(stop_gpr->bit),
1279 BIT(stop_gpr->bit));
1280 else
1281 regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
1282 BIT(stop_gpr->bit), 0);
1283 } else if (pdata && pdata->sleep_mode_enable) {
1284 pdata->sleep_mode_enable(enabled);
1285 } else {
1286 fec_enet_ipg_stop_set(fep, enabled);
1287 }
1288 }
1289
1290 static void fec_irqs_disable(struct net_device *ndev)
1291 {
1292 struct fec_enet_private *fep = netdev_priv(ndev);
1293
1294 writel(0, fep->hwp + FEC_IMASK);
1295 }
1296
1297 static void fec_irqs_disable_except_wakeup(struct net_device *ndev)
1298 {
1299 struct fec_enet_private *fep = netdev_priv(ndev);
1300
1301 writel(0, fep->hwp + FEC_IMASK);
1302 writel(FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
1303 }
1304
1305 static void
1306 fec_stop(struct net_device *ndev)
1307 {
1308 struct fec_enet_private *fep = netdev_priv(ndev);
1309 u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & FEC_RCR_RMII;
1310 u32 val;
1311
1312 /* We cannot expect a graceful transmit stop without link !!! */
1313 if (fep->link) {
1314 writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
1315 udelay(10);
1316 if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
1317 netdev_err(ndev, "Graceful transmit stop did not complete!\n");
1318 }
1319
1320 /* Whack a reset. We should wait for this.
1321 * For the i.MX6SX SoC, the ENET block is on the AXI bus; we disable
1322 * the MAC instead of resetting it.
1323 */
1324 if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1325 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
1326 writel(0, fep->hwp + FEC_ECNTRL);
1327 } else {
1328 writel(FEC_ECR_RESET, fep->hwp + FEC_ECNTRL);
1329 udelay(10);
1330 }
1331 } else {
1332 val = readl(fep->hwp + FEC_ECNTRL);
1333 val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
1334 writel(val, fep->hwp + FEC_ECNTRL);
1335 }
1336 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1337 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1338
1339 /* We have to keep ENET enabled to have MII interrupt stay working */
1340 if (fep->quirks & FEC_QUIRK_ENET_MAC &&
1341 !(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
1342 writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL);
1343 writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
1344 }
1345
1346 if (fep->bufdesc_ex) {
1347 val = readl(fep->hwp + FEC_ECNTRL);
1348 val |= FEC_ECR_EN1588;
1349 writel(val, fep->hwp + FEC_ECNTRL);
1350 }
1351 }
1352
1353 static void
1354 fec_timeout(struct net_device *ndev, unsigned int txqueue)
1355 {
1356 struct fec_enet_private *fep = netdev_priv(ndev);
1357
1358 fec_dump(ndev);
1359
1360 ndev->stats.tx_errors++;
1361
1362 schedule_work(&fep->tx_timeout_work);
1363 }
1364
1365 static void fec_enet_timeout_work(struct work_struct *work)
1366 {
1367 struct fec_enet_private *fep =
1368 container_of(work, struct fec_enet_private, tx_timeout_work);
1369 struct net_device *ndev = fep->netdev;
1370
1371 rtnl_lock();
1372 if (netif_device_present(ndev) || netif_running(ndev)) {
1373 napi_disable(&fep->napi);
1374 netif_tx_lock_bh(ndev);
1375 fec_restart(ndev);
1376 netif_tx_wake_all_queues(ndev);
1377 netif_tx_unlock_bh(ndev);
1378 napi_enable(&fep->napi);
1379 }
1380 rtnl_unlock();
1381 }
1382
1383 static void
1384 fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
1385 struct skb_shared_hwtstamps *hwtstamps)
1386 {
1387 unsigned long flags;
1388 u64 ns;
1389
1390 spin_lock_irqsave(&fep->tmreg_lock, flags);
1391 ns = timecounter_cyc2time(&fep->tc, ts);
1392 spin_unlock_irqrestore(&fep->tmreg_lock, flags);
1393
1394 memset(hwtstamps, 0, sizeof(*hwtstamps));
1395 hwtstamps->hwtstamp = ns_to_ktime(ns);
1396 }
1397
1398 static void
1399 fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
1400 {
1401 struct fec_enet_private *fep;
1402 struct xdp_frame *xdpf;
1403 struct bufdesc *bdp;
1404 unsigned short status;
1405 struct sk_buff *skb;
1406 struct fec_enet_priv_tx_q *txq;
1407 struct netdev_queue *nq;
1408 int index = 0;
1409 int entries_free;
1410 struct page *page;
1411 int frame_len;
1412
1413 fep = netdev_priv(ndev);
1414
1415 txq = fep->tx_queue[queue_id];
1417 nq = netdev_get_tx_queue(ndev, queue_id);
1418 bdp = txq->dirty_tx;
1419
1420 /* get next bdp of dirty_tx */
1421 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1422
1423 while (bdp != READ_ONCE(txq->bd.cur)) {
1424 /* Order the load of bd.cur and cbd_sc */
1425 rmb();
1426 status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
1427 if (status & BD_ENET_TX_READY)
1428 break;
1429
1430 index = fec_enet_get_bd_index(bdp, &txq->bd);
1431
1432 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1433 skb = txq->tx_buf[index].buf_p;
1434 if (bdp->cbd_bufaddr &&
1435 !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
1436 dma_unmap_single(&fep->pdev->dev,
1437 fec32_to_cpu(bdp->cbd_bufaddr),
1438 fec16_to_cpu(bdp->cbd_datlen),
1439 DMA_TO_DEVICE);
1440 bdp->cbd_bufaddr = cpu_to_fec32(0);
1441 if (!skb)
1442 goto tx_buf_done;
1443 } else {
1444 /* Tx processing cannot call any XDP (or page pool) APIs if
1445 * the "budget" is 0: a NAPI call with a budget of 0 (such as
1446 * netpoll) indicates we may be running in IRQ context, and the
1447 * page pool cannot be used from IRQ context.
1448 */
1449 if (unlikely(!budget))
1450 break;
1451
1452 if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1453 xdpf = txq->tx_buf[index].buf_p;
1454 if (bdp->cbd_bufaddr)
1455 dma_unmap_single(&fep->pdev->dev,
1456 fec32_to_cpu(bdp->cbd_bufaddr),
1457 fec16_to_cpu(bdp->cbd_datlen),
1458 DMA_TO_DEVICE);
1459 } else {
1460 page = txq->tx_buf[index].buf_p;
1461 }
1462
1463 bdp->cbd_bufaddr = cpu_to_fec32(0);
1464 if (unlikely(!txq->tx_buf[index].buf_p)) {
1465 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1466 goto tx_buf_done;
1467 }
1468
1469 frame_len = fec16_to_cpu(bdp->cbd_datlen);
1470 }
1471
1472 /* Check for errors. */
1473 if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
1474 BD_ENET_TX_RL | BD_ENET_TX_UN |
1475 BD_ENET_TX_CSL)) {
1476 ndev->stats.tx_errors++;
1477 if (status & BD_ENET_TX_HB) /* No heartbeat */
1478 ndev->stats.tx_heartbeat_errors++;
1479 if (status & BD_ENET_TX_LC) /* Late collision */
1480 ndev->stats.tx_window_errors++;
1481 if (status & BD_ENET_TX_RL) /* Retrans limit */
1482 ndev->stats.tx_aborted_errors++;
1483 if (status & BD_ENET_TX_UN) /* Underrun */
1484 ndev->stats.tx_fifo_errors++;
1485 if (status & BD_ENET_TX_CSL) /* Carrier lost */
1486 ndev->stats.tx_carrier_errors++;
1487 } else {
1488 ndev->stats.tx_packets++;
1489
1490 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
1491 ndev->stats.tx_bytes += skb->len;
1492 else
1493 ndev->stats.tx_bytes += frame_len;
1494 }
1495
1496 /* Deferred means some collisions occurred during transmit,
1497 * but we eventually sent the packet OK.
1498 */
1499 if (status & BD_ENET_TX_DEF)
1500 ndev->stats.collisions++;
1501
1502 if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
1503 /* NOTE: SKBTX_IN_PROGRESS being set does not imply that we are
1504 * the ones to time stamp this packet, so we still need to check
1505 * the time stamping enabled flag.
1506 */
1507 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
1508 fep->hwts_tx_en) && fep->bufdesc_ex) {
1509 struct skb_shared_hwtstamps shhwtstamps;
1510 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1511
1512 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
1513 skb_tstamp_tx(skb, &shhwtstamps);
1514 }
1515
1516 /* Free the sk buffer associated with this last transmit */
1517 napi_consume_skb(skb, budget);
1518 } else if (txq->tx_buf[index].type == FEC_TXBUF_T_XDP_NDO) {
1519 xdp_return_frame_rx_napi(xdpf);
1520 } else { /* recycle pages of XDP_TX frames */
1521 /* The dma_sync_size = 0 as XDP_TX has already synced DMA for_device */
1522 page_pool_put_page(page->pp, page, 0, true);
1523 }
1524
1525 txq->tx_buf[index].buf_p = NULL;
1526 /* restore default tx buffer type: FEC_TXBUF_T_SKB */
1527 txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
1528
1529 tx_buf_done:
1530 /* Make sure the update to bdp and tx_buf are performed
1531 * before dirty_tx
1532 */
1533 wmb();
1534 txq->dirty_tx = bdp;
1535
1536 /* Update pointer to next buffer descriptor to be transmitted */
1537 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
1538
1539 /* Since we have freed up a buffer, the ring is no longer full
1540 */
1541 if (netif_tx_queue_stopped(nq)) {
1542 entries_free = fec_enet_get_free_txdesc_num(txq);
1543 if (entries_free >= txq->tx_wake_threshold)
1544 netif_tx_wake_queue(nq);
1545 }
1546 }
1547
1548 /* ERR006358: Keep the transmitter going */
1549 if (bdp != txq->bd.cur &&
1550 readl(txq->bd.reg_desc_active) == 0)
1551 writel(0, txq->bd.reg_desc_active);
1552 }
1553
1554 static void fec_enet_tx(struct net_device *ndev, int budget)
1555 {
1556 struct fec_enet_private *fep = netdev_priv(ndev);
1557 int i;
1558
1559 /* Make sure that AVB queues are processed first. */
1560 for (i = fep->num_tx_queues - 1; i >= 0; i--)
1561 fec_enet_tx_queue(ndev, i, budget);
1562 }
1563
1564 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
1565 struct bufdesc *bdp, int index)
1566 {
1567 struct page *new_page;
1568 dma_addr_t phys_addr;
1569
1570 new_page = page_pool_dev_alloc_pages(rxq->page_pool);
1571 WARN_ON(!new_page);
1572 rxq->rx_skb_info[index].page = new_page;
1573
1574 rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
1575 phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
1576 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
1577 }
1578
1579 static u32
1580 fec_enet_run_xdp(struct fec_enet_private *fep, struct bpf_prog *prog,
1581 struct xdp_buff *xdp, struct fec_enet_priv_rx_q *rxq, int cpu)
1582 {
1583 unsigned int sync, len = xdp->data_end - xdp->data;
1584 u32 ret = FEC_ENET_XDP_PASS;
1585 struct page *page;
1586 int err;
1587 u32 act;
1588
1589 act = bpf_prog_run_xdp(prog, xdp);
1590
1591 /* Due to xdp_adjust_tail and xdp_adjust_head, the DMA sync for_device
1592 * must cover the maximum length the CPU has touched.
1593 */
1594 sync = xdp->data_end - xdp->data;
1595 sync = max(sync, len);
1596
1597 switch (act) {
1598 case XDP_PASS:
1599 rxq->stats[RX_XDP_PASS]++;
1600 ret = FEC_ENET_XDP_PASS;
1601 break;
1602
1603 case XDP_REDIRECT:
1604 rxq->stats[RX_XDP_REDIRECT]++;
1605 err = xdp_do_redirect(fep->netdev, xdp, prog);
1606 if (unlikely(err))
1607 goto xdp_err;
1608
1609 ret = FEC_ENET_XDP_REDIR;
1610 break;
1611
1612 case XDP_TX:
1613 rxq->stats[RX_XDP_TX]++;
1614 err = fec_enet_xdp_tx_xmit(fep, cpu, xdp, sync);
1615 if (unlikely(err)) {
1616 rxq->stats[RX_XDP_TX_ERRORS]++;
1617 goto xdp_err;
1618 }
1619
1620 ret = FEC_ENET_XDP_TX;
1621 break;
1622
1623 default:
1624 bpf_warn_invalid_xdp_action(fep->netdev, prog, act);
1625 fallthrough;
1626
1627 case XDP_ABORTED:
1628 fallthrough; /* handle aborts by dropping packet */
1629
1630 case XDP_DROP:
1631 rxq->stats[RX_XDP_DROP]++;
1632 xdp_err:
1633 ret = FEC_ENET_XDP_CONSUMED;
1634 page = virt_to_head_page(xdp->data);
1635 page_pool_put_page(rxq->page_pool, page, sync, true);
1636 if (act != XDP_DROP)
1637 trace_xdp_exception(fep->netdev, prog, act);
1638 break;
1639 }
1640
1641 return ret;
1642 }
1643
1644 /* During a receive, the bd_rx.cur points to the current incoming buffer.
1645 * When we update through the ring, if the next incoming buffer has
1646 * not been given to the system, we just set the empty indicator,
1647 * effectively tossing the packet.
1648 */
1649 static int
1650 fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1651 {
1652 struct fec_enet_private *fep = netdev_priv(ndev);
1653 struct fec_enet_priv_rx_q *rxq;
1654 struct bufdesc *bdp;
1655 unsigned short status;
1656 struct sk_buff *skb;
1657 ushort pkt_len;
1658 __u8 *data;
1659 int pkt_received = 0;
1660 struct bufdesc_ex *ebdp = NULL;
1661 bool vlan_packet_rcvd = false;
1662 u16 vlan_tag;
1663 int index = 0;
1664 bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
1665 struct bpf_prog *xdp_prog = READ_ONCE(fep->xdp_prog);
1666 u32 ret, xdp_result = FEC_ENET_XDP_PASS;
1667 u32 data_start = FEC_ENET_XDP_HEADROOM;
1668 int cpu = smp_processor_id();
1669 struct xdp_buff xdp;
1670 struct page *page;
1671 u32 sub_len = 4;
1672
1673 #if !defined(CONFIG_M5272)
1674 /* If the controller has the FEC_QUIRK_HAS_RACC quirk, the
1675 * FEC_RACC_SHIFT16 bit is set by default in the probe function.
1676 */
1677 if (fep->quirks & FEC_QUIRK_HAS_RACC) {
1678 data_start += 2;
1679 sub_len += 2;
1680 }
1681 #endif
1682
1683 #ifdef CONFIG_M532x
1684 flush_cache_all();
1685 #endif
1686 rxq = fep->rx_queue[queue_id];
1687
1688 /* First, grab all of the stats for the incoming packet.
1689 * These get messed up if we get called due to a busy condition.
1690 */
1691 bdp = rxq->bd.cur;
1692 xdp_init_buff(&xdp, PAGE_SIZE, &rxq->xdp_rxq);
1693
1694 while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
1695
1696 if (pkt_received >= budget)
1697 break;
1698 pkt_received++;
1699
1700 writel(FEC_ENET_RXF_GET(queue_id), fep->hwp + FEC_IEVENT);
1701
1702 /* Check for errors. */
1703 status ^= BD_ENET_RX_LAST;
1704 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
1705 BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
1706 BD_ENET_RX_CL)) {
1707 ndev->stats.rx_errors++;
1708 if (status & BD_ENET_RX_OV) {
1709 /* FIFO overrun */
1710 ndev->stats.rx_fifo_errors++;
1711 goto rx_processing_done;
1712 }
1713 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
1714 | BD_ENET_RX_LAST)) {
1715 /* Frame too long or too short. */
1716 ndev->stats.rx_length_errors++;
1717 if (status & BD_ENET_RX_LAST)
1718 netdev_err(ndev, "rcv is not +last\n");
1719 }
1720 if (status & BD_ENET_RX_CR) /* CRC Error */
1721 ndev->stats.rx_crc_errors++;
1722 /* Report late collisions as a frame error. */
1723 if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
1724 ndev->stats.rx_frame_errors++;
1725 goto rx_processing_done;
1726 }
1727
1728 /* Process the incoming frame. */
1729 ndev->stats.rx_packets++;
1730 pkt_len = fec16_to_cpu(bdp->cbd_datlen);
1731 ndev->stats.rx_bytes += pkt_len;
1732
1733 index = fec_enet_get_bd_index(bdp, &rxq->bd);
1734 page = rxq->rx_skb_info[index].page;
1735 dma_sync_single_for_cpu(&fep->pdev->dev,
1736 fec32_to_cpu(bdp->cbd_bufaddr),
1737 pkt_len,
1738 DMA_FROM_DEVICE);
1739 prefetch(page_address(page));
1740 fec_enet_update_cbd(rxq, bdp, index);
1741
1742 if (xdp_prog) {
1743 xdp_buff_clear_frags_flag(&xdp);
1744 /* subtract 16bit shift and FCS */
1745 xdp_prepare_buff(&xdp, page_address(page),
1746 data_start, pkt_len - sub_len, false);
1747 ret = fec_enet_run_xdp(fep, xdp_prog, &xdp, rxq, cpu);
1748 xdp_result |= ret;
1749 if (ret != FEC_ENET_XDP_PASS)
1750 goto rx_processing_done;
1751 }
1752
1753 /* The packet length includes FCS, but we don't want to
1754 * include that when passing upstream as it messes up
1755 * bridging applications.
1756 */
1757 skb = build_skb(page_address(page), PAGE_SIZE);
1758 if (unlikely(!skb)) {
1759 page_pool_recycle_direct(rxq->page_pool, page);
1760 ndev->stats.rx_dropped++;
1761
1762 netdev_err_once(ndev, "build_skb failed!\n");
1763 goto rx_processing_done;
1764 }
1765
1766 skb_reserve(skb, data_start);
1767 skb_put(skb, pkt_len - sub_len);
1768 skb_mark_for_recycle(skb);
1769
1770 if (unlikely(need_swap)) {
1771 data = page_address(page) + FEC_ENET_XDP_HEADROOM;
1772 swap_buffer(data, pkt_len);
1773 }
1774 data = skb->data;
1775
1776 /* Extract the enhanced buffer descriptor */
1777 ebdp = NULL;
1778 if (fep->bufdesc_ex)
1779 ebdp = (struct bufdesc_ex *)bdp;
1780
1781 /* If this is a VLAN packet remove the VLAN Tag */
1782 vlan_packet_rcvd = false;
1783 if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
1784 fep->bufdesc_ex &&
1785 (ebdp->cbd_esc & cpu_to_fec32(BD_ENET_RX_VLAN))) {
1786 /* Push and remove the vlan tag */
1787 struct vlan_hdr *vlan_header =
1788 (struct vlan_hdr *) (data + ETH_HLEN);
1789 vlan_tag = ntohs(vlan_header->h_vlan_TCI);
1790
1791 vlan_packet_rcvd = true;
1792
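			/* Untag in place: memmove() shifts the two MAC addresses
			 * (2 * ETH_ALEN = 12 bytes) forward by VLAN_HLEN (4 bytes),
			 * overwriting the 802.1Q tag, and skb_pull() below then
			 * drops the now-duplicated four bytes at the head.
			 */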
1793 memmove(skb->data + VLAN_HLEN, data, ETH_ALEN * 2);
1794 skb_pull(skb, VLAN_HLEN);
1795 }
1796
1797 skb->protocol = eth_type_trans(skb, ndev);
1798
1799 /* Get receive timestamp from the skb */
1800 if (fep->hwts_rx_en && fep->bufdesc_ex)
1801 fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts),
1802 skb_hwtstamps(skb));
1803
1804 if (fep->bufdesc_ex &&
1805 (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
1806 if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR))) {
1807 /* don't check it */
1808 skb->ip_summed = CHECKSUM_UNNECESSARY;
1809 } else {
1810 skb_checksum_none_assert(skb);
1811 }
1812 }
1813
1814 /* Handle received VLAN packets */
1815 if (vlan_packet_rcvd)
1816 __vlan_hwaccel_put_tag(skb,
1817 htons(ETH_P_8021Q),
1818 vlan_tag);
1819
1820 skb_record_rx_queue(skb, queue_id);
1821 napi_gro_receive(&fep->napi, skb);
1822
1823 rx_processing_done:
1824 /* Clear the status flags for this buffer */
1825 status &= ~BD_ENET_RX_STATS;
1826
1827 /* Mark the buffer empty */
1828 status |= BD_ENET_RX_EMPTY;
1829
1830 if (fep->bufdesc_ex) {
1831 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
1832
1833 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
1834 ebdp->cbd_prot = 0;
1835 ebdp->cbd_bdu = 0;
1836 }
1837 /* Make sure the updates to rest of the descriptor are
1838 * performed before transferring ownership.
1839 */
1840 wmb();
1841 bdp->cbd_sc = cpu_to_fec16(status);
1842
1843 /* Update BD pointer to next entry */
1844 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
1845
1846 /* Doing this here will keep the FEC running while we process
1847 * incoming frames. On a heavily loaded network, we should be
1848 * able to keep up at the expense of system resources.
1849 */
1850 writel(0, rxq->bd.reg_desc_active);
1851 }
1852 rxq->bd.cur = bdp;
1853
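	/* Frames that were XDP_REDIRECTed above are only queued towards their
	 * destinations; flush them out once per poll instead of per packet.
	 */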
1854 if (xdp_result & FEC_ENET_XDP_REDIR)
1855 xdp_do_flush_map();
1856
1857 return pkt_received;
1858 }
1859
1860 static int fec_enet_rx(struct net_device *ndev, int budget)
1861 {
1862 struct fec_enet_private *fep = netdev_priv(ndev);
1863 int i, done = 0;
1864
1865 /* Make sure that AVB queues are processed first. */
1866 for (i = fep->num_rx_queues - 1; i >= 0; i--)
1867 done += fec_enet_rx_queue(ndev, budget - done, i);
1868
1869 return done;
1870 }
1871
1872 static bool fec_enet_collect_events(struct fec_enet_private *fep)
1873 {
1874 uint int_events;
1875
1876 int_events = readl(fep->hwp + FEC_IEVENT);
1877
1878 /* Don't clear MDIO events, we poll for those */
1879 int_events &= ~FEC_ENET_MII;
1880
1881 writel(int_events, fep->hwp + FEC_IEVENT);
1882
1883 return int_events != 0;
1884 }
1885
1886 static irqreturn_t
1887 fec_enet_interrupt(int irq, void *dev_id)
1888 {
1889 struct net_device *ndev = dev_id;
1890 struct fec_enet_private *fep = netdev_priv(ndev);
1891 irqreturn_t ret = IRQ_NONE;
1892
1893 if (fec_enet_collect_events(fep) && fep->link) {
1894 ret = IRQ_HANDLED;
1895
1896 if (napi_schedule_prep(&fep->napi)) {
1897 /* Disable interrupts */
1898 writel(0, fep->hwp + FEC_IMASK);
1899 __napi_schedule(&fep->napi);
1900 }
1901 }
1902
1903 return ret;
1904 }
1905
1906 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
1907 {
1908 struct net_device *ndev = napi->dev;
1909 struct fec_enet_private *fep = netdev_priv(ndev);
1910 int done = 0;
1911
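	/* Loop while budget remains and the hardware keeps raising events;
	 * re-reading (and acking) IEVENT on each pass picks up work that
	 * arrived while the previous pass was running.
	 */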
1912 do {
1913 done += fec_enet_rx(ndev, budget - done);
1914 fec_enet_tx(ndev, budget);
1915 } while ((done < budget) && fec_enet_collect_events(fep));
1916
1917 if (done < budget) {
1918 napi_complete_done(napi, done);
1919 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
1920 }
1921
1922 return done;
1923 }
1924
1925 /* ------------------------------------------------------------------------- */
1926 static int fec_get_mac(struct net_device *ndev)
1927 {
1928 struct fec_enet_private *fep = netdev_priv(ndev);
1929 unsigned char *iap, tmpaddr[ETH_ALEN];
1930 int ret;
1931
1932 /*
1933 * try to get mac address in following order:
1934 *
1935 * 1) module parameter via kernel command line in form
1936 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
1937 */
1938 iap = macaddr;
1939
1940 /*
1941 * 2) from device tree data
1942 */
1943 if (!is_valid_ether_addr(iap)) {
1944 struct device_node *np = fep->pdev->dev.of_node;
1945 if (np) {
1946 ret = of_get_mac_address(np, tmpaddr);
1947 if (!ret)
1948 iap = tmpaddr;
1949 else if (ret == -EPROBE_DEFER)
1950 return ret;
1951 }
1952 }
1953
1954 /*
1955 * 3) from flash or fuse (via platform data)
1956 */
1957 if (!is_valid_ether_addr(iap)) {
1958 #ifdef CONFIG_M5272
1959 if (FEC_FLASHMAC)
1960 iap = (unsigned char *)FEC_FLASHMAC;
1961 #else
1962 struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
1963
1964 if (pdata)
1965 iap = (unsigned char *)&pdata->mac;
1966 #endif
1967 }
1968
1969 /*
1970 * 4) FEC mac registers set by bootloader
1971 */
1972 if (!is_valid_ether_addr(iap)) {
1973 *((__be32 *) &tmpaddr[0]) =
1974 cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
1975 *((__be16 *) &tmpaddr[4]) =
1976 cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
1977 iap = &tmpaddr[0];
1978 }
1979
1980 /*
1981 * 5) random mac address
1982 */
1983 if (!is_valid_ether_addr(iap)) {
1984 /* Report it and use a random ethernet address instead */
1985 dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
1986 eth_hw_addr_random(ndev);
1987 dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
1988 ndev->dev_addr);
1989 return 0;
1990 }
1991
1992 /* Adjust MAC if using macaddr */
1993 eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
1994
1995 return 0;
1996 }
1997
1998 /* ------------------------------------------------------------------------- */
1999
2000 /*
2001 * Phy section
2002 */
2003 static void fec_enet_adjust_link(struct net_device *ndev)
2004 {
2005 struct fec_enet_private *fep = netdev_priv(ndev);
2006 struct phy_device *phy_dev = ndev->phydev;
2007 int status_change = 0;
2008
2009 /*
2010 * If the netdev is down, or is going down, we're not interested
2011 * in link state events, so just mark our idea of the link as down
2012 * and ignore the event.
2013 */
2014 if (!netif_running(ndev) || !netif_device_present(ndev)) {
2015 fep->link = 0;
2016 } else if (phy_dev->link) {
2017 if (!fep->link) {
2018 fep->link = phy_dev->link;
2019 status_change = 1;
2020 }
2021
2022 if (fep->full_duplex != phy_dev->duplex) {
2023 fep->full_duplex = phy_dev->duplex;
2024 status_change = 1;
2025 }
2026
2027 if (phy_dev->speed != fep->speed) {
2028 fep->speed = phy_dev->speed;
2029 status_change = 1;
2030 }
2031
2032 /* if any of the above changed restart the FEC */
2033 if (status_change) {
2034 netif_stop_queue(ndev);
2035 napi_disable(&fep->napi);
2036 netif_tx_lock_bh(ndev);
2037 fec_restart(ndev);
2038 netif_tx_wake_all_queues(ndev);
2039 netif_tx_unlock_bh(ndev);
2040 napi_enable(&fep->napi);
2041 }
2042 } else {
2043 if (fep->link) {
2044 netif_stop_queue(ndev);
2045 napi_disable(&fep->napi);
2046 netif_tx_lock_bh(ndev);
2047 fec_stop(ndev);
2048 netif_tx_unlock_bh(ndev);
2049 napi_enable(&fep->napi);
2050 fep->link = phy_dev->link;
2051 status_change = 1;
2052 }
2053 }
2054
2055 if (status_change)
2056 phy_print_status(phy_dev);
2057 }
2058
2059 static int fec_enet_mdio_wait(struct fec_enet_private *fep)
2060 {
2061 uint ievent;
2062 int ret;
2063
2064 ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
2065 ievent & FEC_ENET_MII, 2, 30000);
2066
2067 if (!ret)
2068 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2069
2070 return ret;
2071 }
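/* A note on the timing above: readl_poll_timeout_atomic() samples FEC_IEVENT
 * every 2 us for up to 30 ms and returns -ETIMEDOUT if the MII done flag
 * never appears; on success the flag is acknowledged by writing it back.
 */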
2072
2073 static int fec_enet_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
2074 {
2075 struct fec_enet_private *fep = bus->priv;
2076 struct device *dev = &fep->pdev->dev;
2077 int ret = 0, frame_start, frame_addr, frame_op;
2078
2079 ret = pm_runtime_resume_and_get(dev);
2080 if (ret < 0)
2081 return ret;
2082
2083 /* C22 read */
2084 frame_op = FEC_MMFR_OP_READ;
2085 frame_start = FEC_MMFR_ST;
2086 frame_addr = regnum;
2087
2088 /* start a read op */
2089 writel(frame_start | frame_op |
2090 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2091 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2092
2093 /* wait for end of transfer */
2094 ret = fec_enet_mdio_wait(fep);
2095 if (ret) {
2096 netdev_err(fep->netdev, "MDIO read timeout\n");
2097 goto out;
2098 }
2099
2100 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2101
2102 out:
2103 pm_runtime_mark_last_busy(dev);
2104 pm_runtime_put_autosuspend(dev);
2105
2106 return ret;
2107 }
2108
2109 static int fec_enet_mdio_read_c45(struct mii_bus *bus, int mii_id,
2110 int devad, int regnum)
2111 {
2112 struct fec_enet_private *fep = bus->priv;
2113 struct device *dev = &fep->pdev->dev;
2114 int ret = 0, frame_start, frame_op;
2115
2116 ret = pm_runtime_resume_and_get(dev);
2117 if (ret < 0)
2118 return ret;
2119
2120 frame_start = FEC_MMFR_ST_C45;
2121
2122 /* write address */
2123 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2124 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2125 FEC_MMFR_TA | (regnum & 0xFFFF),
2126 fep->hwp + FEC_MII_DATA);
2127
2128 /* wait for end of transfer */
2129 ret = fec_enet_mdio_wait(fep);
2130 if (ret) {
2131 netdev_err(fep->netdev, "MDIO address write timeout\n");
2132 goto out;
2133 }
2134
2135 frame_op = FEC_MMFR_OP_READ_C45;
2136
2137 /* start a read op */
2138 writel(frame_start | frame_op |
2139 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2140 FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
2141
2142 /* wait for end of transfer */
2143 ret = fec_enet_mdio_wait(fep);
2144 if (ret) {
2145 netdev_err(fep->netdev, "MDIO read timeout\n");
2146 goto out;
2147 }
2148
2149 ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
2150
2151 out:
2152 pm_runtime_mark_last_busy(dev);
2153 pm_runtime_put_autosuspend(dev);
2154
2155 return ret;
2156 }
2157
2158 static int fec_enet_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
2159 u16 value)
2160 {
2161 struct fec_enet_private *fep = bus->priv;
2162 struct device *dev = &fep->pdev->dev;
2163 int ret, frame_start, frame_addr;
2164
2165 ret = pm_runtime_resume_and_get(dev);
2166 if (ret < 0)
2167 return ret;
2168
2169 /* C22 write */
2170 frame_start = FEC_MMFR_ST;
2171 frame_addr = regnum;
2172
2173 /* start a write op */
2174 writel(frame_start | FEC_MMFR_OP_WRITE |
2175 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
2176 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2177 fep->hwp + FEC_MII_DATA);
2178
2179 /* wait for end of transfer */
2180 ret = fec_enet_mdio_wait(fep);
2181 if (ret)
2182 netdev_err(fep->netdev, "MDIO write timeout\n");
2183
2184 pm_runtime_mark_last_busy(dev);
2185 pm_runtime_put_autosuspend(dev);
2186
2187 return ret;
2188 }
2189
2190 static int fec_enet_mdio_write_c45(struct mii_bus *bus, int mii_id,
2191 int devad, int regnum, u16 value)
2192 {
2193 struct fec_enet_private *fep = bus->priv;
2194 struct device *dev = &fep->pdev->dev;
2195 int ret, frame_start;
2196
2197 ret = pm_runtime_resume_and_get(dev);
2198 if (ret < 0)
2199 return ret;
2200
2201 frame_start = FEC_MMFR_ST_C45;
2202
2203 /* write address */
2204 writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
2205 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2206 FEC_MMFR_TA | (regnum & 0xFFFF),
2207 fep->hwp + FEC_MII_DATA);
2208
2209 /* wait for end of transfer */
2210 ret = fec_enet_mdio_wait(fep);
2211 if (ret) {
2212 netdev_err(fep->netdev, "MDIO address write timeout\n");
2213 goto out;
2214 }
2215
2216 /* start a write op */
2217 writel(frame_start | FEC_MMFR_OP_WRITE |
2218 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(devad) |
2219 FEC_MMFR_TA | FEC_MMFR_DATA(value),
2220 fep->hwp + FEC_MII_DATA);
2221
2222 /* wait for end of transfer */
2223 ret = fec_enet_mdio_wait(fep);
2224 if (ret)
2225 netdev_err(fep->netdev, "MDIO write timeout\n");
2226
2227 out:
2228 pm_runtime_mark_last_busy(dev);
2229 pm_runtime_put_autosuspend(dev);
2230
2231 return ret;
2232 }
2233
2234 static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
2235 {
2236 struct fec_enet_private *fep = netdev_priv(ndev);
2237 struct phy_device *phy_dev = ndev->phydev;
2238
2239 if (phy_dev) {
2240 phy_reset_after_clk_enable(phy_dev);
2241 } else if (fep->phy_node) {
2242 		/*
2243 		 * If the PHY is not yet bound to the MAC, but an OF PHY
2244 		 * node and a matching PHY device instance already exist,
2245 		 * use the OF PHY node to obtain the PHY device instance,
2246 		 * and then use that instance when triggering the PHY
2247 		 * reset.
2248 		 */
2249 phy_dev = of_phy_find_device(fep->phy_node);
2250 phy_reset_after_clk_enable(phy_dev);
2251 put_device(&phy_dev->mdio.dev);
2252 }
2253 }
2254
2255 static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
2256 {
2257 struct fec_enet_private *fep = netdev_priv(ndev);
2258 int ret;
2259
2260 if (enable) {
2261 ret = clk_prepare_enable(fep->clk_enet_out);
2262 if (ret)
2263 return ret;
2264
2265 if (fep->clk_ptp) {
2266 mutex_lock(&fep->ptp_clk_mutex);
2267 ret = clk_prepare_enable(fep->clk_ptp);
2268 if (ret) {
2269 mutex_unlock(&fep->ptp_clk_mutex);
2270 goto failed_clk_ptp;
2271 } else {
2272 fep->ptp_clk_on = true;
2273 }
2274 mutex_unlock(&fep->ptp_clk_mutex);
2275 }
2276
2277 ret = clk_prepare_enable(fep->clk_ref);
2278 if (ret)
2279 goto failed_clk_ref;
2280
2281 ret = clk_prepare_enable(fep->clk_2x_txclk);
2282 if (ret)
2283 goto failed_clk_2x_txclk;
2284
2285 fec_enet_phy_reset_after_clk_enable(ndev);
2286 } else {
2287 clk_disable_unprepare(fep->clk_enet_out);
2288 if (fep->clk_ptp) {
2289 mutex_lock(&fep->ptp_clk_mutex);
2290 clk_disable_unprepare(fep->clk_ptp);
2291 fep->ptp_clk_on = false;
2292 mutex_unlock(&fep->ptp_clk_mutex);
2293 }
2294 clk_disable_unprepare(fep->clk_ref);
2295 clk_disable_unprepare(fep->clk_2x_txclk);
2296 }
2297
2298 return 0;
2299
2300 failed_clk_2x_txclk:
2301 if (fep->clk_ref)
2302 clk_disable_unprepare(fep->clk_ref);
2303 failed_clk_ref:
2304 if (fep->clk_ptp) {
2305 mutex_lock(&fep->ptp_clk_mutex);
2306 clk_disable_unprepare(fep->clk_ptp);
2307 fep->ptp_clk_on = false;
2308 mutex_unlock(&fep->ptp_clk_mutex);
2309 }
2310 failed_clk_ptp:
2311 clk_disable_unprepare(fep->clk_enet_out);
2312
2313 return ret;
2314 }
2315
2316 static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep,
2317 struct device_node *np)
2318 {
2319 u32 rgmii_tx_delay, rgmii_rx_delay;
2320
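	/* These delays come from standard devicetree properties; an
	 * illustrative fragment (the node label is hypothetical):
	 *
	 *	&fec1 {
	 *		tx-internal-delay-ps = <2000>;
	 *		rx-internal-delay-ps = <0>;
	 *	};
	 */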
2321 /* For rgmii tx internal delay, valid values are 0ps and 2000ps */
2322 if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) {
2323 if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) {
2324 dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps");
2325 return -EINVAL;
2326 } else if (rgmii_tx_delay == 2000) {
2327 fep->rgmii_txc_dly = true;
2328 }
2329 }
2330
2331 /* For rgmii rx internal delay, valid values are 0ps and 2000ps */
2332 if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) {
2333 if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) {
2334 dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps");
2335 return -EINVAL;
2336 } else if (rgmii_rx_delay == 2000) {
2337 fep->rgmii_rxc_dly = true;
2338 }
2339 }
2340
2341 return 0;
2342 }
2343
2344 static int fec_enet_mii_probe(struct net_device *ndev)
2345 {
2346 struct fec_enet_private *fep = netdev_priv(ndev);
2347 struct phy_device *phy_dev = NULL;
2348 char mdio_bus_id[MII_BUS_ID_SIZE];
2349 char phy_name[MII_BUS_ID_SIZE + 3];
2350 int phy_id;
2351 int dev_id = fep->dev_id;
2352
2353 if (fep->phy_node) {
2354 phy_dev = of_phy_connect(ndev, fep->phy_node,
2355 &fec_enet_adjust_link, 0,
2356 fep->phy_interface);
2357 if (!phy_dev) {
2358 netdev_err(ndev, "Unable to connect to phy\n");
2359 return -ENODEV;
2360 }
2361 } else {
2362 /* check for attached phy */
2363 for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
2364 if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
2365 continue;
2366 if (dev_id--)
2367 continue;
2368 strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
2369 break;
2370 }
2371
2372 if (phy_id >= PHY_MAX_ADDR) {
2373 netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
2374 strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
2375 phy_id = 0;
2376 }
2377
2378 snprintf(phy_name, sizeof(phy_name),
2379 PHY_ID_FMT, mdio_bus_id, phy_id);
2380 phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
2381 fep->phy_interface);
2382 }
2383
2384 if (IS_ERR(phy_dev)) {
2385 netdev_err(ndev, "could not attach to PHY\n");
2386 return PTR_ERR(phy_dev);
2387 }
2388
2389 /* mask with MAC supported features */
2390 if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
2391 phy_set_max_speed(phy_dev, 1000);
2392 phy_remove_link_mode(phy_dev,
2393 ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
2394 #if !defined(CONFIG_M5272)
2395 phy_support_sym_pause(phy_dev);
2396 #endif
2397 }
2398 else
2399 phy_set_max_speed(phy_dev, 100);
2400
2401 fep->link = 0;
2402 fep->full_duplex = 0;
2403
2404 phy_attached_info(phy_dev);
2405
2406 return 0;
2407 }
2408
2409 static int fec_enet_mii_init(struct platform_device *pdev)
2410 {
2411 static struct mii_bus *fec0_mii_bus;
2412 struct net_device *ndev = platform_get_drvdata(pdev);
2413 struct fec_enet_private *fep = netdev_priv(ndev);
2414 bool suppress_preamble = false;
2415 struct phy_device *phydev;
2416 struct device_node *node;
2417 int err = -ENXIO;
2418 u32 mii_speed, holdtime;
2419 u32 bus_freq;
2420 int addr;
2421
2422 /*
2423 * The i.MX28 dual fec interfaces are not equal.
2424 * Here are the differences:
2425 *
2426 * - fec0 supports MII & RMII modes while fec1 only supports RMII
2427 * - fec0 acts as the 1588 time master while fec1 is slave
2428 * - external phys can only be configured by fec0
2429 *
2430 	 * That is to say fec1 cannot work independently. It only works
2431 	 * when fec0 is working. The reason behind this design is that the
2432 	 * second interface is added primarily for Switch mode.
2433 	 *
2434 	 * Because of the last point above, both PHYs are attached to the
2435 	 * fec0 MDIO interface in the board design, and need to be
2436 	 * configured by the fec0 mii_bus.
2437 */
2438 if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
2439 /* fec1 uses fec0 mii_bus */
2440 if (mii_cnt && fec0_mii_bus) {
2441 fep->mii_bus = fec0_mii_bus;
2442 mii_cnt++;
2443 return 0;
2444 }
2445 return -ENOENT;
2446 }
2447
2448 bus_freq = 2500000; /* 2.5MHz by default */
2449 node = of_get_child_by_name(pdev->dev.of_node, "mdio");
2450 if (node) {
2451 of_property_read_u32(node, "clock-frequency", &bus_freq);
2452 suppress_preamble = of_property_read_bool(node,
2453 "suppress-preamble");
2454 }
2455
2456 /*
2457 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
2458 *
2459 * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
2460 	 * for ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'. The i.MX28
2461 	 * Reference Manual gets this wrong; the error is corrected in the
2462 	 * i.MX6Q documentation.
2463 */
2464 mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
2465 if (fep->quirks & FEC_QUIRK_ENET_MAC)
2466 mii_speed--;
2467 if (mii_speed > 63) {
2468 dev_err(&pdev->dev,
2469 "fec clock (%lu) too fast to get right mii speed\n",
2470 clk_get_rate(fep->clk_ipg));
2471 err = -EINVAL;
2472 goto err_out;
2473 }
2474
2475 /*
2476 	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
2477 	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
2478 	 * versions are RAZ there, so just ignore the difference and write the
2479 	 * register always.
2480 	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
2481 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
2482 * output.
2483 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
2484 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
2485 * holdtime cannot result in a value greater than 3.
2486 */
2487 holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
2488
2489 fep->phy_speed = mii_speed << 1 | holdtime << 8;
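	/* Worked example, assuming a 66 MHz ipg clock and the default 2.5 MHz
	 * bus_freq on an ENET-MAC core:
	 *	mii_speed = DIV_ROUND_UP(66000000, 5000000) - 1 = 13
	 *	MDC      = 66000000 / ((13 + 1) * 2) ~= 2.36 MHz (<= 2.5 MHz)
	 *	holdtime = DIV_ROUND_UP(66000000, 100000000) - 1 = 0
	 * so the output is held for (0 + 1) clock cycles ~= 15 ns >= 10 ns.
	 */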
2490
2491 if (suppress_preamble)
2492 fep->phy_speed |= BIT(7);
2493
2494 if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
2495 		/* Clear MMFR to avoid generating an MII event when writing MSCR.
2496 * MII event generation condition:
2497 * - writing MSCR:
2498 * - mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
2499 * mscr_reg_data_in[7:0] != 0
2500 * - writing MMFR:
2501 * - mscr[7:0]_not_zero
2502 */
2503 writel(0, fep->hwp + FEC_MII_DATA);
2504 }
2505
2506 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
2507
2508 /* Clear any pending transaction complete indication */
2509 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
2510
2511 fep->mii_bus = mdiobus_alloc();
2512 if (fep->mii_bus == NULL) {
2513 err = -ENOMEM;
2514 goto err_out;
2515 }
2516
2517 fep->mii_bus->name = "fec_enet_mii_bus";
2518 fep->mii_bus->read = fec_enet_mdio_read_c22;
2519 fep->mii_bus->write = fec_enet_mdio_write_c22;
2520 if (fep->quirks & FEC_QUIRK_HAS_MDIO_C45) {
2521 fep->mii_bus->read_c45 = fec_enet_mdio_read_c45;
2522 fep->mii_bus->write_c45 = fec_enet_mdio_write_c45;
2523 }
2524 snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
2525 pdev->name, fep->dev_id + 1);
2526 fep->mii_bus->priv = fep;
2527 fep->mii_bus->parent = &pdev->dev;
2528
2529 err = of_mdiobus_register(fep->mii_bus, node);
2530 if (err)
2531 goto err_out_free_mdiobus;
2532 of_node_put(node);
2533
2534 /* find all the PHY devices on the bus and set mac_managed_pm to true */
2535 for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
2536 phydev = mdiobus_get_phy(fep->mii_bus, addr);
2537 if (phydev)
2538 phydev->mac_managed_pm = true;
2539 }
2540
2541 mii_cnt++;
2542
2543 /* save fec0 mii_bus */
2544 if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
2545 fec0_mii_bus = fep->mii_bus;
2546
2547 return 0;
2548
2549 err_out_free_mdiobus:
2550 mdiobus_free(fep->mii_bus);
2551 err_out:
2552 of_node_put(node);
2553 return err;
2554 }
2555
2556 static void fec_enet_mii_remove(struct fec_enet_private *fep)
2557 {
2558 if (--mii_cnt == 0) {
2559 mdiobus_unregister(fep->mii_bus);
2560 mdiobus_free(fep->mii_bus);
2561 }
2562 }
2563
2564 static void fec_enet_get_drvinfo(struct net_device *ndev,
2565 struct ethtool_drvinfo *info)
2566 {
2567 struct fec_enet_private *fep = netdev_priv(ndev);
2568
2569 strscpy(info->driver, fep->pdev->dev.driver->name,
2570 sizeof(info->driver));
2571 strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
2572 }
2573
2574 static int fec_enet_get_regs_len(struct net_device *ndev)
2575 {
2576 struct fec_enet_private *fep = netdev_priv(ndev);
2577 struct resource *r;
2578 int s = 0;
2579
2580 r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
2581 if (r)
2582 s = resource_size(r);
2583
2584 return s;
2585 }
2586
2587 /* List of registers that can safely be read to dump them with ethtool */
2588 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2589 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2590 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2591 static __u32 fec_enet_register_version = 2;
2592 static u32 fec_enet_register_offset[] = {
2593 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2594 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2595 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
2596 FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
2597 FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
2598 FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
2599 FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
2600 FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
2601 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2602 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
2603 FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
2604 FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
2605 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2606 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2607 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2608 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2609 RMON_T_P_GTE2048, RMON_T_OCTETS,
2610 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2611 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2612 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2613 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2614 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2615 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2616 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2617 RMON_R_P_GTE2048, RMON_R_OCTETS,
2618 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2619 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2620 };
2621 /* for i.MX6ul */
2622 static u32 fec_enet_register_offset_6ul[] = {
2623 FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
2624 FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
2625 FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0,
2626 FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH,
2627 FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0,
2628 FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
2629 FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC,
2630 RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
2631 RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
2632 RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
2633 RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
2634 RMON_T_P_GTE2048, RMON_T_OCTETS,
2635 IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
2636 IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
2637 IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
2638 RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
2639 RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
2640 RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
2641 RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
2642 RMON_R_P_GTE2048, RMON_R_OCTETS,
2643 IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
2644 IEEE_R_FDXFC, IEEE_R_OCTETS_OK
2645 };
2646 #else
2647 static __u32 fec_enet_register_version = 1;
2648 static u32 fec_enet_register_offset[] = {
2649 FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
2650 FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
2651 FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
2652 FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
2653 FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
2654 FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
2655 FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
2656 FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
2657 FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
2658 };
2659 #endif
2660
2661 static void fec_enet_get_regs(struct net_device *ndev,
2662 struct ethtool_regs *regs, void *regbuf)
2663 {
2664 struct fec_enet_private *fep = netdev_priv(ndev);
2665 u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
2666 struct device *dev = &fep->pdev->dev;
2667 u32 *buf = (u32 *)regbuf;
2668 u32 i, off;
2669 int ret;
2670 #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
2671 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
2672 defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
2673 u32 *reg_list;
2674 u32 reg_cnt;
2675
2676 if (!of_machine_is_compatible("fsl,imx6ul")) {
2677 reg_list = fec_enet_register_offset;
2678 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2679 } else {
2680 reg_list = fec_enet_register_offset_6ul;
2681 reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul);
2682 }
2683 #else
2684 /* coldfire */
2685 static u32 *reg_list = fec_enet_register_offset;
2686 static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset);
2687 #endif
2688 ret = pm_runtime_resume_and_get(dev);
2689 if (ret < 0)
2690 return;
2691
2692 regs->version = fec_enet_register_version;
2693
2694 memset(buf, 0, regs->len);
2695
2696 for (i = 0; i < reg_cnt; i++) {
2697 off = reg_list[i];
2698
2699 if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
2700 !(fep->quirks & FEC_QUIRK_HAS_FRREG))
2701 continue;
2702
2703 off >>= 2;
2704 buf[off] = readl(&theregs[off]);
2705 }
2706
2707 pm_runtime_mark_last_busy(dev);
2708 pm_runtime_put_autosuspend(dev);
2709 }
2710
2711 static int fec_enet_get_ts_info(struct net_device *ndev,
2712 struct ethtool_ts_info *info)
2713 {
2714 struct fec_enet_private *fep = netdev_priv(ndev);
2715
2716 if (fep->bufdesc_ex) {
2717
2718 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
2719 SOF_TIMESTAMPING_RX_SOFTWARE |
2720 SOF_TIMESTAMPING_SOFTWARE |
2721 SOF_TIMESTAMPING_TX_HARDWARE |
2722 SOF_TIMESTAMPING_RX_HARDWARE |
2723 SOF_TIMESTAMPING_RAW_HARDWARE;
2724 if (fep->ptp_clock)
2725 info->phc_index = ptp_clock_index(fep->ptp_clock);
2726 else
2727 info->phc_index = -1;
2728
2729 info->tx_types = (1 << HWTSTAMP_TX_OFF) |
2730 (1 << HWTSTAMP_TX_ON);
2731
2732 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
2733 (1 << HWTSTAMP_FILTER_ALL);
2734 return 0;
2735 } else {
2736 return ethtool_op_get_ts_info(ndev, info);
2737 }
2738 }
2739
2740 #if !defined(CONFIG_M5272)
2741
2742 static void fec_enet_get_pauseparam(struct net_device *ndev,
2743 struct ethtool_pauseparam *pause)
2744 {
2745 struct fec_enet_private *fep = netdev_priv(ndev);
2746
2747 pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
2748 pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
2749 pause->rx_pause = pause->tx_pause;
2750 }
2751
2752 static int fec_enet_set_pauseparam(struct net_device *ndev,
2753 struct ethtool_pauseparam *pause)
2754 {
2755 struct fec_enet_private *fep = netdev_priv(ndev);
2756
2757 if (!ndev->phydev)
2758 return -ENODEV;
2759
2760 if (pause->tx_pause != pause->rx_pause) {
2761 netdev_info(ndev,
2762 			    "hardware only supports enabling/disabling both tx and rx");
2763 return -EINVAL;
2764 }
2765
2766 fep->pause_flag = 0;
2767
2768 /* tx pause must be same as rx pause */
2769 fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
2770 fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
2771
2772 phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
2773 pause->autoneg);
2774
2775 if (pause->autoneg) {
2776 if (netif_running(ndev))
2777 fec_stop(ndev);
2778 phy_start_aneg(ndev->phydev);
2779 }
2780 if (netif_running(ndev)) {
2781 napi_disable(&fep->napi);
2782 netif_tx_lock_bh(ndev);
2783 fec_restart(ndev);
2784 netif_tx_wake_all_queues(ndev);
2785 netif_tx_unlock_bh(ndev);
2786 napi_enable(&fep->napi);
2787 }
2788
2789 return 0;
2790 }
2791
2792 static const struct fec_stat {
2793 char name[ETH_GSTRING_LEN];
2794 u16 offset;
2795 } fec_stats[] = {
2796 /* RMON TX */
2797 { "tx_dropped", RMON_T_DROP },
2798 { "tx_packets", RMON_T_PACKETS },
2799 { "tx_broadcast", RMON_T_BC_PKT },
2800 { "tx_multicast", RMON_T_MC_PKT },
2801 { "tx_crc_errors", RMON_T_CRC_ALIGN },
2802 { "tx_undersize", RMON_T_UNDERSIZE },
2803 { "tx_oversize", RMON_T_OVERSIZE },
2804 { "tx_fragment", RMON_T_FRAG },
2805 { "tx_jabber", RMON_T_JAB },
2806 { "tx_collision", RMON_T_COL },
2807 { "tx_64byte", RMON_T_P64 },
2808 { "tx_65to127byte", RMON_T_P65TO127 },
2809 { "tx_128to255byte", RMON_T_P128TO255 },
2810 { "tx_256to511byte", RMON_T_P256TO511 },
2811 { "tx_512to1023byte", RMON_T_P512TO1023 },
2812 { "tx_1024to2047byte", RMON_T_P1024TO2047 },
2813 { "tx_GTE2048byte", RMON_T_P_GTE2048 },
2814 { "tx_octets", RMON_T_OCTETS },
2815
2816 /* IEEE TX */
2817 { "IEEE_tx_drop", IEEE_T_DROP },
2818 { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
2819 { "IEEE_tx_1col", IEEE_T_1COL },
2820 { "IEEE_tx_mcol", IEEE_T_MCOL },
2821 { "IEEE_tx_def", IEEE_T_DEF },
2822 { "IEEE_tx_lcol", IEEE_T_LCOL },
2823 { "IEEE_tx_excol", IEEE_T_EXCOL },
2824 { "IEEE_tx_macerr", IEEE_T_MACERR },
2825 { "IEEE_tx_cserr", IEEE_T_CSERR },
2826 { "IEEE_tx_sqe", IEEE_T_SQE },
2827 { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
2828 { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
2829
2830 /* RMON RX */
2831 { "rx_packets", RMON_R_PACKETS },
2832 { "rx_broadcast", RMON_R_BC_PKT },
2833 { "rx_multicast", RMON_R_MC_PKT },
2834 { "rx_crc_errors", RMON_R_CRC_ALIGN },
2835 { "rx_undersize", RMON_R_UNDERSIZE },
2836 { "rx_oversize", RMON_R_OVERSIZE },
2837 { "rx_fragment", RMON_R_FRAG },
2838 { "rx_jabber", RMON_R_JAB },
2839 { "rx_64byte", RMON_R_P64 },
2840 { "rx_65to127byte", RMON_R_P65TO127 },
2841 { "rx_128to255byte", RMON_R_P128TO255 },
2842 { "rx_256to511byte", RMON_R_P256TO511 },
2843 { "rx_512to1023byte", RMON_R_P512TO1023 },
2844 { "rx_1024to2047byte", RMON_R_P1024TO2047 },
2845 { "rx_GTE2048byte", RMON_R_P_GTE2048 },
2846 { "rx_octets", RMON_R_OCTETS },
2847
2848 /* IEEE RX */
2849 { "IEEE_rx_drop", IEEE_R_DROP },
2850 { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
2851 { "IEEE_rx_crc", IEEE_R_CRC },
2852 { "IEEE_rx_align", IEEE_R_ALIGN },
2853 { "IEEE_rx_macerr", IEEE_R_MACERR },
2854 { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
2855 { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
2856 };
2857
2858 #define FEC_STATS_SIZE (ARRAY_SIZE(fec_stats) * sizeof(u64))
2859
2860 static const char *fec_xdp_stat_strs[XDP_STATS_TOTAL] = {
2861 "rx_xdp_redirect", /* RX_XDP_REDIRECT = 0, */
2862 "rx_xdp_pass", /* RX_XDP_PASS, */
2863 "rx_xdp_drop", /* RX_XDP_DROP, */
2864 "rx_xdp_tx", /* RX_XDP_TX, */
2865 "rx_xdp_tx_errors", /* RX_XDP_TX_ERRORS, */
2866 "tx_xdp_xmit", /* TX_XDP_XMIT, */
2867 "tx_xdp_xmit_errors", /* TX_XDP_XMIT_ERRORS, */
2868 };
2869
2870 static void fec_enet_update_ethtool_stats(struct net_device *dev)
2871 {
2872 struct fec_enet_private *fep = netdev_priv(dev);
2873 int i;
2874
2875 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2876 fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
2877 }
2878
2879 static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
2880 {
2881 u64 xdp_stats[XDP_STATS_TOTAL] = { 0 };
2882 struct fec_enet_priv_rx_q *rxq;
2883 int i, j;
2884
2885 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2886 rxq = fep->rx_queue[i];
2887
2888 for (j = 0; j < XDP_STATS_TOTAL; j++)
2889 xdp_stats[j] += rxq->stats[j];
2890 }
2891
2892 memcpy(data, xdp_stats, sizeof(xdp_stats));
2893 }
2894
2895 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
2896 {
2897 #ifdef CONFIG_PAGE_POOL_STATS
2898 struct page_pool_stats stats = {};
2899 struct fec_enet_priv_rx_q *rxq;
2900 int i;
2901
2902 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2903 rxq = fep->rx_queue[i];
2904
2905 if (!rxq->page_pool)
2906 continue;
2907
2908 page_pool_get_stats(rxq->page_pool, &stats);
2909 }
2910
2911 page_pool_ethtool_stats_get(data, &stats);
2912 #endif
2913 }
2914
2915 static void fec_enet_get_ethtool_stats(struct net_device *dev,
2916 struct ethtool_stats *stats, u64 *data)
2917 {
2918 struct fec_enet_private *fep = netdev_priv(dev);
2919
2920 if (netif_running(dev))
2921 fec_enet_update_ethtool_stats(dev);
2922
2923 memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
2924 data += FEC_STATS_SIZE / sizeof(u64);
2925
2926 fec_enet_get_xdp_stats(fep, data);
2927 data += XDP_STATS_TOTAL;
2928
2929 fec_enet_page_pool_stats(fep, data);
2930 }
2931
2932 static void fec_enet_get_strings(struct net_device *netdev,
2933 u32 stringset, u8 *data)
2934 {
2935 int i;
2936 switch (stringset) {
2937 case ETH_SS_STATS:
2938 for (i = 0; i < ARRAY_SIZE(fec_stats); i++) {
2939 memcpy(data, fec_stats[i].name, ETH_GSTRING_LEN);
2940 data += ETH_GSTRING_LEN;
2941 }
2942 for (i = 0; i < ARRAY_SIZE(fec_xdp_stat_strs); i++) {
2943 strncpy(data, fec_xdp_stat_strs[i], ETH_GSTRING_LEN);
2944 data += ETH_GSTRING_LEN;
2945 }
2946 page_pool_ethtool_stats_get_strings(data);
2947
2948 break;
2949 case ETH_SS_TEST:
2950 net_selftest_get_strings(data);
2951 break;
2952 }
2953 }
2954
2955 static int fec_enet_get_sset_count(struct net_device *dev, int sset)
2956 {
2957 int count;
2958
2959 switch (sset) {
2960 case ETH_SS_STATS:
2961 count = ARRAY_SIZE(fec_stats) + XDP_STATS_TOTAL;
2962 count += page_pool_ethtool_stats_get_count();
2963 return count;
2964
2965 case ETH_SS_TEST:
2966 return net_selftest_get_count();
2967 default:
2968 return -EOPNOTSUPP;
2969 }
2970 }
2971
2972 static void fec_enet_clear_ethtool_stats(struct net_device *dev)
2973 {
2974 struct fec_enet_private *fep = netdev_priv(dev);
2975 struct fec_enet_priv_rx_q *rxq;
2976 int i, j;
2977
2978 /* Disable MIB statistics counters */
2979 writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
2980
2981 for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
2982 writel(0, fep->hwp + fec_stats[i].offset);
2983
2984 for (i = fep->num_rx_queues - 1; i >= 0; i--) {
2985 rxq = fep->rx_queue[i];
2986 for (j = 0; j < XDP_STATS_TOTAL; j++)
2987 rxq->stats[j] = 0;
2988 }
2989
2990 /* Don't disable MIB statistics counters */
2991 writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
2992 }
2993
2994 #else /* !defined(CONFIG_M5272) */
2995 #define FEC_STATS_SIZE 0
2996 static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
2997 {
2998 }
2999
3000 static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
3001 {
3002 }
3003 #endif /* !defined(CONFIG_M5272) */
3004
3005 /* The ITR clock source is the enet system clock (clk_ahb).
3006  * The TCTT unit is cycle_ns * 64 cycles, so the ICTT value
3007  * is X us / (cycle_ns * 64).
3008 */
3009 static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
3010 {
3011 struct fec_enet_private *fep = netdev_priv(ndev);
3012
3013 return us * (fep->itr_clk_rate / 64000) / 1000;
3014 }
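/* Worked example, assuming itr_clk_rate = 66 MHz:
 *	fec_enet_us_to_itr_clock(ndev, 100)
 *		= 100 * (66000000 / 64000) / 1000
 *		= 100 * 1031 / 1000 = 103 ticks of 64 cycles each
 */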
3015
3016 /* Set threshold for interrupt coalescing */
3017 static void fec_enet_itr_coal_set(struct net_device *ndev)
3018 {
3019 struct fec_enet_private *fep = netdev_priv(ndev);
3020 int rx_itr, tx_itr;
3021
3022 /* Must be greater than zero to avoid unpredictable behavior */
3023 if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
3024 !fep->tx_time_itr || !fep->tx_pkts_itr)
3025 return;
3026
3027 /* Select enet system clock as Interrupt Coalescing
3028 * timer Clock Source
3029 */
3030 rx_itr = FEC_ITR_CLK_SEL;
3031 tx_itr = FEC_ITR_CLK_SEL;
3032
3033 /* set ICFT and ICTT */
3034 rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
3035 rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
3036 tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
3037 tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
3038
3039 rx_itr |= FEC_ITR_EN;
3040 tx_itr |= FEC_ITR_EN;
3041
3042 writel(tx_itr, fep->hwp + FEC_TXIC0);
3043 writel(rx_itr, fep->hwp + FEC_RXIC0);
3044 if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
3045 writel(tx_itr, fep->hwp + FEC_TXIC1);
3046 writel(rx_itr, fep->hwp + FEC_RXIC1);
3047 writel(tx_itr, fep->hwp + FEC_TXIC2);
3048 writel(rx_itr, fep->hwp + FEC_RXIC2);
3049 }
3050 }
3051
3052 static int fec_enet_get_coalesce(struct net_device *ndev,
3053 struct ethtool_coalesce *ec,
3054 struct kernel_ethtool_coalesce *kernel_coal,
3055 struct netlink_ext_ack *extack)
3056 {
3057 struct fec_enet_private *fep = netdev_priv(ndev);
3058
3059 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3060 return -EOPNOTSUPP;
3061
3062 ec->rx_coalesce_usecs = fep->rx_time_itr;
3063 ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
3064
3065 ec->tx_coalesce_usecs = fep->tx_time_itr;
3066 ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
3067
3068 return 0;
3069 }
3070
3071 static int fec_enet_set_coalesce(struct net_device *ndev,
3072 struct ethtool_coalesce *ec,
3073 struct kernel_ethtool_coalesce *kernel_coal,
3074 struct netlink_ext_ack *extack)
3075 {
3076 struct fec_enet_private *fep = netdev_priv(ndev);
3077 struct device *dev = &fep->pdev->dev;
3078 unsigned int cycle;
3079
3080 if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
3081 return -EOPNOTSUPP;
3082
3083 if (ec->rx_max_coalesced_frames > 255) {
3084 dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
3085 return -EINVAL;
3086 }
3087
3088 if (ec->tx_max_coalesced_frames > 255) {
3089 dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
3090 return -EINVAL;
3091 }
3092
3093 cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
3094 if (cycle > 0xFFFF) {
3095 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
3096 return -EINVAL;
3097 }
3098
3099 cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
3100 if (cycle > 0xFFFF) {
3101 dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
3102 return -EINVAL;
3103 }
3104
3105 fep->rx_time_itr = ec->rx_coalesce_usecs;
3106 fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
3107
3108 fep->tx_time_itr = ec->tx_coalesce_usecs;
3109 fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
3110
3111 fec_enet_itr_coal_set(ndev);
3112
3113 return 0;
3114 }
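/* From userspace these limits map onto the standard ethtool coalescing
 * parameters, e.g. (interface name is illustrative):
 *
 *	ethtool -C eth0 rx-usecs 100 rx-frames 32 tx-usecs 100 tx-frames 32
 *
 * The frame counts must fit in 8 bits (<= 255) and the converted
 * microsecond values must fit in the 16-bit ICTT field (<= 0xFFFF).
 */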
3115
3116 /* The LPI sleep Ts count is based on the tx clock (clk_ref).
3117  * The LPI sleep cnt value = X us / cycle_ns.
3118 */
3119 static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us)
3120 {
3121 struct fec_enet_private *fep = netdev_priv(ndev);
3122
3123 return us * (fep->clk_ref_rate / 1000) / 1000;
3124 }
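/* Worked example, assuming clk_ref_rate = 50 MHz:
 *	fec_enet_us_to_tx_cycle(ndev, 20)
 *		= 20 * (50000000 / 1000) / 1000 = 1000 tx clock cycles,
 * which is the kind of value fec_enet_eee_mode_set() below programs into
 * FEC_LPI_SLEEP and FEC_LPI_WAKE.
 */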
3125
3126 static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable)
3127 {
3128 struct fec_enet_private *fep = netdev_priv(ndev);
3129 struct ethtool_eee *p = &fep->eee;
3130 unsigned int sleep_cycle, wake_cycle;
3131 int ret = 0;
3132
3133 if (enable) {
3134 ret = phy_init_eee(ndev->phydev, false);
3135 if (ret)
3136 return ret;
3137
3138 sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer);
3139 wake_cycle = sleep_cycle;
3140 } else {
3141 sleep_cycle = 0;
3142 wake_cycle = 0;
3143 }
3144
3145 p->tx_lpi_enabled = enable;
3146 p->eee_enabled = enable;
3147 p->eee_active = enable;
3148
3149 writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP);
3150 writel(wake_cycle, fep->hwp + FEC_LPI_WAKE);
3151
3152 return 0;
3153 }
3154
3155 static int
3156 fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
3157 {
3158 struct fec_enet_private *fep = netdev_priv(ndev);
3159 struct ethtool_eee *p = &fep->eee;
3160
3161 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3162 return -EOPNOTSUPP;
3163
3164 if (!netif_running(ndev))
3165 return -ENETDOWN;
3166
3167 edata->eee_enabled = p->eee_enabled;
3168 edata->eee_active = p->eee_active;
3169 edata->tx_lpi_timer = p->tx_lpi_timer;
3170 edata->tx_lpi_enabled = p->tx_lpi_enabled;
3171
3172 return phy_ethtool_get_eee(ndev->phydev, edata);
3173 }
3174
3175 static int
3176 fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
3177 {
3178 struct fec_enet_private *fep = netdev_priv(ndev);
3179 struct ethtool_eee *p = &fep->eee;
3180 int ret = 0;
3181
3182 if (!(fep->quirks & FEC_QUIRK_HAS_EEE))
3183 return -EOPNOTSUPP;
3184
3185 if (!netif_running(ndev))
3186 return -ENETDOWN;
3187
3188 p->tx_lpi_timer = edata->tx_lpi_timer;
3189
3190 if (!edata->eee_enabled || !edata->tx_lpi_enabled ||
3191 !edata->tx_lpi_timer)
3192 ret = fec_enet_eee_mode_set(ndev, false);
3193 else
3194 ret = fec_enet_eee_mode_set(ndev, true);
3195
3196 if (ret)
3197 return ret;
3198
3199 return phy_ethtool_set_eee(ndev->phydev, edata);
3200 }
3201
3202 static void
3203 fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3204 {
3205 struct fec_enet_private *fep = netdev_priv(ndev);
3206
3207 if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
3208 wol->supported = WAKE_MAGIC;
3209 wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
3210 } else {
3211 wol->supported = wol->wolopts = 0;
3212 }
3213 }
3214
3215 static int
3216 fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
3217 {
3218 struct fec_enet_private *fep = netdev_priv(ndev);
3219
3220 if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
3221 return -EINVAL;
3222
3223 if (wol->wolopts & ~WAKE_MAGIC)
3224 return -EINVAL;
3225
3226 device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
3227 if (device_may_wakeup(&ndev->dev))
3228 fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
3229 else
3230 fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
3231
3232 return 0;
3233 }
3234
3235 static const struct ethtool_ops fec_enet_ethtool_ops = {
3236 .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
3237 ETHTOOL_COALESCE_MAX_FRAMES,
3238 .get_drvinfo = fec_enet_get_drvinfo,
3239 .get_regs_len = fec_enet_get_regs_len,
3240 .get_regs = fec_enet_get_regs,
3241 .nway_reset = phy_ethtool_nway_reset,
3242 .get_link = ethtool_op_get_link,
3243 .get_coalesce = fec_enet_get_coalesce,
3244 .set_coalesce = fec_enet_set_coalesce,
3245 #ifndef CONFIG_M5272
3246 .get_pauseparam = fec_enet_get_pauseparam,
3247 .set_pauseparam = fec_enet_set_pauseparam,
3248 .get_strings = fec_enet_get_strings,
3249 .get_ethtool_stats = fec_enet_get_ethtool_stats,
3250 .get_sset_count = fec_enet_get_sset_count,
3251 #endif
3252 .get_ts_info = fec_enet_get_ts_info,
3253 .get_wol = fec_enet_get_wol,
3254 .set_wol = fec_enet_set_wol,
3255 .get_eee = fec_enet_get_eee,
3256 .set_eee = fec_enet_set_eee,
3257 .get_link_ksettings = phy_ethtool_get_link_ksettings,
3258 .set_link_ksettings = phy_ethtool_set_link_ksettings,
3259 .self_test = net_selftest,
3260 };
3261
3262 static void fec_enet_free_buffers(struct net_device *ndev)
3263 {
3264 struct fec_enet_private *fep = netdev_priv(ndev);
3265 unsigned int i;
3266 struct fec_enet_priv_tx_q *txq;
3267 struct fec_enet_priv_rx_q *rxq;
3268 unsigned int q;
3269
3270 for (q = 0; q < fep->num_rx_queues; q++) {
3271 rxq = fep->rx_queue[q];
3272 for (i = 0; i < rxq->bd.ring_size; i++)
3273 page_pool_put_full_page(rxq->page_pool, rxq->rx_skb_info[i].page, false);
3274
3275 for (i = 0; i < XDP_STATS_TOTAL; i++)
3276 rxq->stats[i] = 0;
3277
3278 if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
3279 xdp_rxq_info_unreg(&rxq->xdp_rxq);
3280 page_pool_destroy(rxq->page_pool);
3281 rxq->page_pool = NULL;
3282 }
3283
3284 for (q = 0; q < fep->num_tx_queues; q++) {
3285 txq = fep->tx_queue[q];
3286 for (i = 0; i < txq->bd.ring_size; i++) {
3287 kfree(txq->tx_bounce[i]);
3288 txq->tx_bounce[i] = NULL;
3289
3290 if (!txq->tx_buf[i].buf_p) {
3291 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3292 continue;
3293 }
3294
3295 if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
3296 dev_kfree_skb(txq->tx_buf[i].buf_p);
3297 } else if (txq->tx_buf[i].type == FEC_TXBUF_T_XDP_NDO) {
3298 xdp_return_frame(txq->tx_buf[i].buf_p);
3299 } else {
3300 struct page *page = txq->tx_buf[i].buf_p;
3301
3302 page_pool_put_page(page->pp, page, 0, false);
3303 }
3304
3305 txq->tx_buf[i].buf_p = NULL;
3306 txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
3307 }
3308 }
3309 }
3310
3311 static void fec_enet_free_queue(struct net_device *ndev)
3312 {
3313 struct fec_enet_private *fep = netdev_priv(ndev);
3314 int i;
3315 struct fec_enet_priv_tx_q *txq;
3316
3317 for (i = 0; i < fep->num_tx_queues; i++)
3318 if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
3319 txq = fep->tx_queue[i];
3320 dma_free_coherent(&fep->pdev->dev,
3321 txq->bd.ring_size * TSO_HEADER_SIZE,
3322 txq->tso_hdrs,
3323 txq->tso_hdrs_dma);
3324 }
3325
3326 for (i = 0; i < fep->num_rx_queues; i++)
3327 kfree(fep->rx_queue[i]);
3328 for (i = 0; i < fep->num_tx_queues; i++)
3329 kfree(fep->tx_queue[i]);
3330 }
3331
3332 static int fec_enet_alloc_queue(struct net_device *ndev)
3333 {
3334 struct fec_enet_private *fep = netdev_priv(ndev);
3335 int i;
3336 int ret = 0;
3337 struct fec_enet_priv_tx_q *txq;
3338
3339 for (i = 0; i < fep->num_tx_queues; i++) {
3340 txq = kzalloc(sizeof(*txq), GFP_KERNEL);
3341 if (!txq) {
3342 ret = -ENOMEM;
3343 goto alloc_failed;
3344 }
3345
3346 fep->tx_queue[i] = txq;
3347 txq->bd.ring_size = TX_RING_SIZE;
3348 fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
3349
3350 txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
3351 txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
3352
3353 txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
3354 txq->bd.ring_size * TSO_HEADER_SIZE,
3355 &txq->tso_hdrs_dma,
3356 GFP_KERNEL);
3357 if (!txq->tso_hdrs) {
3358 ret = -ENOMEM;
3359 goto alloc_failed;
3360 }
3361 }
3362
3363 for (i = 0; i < fep->num_rx_queues; i++) {
3364 fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
3365 GFP_KERNEL);
3366 if (!fep->rx_queue[i]) {
3367 ret = -ENOMEM;
3368 goto alloc_failed;
3369 }
3370
3371 fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
3372 fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
3373 }
3374 return ret;
3375
3376 alloc_failed:
3377 fec_enet_free_queue(ndev);
3378 return ret;
3379 }
3380
3381 static int
3382 fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
3383 {
3384 struct fec_enet_private *fep = netdev_priv(ndev);
3385 struct fec_enet_priv_rx_q *rxq;
3386 dma_addr_t phys_addr;
3387 struct bufdesc *bdp;
3388 struct page *page;
3389 int i, err;
3390
3391 rxq = fep->rx_queue[queue];
3392 bdp = rxq->bd.base;
3393
3394 err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
3395 if (err < 0) {
3396 netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
3397 return err;
3398 }
3399
3400 for (i = 0; i < rxq->bd.ring_size; i++) {
3401 page = page_pool_dev_alloc_pages(rxq->page_pool);
3402 if (!page)
3403 goto err_alloc;
3404
3405 phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
3406 bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
3407
3408 rxq->rx_skb_info[i].page = page;
3409 rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
3410 bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
3411
3412 if (fep->bufdesc_ex) {
3413 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3414 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
3415 }
3416
3417 bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
3418 }
3419
3420 /* Set the last buffer to wrap. */
3421 bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
3422 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3423 return 0;
3424
3425 err_alloc:
3426 fec_enet_free_buffers(ndev);
3427 return -ENOMEM;
3428 }
3429
3430 static int
3431 fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
3432 {
3433 struct fec_enet_private *fep = netdev_priv(ndev);
3434 unsigned int i;
3435 struct bufdesc *bdp;
3436 struct fec_enet_priv_tx_q *txq;
3437
3438 txq = fep->tx_queue[queue];
3439 bdp = txq->bd.base;
3440 for (i = 0; i < txq->bd.ring_size; i++) {
3441 txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
3442 if (!txq->tx_bounce[i])
3443 goto err_alloc;
3444
3445 bdp->cbd_sc = cpu_to_fec16(0);
3446 bdp->cbd_bufaddr = cpu_to_fec32(0);
3447
3448 if (fep->bufdesc_ex) {
3449 struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
3450 ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
3451 }
3452
3453 bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
3454 }
3455
3456 /* Set the last buffer to wrap. */
3457 bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
3458 bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
3459
3460 return 0;
3461
3462 err_alloc:
3463 fec_enet_free_buffers(ndev);
3464 return -ENOMEM;
3465 }
3466
3467 static int fec_enet_alloc_buffers(struct net_device *ndev)
3468 {
3469 struct fec_enet_private *fep = netdev_priv(ndev);
3470 unsigned int i;
3471
3472 for (i = 0; i < fep->num_rx_queues; i++)
3473 if (fec_enet_alloc_rxq_buffers(ndev, i))
3474 return -ENOMEM;
3475
3476 for (i = 0; i < fep->num_tx_queues; i++)
3477 if (fec_enet_alloc_txq_buffers(ndev, i))
3478 return -ENOMEM;
3479 return 0;
3480 }
3481
3482 static int
3483 fec_enet_open(struct net_device *ndev)
3484 {
3485 struct fec_enet_private *fep = netdev_priv(ndev);
3486 int ret;
3487 bool reset_again;
3488
3489 ret = pm_runtime_resume_and_get(&fep->pdev->dev);
3490 if (ret < 0)
3491 return ret;
3492
3493 pinctrl_pm_select_default_state(&fep->pdev->dev);
3494 ret = fec_enet_clk_enable(ndev, true);
3495 if (ret)
3496 goto clk_enable;
3497
3498 	/* During the first fec_enet_open call the PHY is not probed yet,
3499 	 * so the phy_reset_after_clk_enable() call within
3500 	 * fec_enet_clk_enable() fails. As we need this reset to be sure
3501 	 * the PHY is working correctly, check whether we need to reset
3502 	 * again later, once the PHY has been probed.
3503 	 */
3504 if (ndev->phydev && ndev->phydev->drv)
3505 reset_again = false;
3506 else
3507 reset_again = true;
3508
3509 /* I should reset the ring buffers here, but I don't yet know
3510 * a simple way to do that.
3511 */
3512
3513 ret = fec_enet_alloc_buffers(ndev);
3514 if (ret)
3515 goto err_enet_alloc;
3516
3517 /* Init MAC prior to mii bus probe */
3518 fec_restart(ndev);
3519
3520 	/* Call fec_enet_phy_reset_after_clk_enable() again if the earlier
3521 	 * reset attempt failed because the PHY had not been probed yet.
3522 	 */
3523 if (reset_again)
3524 fec_enet_phy_reset_after_clk_enable(ndev);
3525
3526 	/* Probe and connect to the PHY when opening the interface */
3527 ret = fec_enet_mii_probe(ndev);
3528 if (ret)
3529 goto err_enet_mii_probe;

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_used();

	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_add_request(&fep->pm_qos_req, 0);

	napi_enable(&fep->napi);
	phy_start(ndev->phydev);
	netif_tx_start_all_queues(ndev);

	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
				 FEC_WOL_FLAG_ENABLE);

	return 0;

err_enet_mii_probe:
	fec_enet_free_buffers(ndev);
err_enet_alloc:
	fec_enet_clk_enable(ndev, false);
clk_enable:
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);
	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	return ret;
}

static int
fec_enet_close(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	phy_stop(ndev->phydev);

	if (netif_device_present(ndev)) {
		napi_disable(&fep->napi);
		netif_tx_disable(ndev);
		fec_stop(ndev);
	}

	phy_disconnect(ndev->phydev);

	if (fep->quirks & FEC_QUIRK_ERR006687)
		imx6q_cpuidle_fec_irqs_unused();

	fec_enet_update_ethtool_stats(ndev);

	fec_enet_clk_enable(ndev, false);
	if (fep->quirks & FEC_QUIRK_HAS_PMQOS)
		cpu_latency_qos_remove_request(&fep->pm_qos_req);

	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
	pm_runtime_mark_last_busy(&fep->pdev->dev);
	pm_runtime_put_autosuspend(&fep->pdev->dev);

	fec_enet_free_buffers(ndev);

	return 0;
}

/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering. Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not. I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define FEC_HASH_BITS	6	/* #bits in hash */

static void set_multicast_list(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct netdev_hw_addr *ha;
	unsigned int crc, tmp;
	unsigned char hash;
	unsigned int hash_high = 0, hash_low = 0;

	if (ndev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (ndev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Add the addresses to the hash registers */
	netdev_for_each_mc_addr(ha, ndev) {
		/* calculate crc32 value of mac address */
		crc = ether_crc_le(ndev->addr_len, ha->addr);

		/* only the upper 6 bits (FEC_HASH_BITS) are used,
		 * and they select a specific bit in the hash registers
		 */
		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
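		/* Illustrative example: crc = 0x9abcdef0 yields
		 * hash = (crc >> 26) & 0x3f = 38, so bit 38 - 32 = 6
		 * of FEC_GRP_HASH_TABLE_HIGH gets set below.
		 */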

		if (hash > 31)
			hash_high |= 1 << (hash - 32);
		else
			hash_low |= 1 << hash;
	}

	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}

/* Set a MAC change in hardware. */
static int
fec_set_mac_address(struct net_device *ndev, void *p)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct sockaddr *addr = p;

	if (addr) {
		if (!is_valid_ether_addr(addr->sa_data))
			return -EADDRNOTAVAIL;
		eth_hw_addr_set(ndev, addr->sa_data);
	}

	/* Check the interface state here to avoid a system hang in the
	 * following case:
	 *   ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx
	 * After ethx is down, all FEC clocks are gated off, so a register
	 * access would hang the system.
	 */
	if (!netif_running(ndev))
		return 0;

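	/* Program the station address. Illustrative layout: for MAC
	 * 00:11:22:33:44:55, FEC_ADDR_LOW ends up as 0x00112233 and the
	 * top 16 bits of FEC_ADDR_HIGH as 0x4455.
	 */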
	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
	       (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
	       fep->hwp + FEC_ADDR_LOW);
	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
	       fep->hwp + FEC_ADDR_HIGH);
	return 0;
}

static inline void fec_enet_set_netdev_features(struct net_device *netdev,
						netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	netdev->features = features;

	/* Receive checksum has been changed */
	if (changed & NETIF_F_RXCSUM) {
		if (features & NETIF_F_RXCSUM)
			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
		else
			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
	}
}

static int fec_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	struct fec_enet_private *fep = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (netif_running(netdev) && changed & NETIF_F_RXCSUM) {
		napi_disable(&fep->napi);
		netif_tx_lock_bh(netdev);
		fec_stop(netdev);
		fec_enet_set_netdev_features(netdev, features);
		fec_restart(netdev);
		netif_tx_wake_all_queues(netdev);
		netif_tx_unlock_bh(netdev);
		napi_enable(&fep->napi);
	} else {
		fec_enet_set_netdev_features(netdev, features);
	}

	return 0;
}

static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
				 struct net_device *sb_dev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	u16 vlan_tag = 0;

	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
		return netdev_pick_tx(ndev, skb, NULL);

	/* VLAN is present in the payload. */
	if (eth_type_vlan(skb->protocol)) {
		struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb);

		vlan_tag = ntohs(vhdr->h_vlan_TCI);
	/* VLAN is present in the skb but not yet pushed in the payload. */
	} else if (skb_vlan_tag_present(skb)) {
		vlan_tag = skb->vlan_tci;
	} else {
		return vlan_tag;
	}

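	/* The PCP priority lives in TCI bits 15:13, hence the shift by 13;
	 * e.g. (illustrative) TCI 0xa005 yields priority 5, which indexes
	 * fec_enet_vlan_pri_to_queue below.
	 */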
	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
}

static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	bool is_run = netif_running(dev);
	struct bpf_prog *old_prog;

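	/* For XDP_SETUP_PROG the sequence below quiesces the datapath
	 * (NAPI disabled, TX queues stopped) before atomically swapping
	 * the program pointer with xchg(), then lets fec_restart()
	 * reinitialize the rings before traffic resumes.
	 */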
	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* There is no need to support SoCs that require the frame
		 * swap, because XDP performance there would be no better
		 * than skb mode.
		 */
		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
			return -EOPNOTSUPP;

		if (!bpf->prog)
			xdp_features_clear_redirect_target(dev);

		if (is_run) {
			napi_disable(&fep->napi);
			netif_tx_disable(dev);
		}

		old_prog = xchg(&fep->xdp_prog, bpf->prog);
		if (old_prog)
			bpf_prog_put(old_prog);

		fec_restart(dev);

		if (is_run) {
			napi_enable(&fep->napi);
			netif_tx_start_all_queues(dev);
		}

		if (bpf->prog)
			xdp_features_set_redirect_target(dev, false);

		return 0;

	case XDP_SETUP_XSK_POOL:
		return -EOPNOTSUPP;

	default:
		return -EOPNOTSUPP;
	}
}

static int
fec_enet_xdp_get_tx_queue(struct fec_enet_private *fep, int index)
{
	if (unlikely(index < 0))
		return 0;

	return (index % fep->num_tx_queues);
}

static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
				   struct fec_enet_priv_tx_q *txq,
				   void *frame, u32 dma_sync_len,
				   bool ndo_xmit)
{
	unsigned int index, status, estatus;
	struct bufdesc *bdp;
	dma_addr_t dma_addr;
	int entries_free;
	u16 frame_len;

	entries_free = fec_enet_get_free_txdesc_num(txq);
	if (entries_free < MAX_SKB_FRAGS + 1) {
		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
		return -EBUSY;
	}

	/* Fill in a Tx ring entry */
	bdp = txq->bd.cur;
	status = fec16_to_cpu(bdp->cbd_sc);
	status &= ~BD_ENET_TX_STATS;

	index = fec_enet_get_bd_index(bdp, &txq->bd);

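	/* Two mapping strategies follow: frames from ndo_xdp_xmit() live in
	 * ordinary kernel memory and need a fresh streaming DMA mapping,
	 * while XDP_TX buffers sit in page_pool pages that were DMA-mapped
	 * at allocation time and only need a partial sync.
	 */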
	if (ndo_xmit) {
		struct xdp_frame *xdpf = frame;

		dma_addr = dma_map_single(&fep->pdev->dev, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(&fep->pdev->dev, dma_addr))
			return -ENOMEM;

		frame_len = xdpf->len;
		txq->tx_buf[index].buf_p = xdpf;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
	} else {
		struct xdp_buff *xdpb = frame;
		struct page *page;

		page = virt_to_page(xdpb->data);
		dma_addr = page_pool_get_dma_addr(page) +
			   (xdpb->data - xdpb->data_hard_start);
		dma_sync_single_for_device(&fep->pdev->dev, dma_addr,
					   dma_sync_len, DMA_BIDIRECTIONAL);
		frame_len = xdpb->data_end - xdpb->data;
		txq->tx_buf[index].buf_p = page;
		txq->tx_buf[index].type = FEC_TXBUF_T_XDP_TX;
	}

	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
	if (fep->bufdesc_ex)
		estatus = BD_ENET_TX_INT;

	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
	bdp->cbd_datlen = cpu_to_fec16(frame_len);

	if (fep->bufdesc_ex) {
		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

		if (fep->quirks & FEC_QUIRK_HAS_AVB)
			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);

		ebdp->cbd_bdu = 0;
		ebdp->cbd_esc = cpu_to_fec32(estatus);
	}

	/* Make sure the updates to rest of the descriptor are performed before
	 * transferring ownership.
	 */
	dma_wmb();

	/* Send it on its way. Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
	bdp->cbd_sc = cpu_to_fec16(status);

	/* If this was the last BD in the ring, start at the beginning again. */
	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);

	/* Make sure the update to bdp is performed before txq->bd.cur. */
	dma_wmb();

	txq->bd.cur = bdp;

	/* Trigger transmission start */
	writel(0, txq->bd.reg_desc_active);

	return 0;
}

static int fec_enet_xdp_tx_xmit(struct fec_enet_private *fep,
				int cpu, struct xdp_buff *xdp,
				u32 dma_sync_len)
{
	struct fec_enet_priv_tx_q *txq;
	struct netdev_queue *nq;
	int queue, ret;

	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(fep->netdev, queue);

	__netif_tx_lock(nq, cpu);

	/* Avoid tx timeout as XDP shares the queue with kernel stack */
	txq_trans_cond_update(nq);
	ret = fec_enet_txq_xmit_frame(fep, txq, xdp, dma_sync_len, false);

	__netif_tx_unlock(nq);

	return ret;
}

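/* .ndo_xdp_xmit implementation. Returns the number of frames actually
 * queued; per the ndo_xdp_xmit contract, the caller remains responsible
 * for releasing any frames beyond that count.
 */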
static int fec_enet_xdp_xmit(struct net_device *dev,
			     int num_frames,
			     struct xdp_frame **frames,
			     u32 flags)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct fec_enet_priv_tx_q *txq;
	int cpu = smp_processor_id();
	unsigned int sent_frames = 0;
	struct netdev_queue *nq;
	unsigned int queue;
	int i;

	queue = fec_enet_xdp_get_tx_queue(fep, cpu);
	txq = fep->tx_queue[queue];
	nq = netdev_get_tx_queue(fep->netdev, queue);

	__netif_tx_lock(nq, cpu);

	/* Avoid tx timeout as XDP shares the queue with kernel stack */
	txq_trans_cond_update(nq);
	for (i = 0; i < num_frames; i++) {
		if (fec_enet_txq_xmit_frame(fep, txq, frames[i], 0, true) < 0)
			break;
		sent_frames++;
	}

	__netif_tx_unlock(nq);

	return sent_frames;
}

static int fec_hwtstamp_get(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!fep->bufdesc_ex)
		return -EOPNOTSUPP;

	fec_ptp_get(ndev, config);

	return 0;
}

static int fec_hwtstamp_set(struct net_device *ndev,
			    struct kernel_hwtstamp_config *config,
			    struct netlink_ext_ack *extack)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (!netif_running(ndev))
		return -EINVAL;

	if (!fep->bufdesc_ex)
		return -EOPNOTSUPP;

	return fec_ptp_set(ndev, config, extack);
}
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_select_queue	= fec_enet_select_queue,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_features	= fec_set_features,
	.ndo_bpf		= fec_enet_bpf,
	.ndo_xdp_xmit		= fec_enet_xdp_xmit,
	.ndo_hwtstamp_get	= fec_hwtstamp_get,
	.ndo_hwtstamp_set	= fec_hwtstamp_set,
};

static const unsigned short offset_des_active_rxq[] = {
	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
};

static const unsigned short offset_des_active_txq[] = {
	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
};

/* XXX: We need to clean up on failure exits here. */
static int fec_enet_init(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct bufdesc *cbd_base;
	dma_addr_t bd_dma;
	int bd_size;
	unsigned int i;
	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
			sizeof(struct bufdesc);
	unsigned dsize_log2 = __fls(dsize);
	int ret;

	WARN_ON(dsize != (1 << dsize_log2));
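	/* dsize should be 8 bytes for legacy descriptors and 32 for extended
	 * ones (assuming the usual fec.h layouts), i.e. dsize_log2 is 3 or 5;
	 * the WARN_ON above guards the power-of-two assumption that lets
	 * descriptor indexing use shifts instead of divisions.
	 */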
#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
	fep->rx_align = 0xf;
	fep->tx_align = 0xf;
#else
	fep->rx_align = 0x3;
	fep->tx_align = 0x3;
#endif
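	/* The masks above give 16-byte Rx/Tx buffer alignment on ARM/ARM64
	 * and 4-byte alignment elsewhere; note that the multi-queue branch
	 * further down raises rx_align to 0x3f (64 bytes).
	 */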
	fep->rx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->tx_pkts_itr = FEC_ITR_ICFT_DEFAULT;
	fep->rx_time_itr = FEC_ITR_ICTT_DEFAULT;
	fep->tx_time_itr = FEC_ITR_ICTT_DEFAULT;

	/* Check mask of the streaming and coherent API */
	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
	if (ret < 0) {
		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
		return ret;
	}

	ret = fec_enet_alloc_queue(ndev);
	if (ret)
		return ret;

	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
				       GFP_KERNEL);
	if (!cbd_base) {
		ret = -ENOMEM;
		goto free_queue_mem;
	}

	/* Get the Ethernet address */
	ret = fec_get_mac(ndev);
	if (ret)
		goto free_queue_mem;

	/* Set receive and transmit descriptor base. */
	for (i = 0; i < fep->num_rx_queues; i++) {
		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
		unsigned size = dsize * rxq->bd.ring_size;

		rxq->bd.qid = i;
		rxq->bd.base = cbd_base;
		rxq->bd.cur = cbd_base;
		rxq->bd.dma = bd_dma;
		rxq->bd.dsize = dsize;
		rxq->bd.dsize_log2 = dsize_log2;
		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}

	for (i = 0; i < fep->num_tx_queues; i++) {
		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
		unsigned size = dsize * txq->bd.ring_size;

		txq->bd.qid = i;
		txq->bd.base = cbd_base;
		txq->bd.cur = cbd_base;
		txq->bd.dma = bd_dma;
		txq->bd.dsize = dsize;
		txq->bd.dsize_log2 = dsize_log2;
		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
		bd_dma += size;
		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
	}
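	/* At this point the single dmam_alloc_coherent() block has been
	 * carved up sequentially: all Rx rings first, then all Tx rings,
	 * with each ring's bd.last left pointing at its final descriptor.
	 */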

	/* The FEC Ethernet specific entries in the device structure */
	ndev->watchdog_timeo = TX_TIMEOUT;
	ndev->netdev_ops = &fec_netdev_ops;
	ndev->ethtool_ops = &fec_enet_ethtool_ops;

	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
	netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);

	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
		/* enable hw VLAN support */
		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;

	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
		netif_set_tso_max_segs(ndev, FEC_MAX_TSO_SEGS);

		/* enable hw accelerator */
		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
	}

	if (fep->quirks & FEC_QUIRK_HAS_MULTI_QUEUES) {
		fep->tx_align = 0;
		fep->rx_align = 0x3f;
	}

	ndev->hw_features = ndev->features;

	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
				     NETDEV_XDP_ACT_REDIRECT;

	fec_restart(ndev);

	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
		fec_enet_clear_ethtool_stats(ndev);
	else
		fec_enet_update_ethtool_stats(ndev);

	return 0;

free_queue_mem:
	fec_enet_free_queue(ndev);
	return ret;
}

static void fec_enet_deinit(struct net_device *ndev)
{
	struct fec_enet_private *fep = netdev_priv(ndev);

	netif_napi_del(&fep->napi);
	fec_enet_free_queue(ndev);
}

#ifdef CONFIG_OF
static int fec_reset_phy(struct platform_device *pdev)
{
	struct gpio_desc *phy_reset;
	int msec = 1, phy_post_delay = 0;
	struct device_node *np = pdev->dev.of_node;
	int err;

	if (!np)
		return 0;

	err = of_property_read_u32(np, "phy-reset-duration", &msec);
	/* A sane reset duration should not be longer than 1s */
	if (!err && msec > 1000)
		msec = 1;

	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
	/* A valid post-reset delay should be less than 1s */
	if (!err && phy_post_delay > 1000)
		return -EINVAL;

	phy_reset = devm_gpiod_get_optional(&pdev->dev, "phy-reset",
					    GPIOD_OUT_HIGH);
	if (IS_ERR(phy_reset))
		return dev_err_probe(&pdev->dev, PTR_ERR(phy_reset),
				     "failed to get phy-reset-gpios\n");

	if (!phy_reset)
		return 0;

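	/* Following the usual kernel timer guidance, msleep() is used for
	 * delays above roughly 20 ms while usleep_range() gives better
	 * granularity for shorter ones; the post-delay below applies the
	 * same split.
	 */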
	if (msec > 20)
		msleep(msec);
	else
		usleep_range(msec * 1000, msec * 1000 + 1000);

	gpiod_set_value_cansleep(phy_reset, 0);

	if (!phy_post_delay)
		return 0;

	if (phy_post_delay > 20)
		msleep(phy_post_delay);
	else
		usleep_range(phy_post_delay * 1000,
			     phy_post_delay * 1000 + 1000);

	return 0;
}
#else /* CONFIG_OF */
static int fec_reset_phy(struct platform_device *pdev)
{
	/* In the case of a legacy platform probe, the reset has already
	 * been done by board-specific machine code.
	 */
	return 0;
}
#endif /* CONFIG_OF */

static void
fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
{
	struct device_node *np = pdev->dev.of_node;

	*num_tx = *num_rx = 1;

	if (!np || !of_device_is_available(np))
		return;

	/* parse the num of tx and rx queues */
	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);

	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);

	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
			 *num_tx);
		*num_tx = 1;
		return;
	}

	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
			 *num_rx);
		*num_rx = 1;
		return;
	}
}

static int fec_enet_get_irq_cnt(struct platform_device *pdev)
{
	int irq_cnt = platform_irq_count(pdev);

	if (irq_cnt > FEC_IRQ_NUM)
		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
	else if (irq_cnt == 2)
		irq_cnt = 1;	/* last for pps */
	else if (irq_cnt <= 0)
		irq_cnt = 1;	/* At least 1 irq is needed */
	return irq_cnt;
}

static void fec_enet_get_wakeup_irq(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2)
		fep->wake_irq = fep->irq[2];
	else
		fep->wake_irq = fep->irq[0];
}

static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
				   struct device_node *np)
{
	struct device_node *gpr_np;
	u32 out_val[3];
	int ret = 0;

	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
	if (!gpr_np)
		return 0;

	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
					 ARRAY_SIZE(out_val));
	if (ret) {
		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
		goto out;
	}

	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
	if (IS_ERR(fep->stop_gpr.gpr)) {
		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
		ret = PTR_ERR(fep->stop_gpr.gpr);
		fep->stop_gpr.gpr = NULL;
		goto out;
	}

	fep->stop_gpr.reg = out_val[1];
	fep->stop_gpr.bit = out_val[2];

out:
	of_node_put(gpr_np);

	return ret;
}

static int
fec_probe(struct platform_device *pdev)
{
	struct fec_enet_private *fep;
	struct fec_platform_data *pdata;
	phy_interface_t interface;
	struct net_device *ndev;
	int i, irq, ret = 0;
	const struct of_device_id *of_id;
	static int dev_id;
	struct device_node *np = pdev->dev.of_node, *phy_node;
	int num_tx_qs;
	int num_rx_qs;
	char irq_name[8];
	int irq_cnt;
	struct fec_devinfo *dev_info;

	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);

	/* Init network device */
	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* setup board info structure */
	fep = netdev_priv(ndev);

	of_id = of_match_device(fec_dt_ids, &pdev->dev);
	if (of_id)
		pdev->id_entry = of_id->data;
	dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
	if (dev_info)
		fep->quirks = dev_info->quirks;

	fep->netdev = ndev;
	fep->num_rx_queues = num_rx_qs;
	fep->num_tx_queues = num_tx_qs;

#if !defined(CONFIG_M5272)
	/* default enable pause frame auto negotiation */
	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif

	/* Select default pin state */
	pinctrl_pm_select_default_state(&pdev->dev);

	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(fep->hwp)) {
		ret = PTR_ERR(fep->hwp);
		goto failed_ioremap;
	}

	fep->pdev = pdev;
	fep->dev_id = dev_id++;

	platform_set_drvdata(pdev, ndev);

	if ((of_machine_is_compatible("fsl,imx6q") ||
	     of_machine_is_compatible("fsl,imx6dl")) &&
	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
		fep->quirks |= FEC_QUIRK_ERR006687;

	ret = fec_enet_ipc_handle_init(fep);
	if (ret)
		goto failed_ipc_init;

	if (of_property_read_bool(np, "fsl,magic-packet"))
		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;

	ret = fec_enet_init_stop_mode(fep, np);
	if (ret)
		goto failed_stop_mode;

	phy_node = of_parse_phandle(np, "phy-handle", 0);
	if (!phy_node && of_phy_is_fixed_link(np)) {
		ret = of_phy_register_fixed_link(np);
		if (ret < 0) {
			dev_err(&pdev->dev,
				"broken fixed-link specification\n");
			goto failed_phy;
		}
		phy_node = of_node_get(np);
	}
	fep->phy_node = phy_node;

	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
	if (ret) {
		pdata = dev_get_platdata(&pdev->dev);
		if (pdata)
			fep->phy_interface = pdata->phy;
		else
			fep->phy_interface = PHY_INTERFACE_MODE_MII;
	} else {
		fep->phy_interface = interface;
	}

	ret = fec_enet_parse_rgmii_delay(fep, np);
	if (ret)
		goto failed_rgmii_delay;

	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(fep->clk_ipg)) {
		ret = PTR_ERR(fep->clk_ipg);
		goto failed_clk;
	}

	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
	if (IS_ERR(fep->clk_ahb)) {
		ret = PTR_ERR(fep->clk_ahb);
		goto failed_clk;
	}

	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);

	/* enet_out is optional, depends on board */
	fep->clk_enet_out = devm_clk_get_optional(&pdev->dev, "enet_out");
	if (IS_ERR(fep->clk_enet_out)) {
		ret = PTR_ERR(fep->clk_enet_out);
		goto failed_clk;
	}

	fep->ptp_clk_on = false;
	mutex_init(&fep->ptp_clk_mutex);

	/* clk_ref is optional, depends on board */
	fep->clk_ref = devm_clk_get_optional(&pdev->dev, "enet_clk_ref");
	if (IS_ERR(fep->clk_ref)) {
		ret = PTR_ERR(fep->clk_ref);
		goto failed_clk;
	}
	fep->clk_ref_rate = clk_get_rate(fep->clk_ref);

	/* clk_2x_txclk is optional, depends on board */
	if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) {
		fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk");
		if (IS_ERR(fep->clk_2x_txclk))
			fep->clk_2x_txclk = NULL;
	}

	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
	if (IS_ERR(fep->clk_ptp)) {
		fep->clk_ptp = NULL;
		fep->bufdesc_ex = false;
	}

	ret = fec_enet_clk_enable(ndev, true);
	if (ret)
		goto failed_clk;

	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;
	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		goto failed_clk_ahb;

	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
	if (!IS_ERR(fep->reg_phy)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret) {
			dev_err(&pdev->dev,
				"Failed to enable phy regulator: %d\n", ret);
			goto failed_regulator;
		}
	} else {
		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto failed_regulator;
		}
		fep->reg_phy = NULL;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = fec_reset_phy(pdev);
	if (ret)
		goto failed_reset;

	irq_cnt = fec_enet_get_irq_cnt(pdev);
	if (fep->bufdesc_ex)
		fec_ptp_init(pdev, irq_cnt);

	ret = fec_enet_init(ndev);
	if (ret)
		goto failed_init;

	for (i = 0; i < irq_cnt; i++) {
		snprintf(irq_name, sizeof(irq_name), "int%d", i);
		irq = platform_get_irq_byname_optional(pdev, irq_name);
		if (irq < 0)
			irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			ret = irq;
			goto failed_irq;
		}
		ret = devm_request_irq(&pdev->dev, irq, fec_enet_interrupt,
				       0, pdev->name, ndev);
		if (ret)
			goto failed_irq;

		fep->irq[i] = irq;
	}

	/* Decide which interrupt line is wakeup capable */
	fec_enet_get_wakeup_irq(pdev);

	ret = fec_enet_mii_init(pdev);
	if (ret)
		goto failed_mii_init;

	/* Carrier starts down, phylib will bring it up */
	netif_carrier_off(ndev);
	fec_enet_clk_enable(ndev, false);
	pinctrl_pm_select_sleep_state(&pdev->dev);

	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;

	ret = register_netdev(ndev);
	if (ret)
		goto failed_register;

	device_init_wakeup(&ndev->dev, fep->wol_flag &
			   FEC_WOL_HAS_MAGIC_PACKET);

	if (fep->bufdesc_ex && fep->ptp_clock)
		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);

	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

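/* Error unwind: the labels below run in reverse order of the probe steps
 * above, so each failure point releases only what was already acquired.
 */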
failed_register:
	fec_enet_mii_remove(fep);
failed_mii_init:
failed_irq:
	fec_enet_deinit(ndev);
failed_init:
	fec_ptp_stop(pdev);
failed_reset:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
failed_regulator:
	clk_disable_unprepare(fep->clk_ahb);
failed_clk_ahb:
	clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
	fec_enet_clk_enable(ndev, false);
failed_clk:
failed_rgmii_delay:
	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(phy_node);
failed_stop_mode:
failed_ipc_init:
failed_phy:
	dev_id--;
failed_ioremap:
	free_netdev(ndev);

	return ret;
}

static void
fec_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	struct device_node *np = pdev->dev.of_node;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		dev_err(&pdev->dev,
			"Failed to resume device in remove callback (%pe)\n",
			ERR_PTR(ret));

	cancel_work_sync(&fep->tx_timeout_work);
	fec_ptp_stop(pdev);
	unregister_netdev(ndev);
	fec_enet_mii_remove(fep);
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);

	if (of_phy_is_fixed_link(np))
		of_phy_deregister_fixed_link(np);
	of_node_put(fep->phy_node);

	/* If pm_runtime_get_sync() failed, the clocks are still off, so
	 * skip disabling them again.
	 */
	if (ret >= 0) {
		clk_disable_unprepare(fep->clk_ahb);
		clk_disable_unprepare(fep->clk_ipg);
	}
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	fec_enet_deinit(ndev);
	free_netdev(ndev);
}

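/* System sleep entry: detach the interface and stop the MAC, then either
 * mask all interrupts and drop to the sleep pinctrl state, or, with
 * Wake-on-LAN armed, keep the wakeup interrupt live and enter stop mode,
 * before finally gating the clocks.
 */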
static int __maybe_unused fec_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
		phy_stop(ndev->phydev);
		napi_disable(&fep->napi);
		netif_tx_lock_bh(ndev);
		netif_device_detach(ndev);
		netif_tx_unlock_bh(ndev);
		fec_stop(ndev);
		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
			fec_irqs_disable(ndev);
			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
		} else {
			fec_irqs_disable_except_wakeup(ndev);
			if (fep->wake_irq > 0) {
				disable_irq(fep->wake_irq);
				enable_irq_wake(fep->wake_irq);
			}
			fec_enet_stop_mode(fep, true);
		}
		/* It's safe to disable clocks since interrupts are masked */
		fec_enet_clk_enable(ndev, false);

		fep->rpm_active = !pm_runtime_status_suspended(dev);
		if (fep->rpm_active) {
			ret = pm_runtime_force_suspend(dev);
			if (ret < 0) {
				rtnl_unlock();
				return ret;
			}
		}
	}
	rtnl_unlock();

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
		regulator_disable(fep->reg_phy);

	/* The SoC supplies the PHY clock and controls the PHY regulator;
	 * when either is disabled, the PHY link goes down.
	 */
	if (fep->clk_enet_out || fep->reg_phy)
		fep->link = 0;

	return 0;
}

static int __maybe_unused fec_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;
	int val;

	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
		ret = regulator_enable(fep->reg_phy);
		if (ret)
			return ret;
	}

	rtnl_lock();
	if (netif_running(ndev)) {
		if (fep->rpm_active)
			pm_runtime_force_resume(dev);

		ret = fec_enet_clk_enable(ndev, true);
		if (ret) {
			rtnl_unlock();
			goto failed_clk;
		}
		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
			fec_enet_stop_mode(fep, false);
			if (fep->wake_irq) {
				disable_irq_wake(fep->wake_irq);
				enable_irq(fep->wake_irq);
			}

			val = readl(fep->hwp + FEC_ECNTRL);
			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
			writel(val, fep->hwp + FEC_ECNTRL);
			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
		} else {
			pinctrl_pm_select_default_state(&fep->pdev->dev);
		}
		fec_restart(ndev);
		netif_tx_lock_bh(ndev);
		netif_device_attach(ndev);
		netif_tx_unlock_bh(ndev);
		napi_enable(&fep->napi);
		phy_init_hw(ndev->phydev);
		phy_start(ndev->phydev);
	}
	rtnl_unlock();

	return 0;

failed_clk:
	if (fep->reg_phy)
		regulator_disable(fep->reg_phy);
	return ret;
}

static int __maybe_unused fec_runtime_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);

	clk_disable_unprepare(fep->clk_ahb);
	clk_disable_unprepare(fep->clk_ipg);

	return 0;
}

static int __maybe_unused fec_runtime_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct fec_enet_private *fep = netdev_priv(ndev);
	int ret;

	ret = clk_prepare_enable(fep->clk_ahb);
	if (ret)
		return ret;
	ret = clk_prepare_enable(fep->clk_ipg);
	if (ret)
		goto failed_clk_ipg;

	return 0;

failed_clk_ipg:
	clk_disable_unprepare(fep->clk_ahb);
	return ret;
}

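/* Both PM flavours are wired up here: system sleep goes through
 * fec_suspend()/fec_resume(), while runtime PM merely gates the AHB and
 * IPG clocks via the runtime callbacks.
 */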
static const struct dev_pm_ops fec_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
};

static struct platform_driver fec_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &fec_pm_ops,
		.of_match_table = fec_dt_ids,
		.suppress_bind_attrs = true,
	},
	.id_table = fec_devtype,
	.probe	= fec_probe,
	.remove_new = fec_drv_remove,
};

module_platform_driver(fec_driver);

MODULE_LICENSE("GPL");