// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
 * and Spartan6.
 *
 * TODO:
 *  - Add Axi Fifo support.
 *  - Factor out Axi DMA code into separate driver.
 *  - Test and fix basic multicast filtering.
 *  - Add support for extended multicast filtering.
 *  - Test basic VLAN support.
 *  - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;

	return ret;
}
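
/* Illustration (not driver code): on a core with XAE_FEATURE_DMA_64BIT,
 * a DMA address such as 0x0000000123456780 is stored by
 * desc_set_phys_addr() as phys = 0x23456780 and phys_msb = 0x00000001,
 * and desc_get_phys_addr() reassembles it. The double 16-bit shift is
 * equivalent to a single 32-bit shift but stays well defined (and
 * warning-free) when dma_addr_t is only 32 bits wide.
 */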

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi Ethernet
 * driver stop API is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
	if (result > 255)
		result = 255;

	return result;
}
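
/* Worked example of the conversion above: with the default 125 MHz
 * clock, one timeout interval is 125 * 8 ns = 1 us, so the returned
 * timer value equals coalesce_usec directly. With a 100 MHz SG clock,
 * a 50 us target gives 50 * 100000000 / 125000000 = 40 intervals,
 * and any result above 255 is clamped to fit the hardware timer field.
 */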

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0, on success -ENOMEM, on failure
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}
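
/* A sketch of the ring linkage built above: each descriptor's next
 * pointer refers to descriptor (i + 1) % ring_size, so with the default
 * TX_BD_NUM_DEFAULT of 128 the last BD (index 127) links back to BD 0
 * and the DMA engine can cycle through the ring indefinitely.
 */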

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the given MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}
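
/* A note on the register protocol used above, as far as this code
 * shows: the low byte of the frame filter register (XAE_FMI_OFFSET)
 * selects which of the four CAM entries the following XAE_AF0/XAE_AF1
 * writes address. That is why each iteration masks with 0xFFFFFF00,
 * ORs in the entry index, and then enables (1) or disables (0) the
 * entry through XAE_FFE_OFFSET.
 */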

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}
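
/* Typical usage, as seen in the reset path later in this file: sync
 * all options with the transmitter and receiver still disabled, then
 * enable them last:
 *
 *	axienet_setoptions(ndev, lp->options &
 *			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
 *	axienet_setoptions(ndev, lp->options);
 */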

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 *
 * Return: 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
					XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 *
 * Return: The number of descriptors handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	dma_addr_t phys;
	int i;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK))
			napi_consume_skb(cur_p->skb, budget);

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	return i;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}
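
/* Ordering sketch: the rmb() above pairs with the wmb() in
 * axienet_free_tx_chain() so that a cleared cntrl is only observed
 * after the rest of the descriptor has been reset. The stop/wake
 * protocol built on this check additionally relies on the smp_mb()
 * pair in axienet_start_xmit() and axienet_tx_poll().
 */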

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, budget, false, &size, budget);

	if (packets) {
		lp->tx_bd_ci += packets;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;

		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_tx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		napi_schedule(&lp->napi_rx);
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_stop and
 * phylink_disconnect_phy to stop and disconnect the PHY device. It also
 * removes the interrupt handlers and disables the interrupts. The Axi DMA
 * Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success, or a negative error value on failure.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

	if ((new_mtu + VLAN_ETH_HLEN +
	     XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}
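
/* Worked example of the check above (assuming the usual values
 * VLAN_ETH_HLEN = 18 and XAE_TRL_SIZE = 4): a jumbo MTU of 9000 needs
 * 9000 + 18 + 4 = 9022 bytes of receive buffer memory, so it is only
 * accepted when lp->rxmem is at least that large.
 */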

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}
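
/* Note: the new ring sizes only take effect on the next device open,
 * since the descriptor rings are allocated in axienet_dma_bd_init();
 * the netif_running() check above guarantees the interface is down
 * when they change.
 */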

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error value on failure.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};
1615
pcs_to_axienet_local(struct phylink_pcs * pcs)1616 static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
1617 {
1618 return container_of(pcs, struct axienet_local, pcs);
1619 }
1620
axienet_pcs_get_state(struct phylink_pcs * pcs,struct phylink_link_state * state)1621 static void axienet_pcs_get_state(struct phylink_pcs *pcs,
1622 struct phylink_link_state *state)
1623 {
1624 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1625
1626 phylink_mii_c22_pcs_get_state(pcs_phy, state);
1627 }
1628
axienet_pcs_an_restart(struct phylink_pcs * pcs)1629 static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
1630 {
1631 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1632
1633 phylink_mii_c22_pcs_an_restart(pcs_phy);
1634 }
1635
axienet_pcs_config(struct phylink_pcs * pcs,unsigned int neg_mode,phy_interface_t interface,const unsigned long * advertising,bool permit_pause_to_mac)1636 static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
1637 phy_interface_t interface,
1638 const unsigned long *advertising,
1639 bool permit_pause_to_mac)
1640 {
1641 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
1642 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
1643 struct axienet_local *lp = netdev_priv(ndev);
1644 int ret;
1645
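/* If the core was built with dynamic SGMII/1000BaseX switching
 * support, select the PCS/PMA standard matching the requested
 * interface before reconfiguring the PCS.
 */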
1646 if (lp->switch_x_sgmii) {
1647 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
1648 interface == PHY_INTERFACE_MODE_SGMII ?
1649 XLNX_MII_STD_SELECT_SGMII : 0);
1650 if (ret < 0) {
1651 netdev_warn(ndev,
1652 "Failed to switch PHY interface: %d\n",
1653 ret);
1654 return ret;
1655 }
1656 }
1657
1658 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
1659 neg_mode);
1660 if (ret < 0)
1661 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
1662
1663 return ret;
1664 }
1665
1666 static const struct phylink_pcs_ops axienet_pcs_ops = {
1667 .pcs_get_state = axienet_pcs_get_state,
1668 .pcs_config = axienet_pcs_config,
1669 .pcs_an_restart = axienet_pcs_an_restart,
1670 };
1671
1672 static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
1673 phy_interface_t interface)
1674 {
1675 struct net_device *ndev = to_net_dev(config->dev);
1676 struct axienet_local *lp = netdev_priv(ndev);
1677
1678 if (interface == PHY_INTERFACE_MODE_1000BASEX ||
1679 interface == PHY_INTERFACE_MODE_SGMII)
1680 return &lp->pcs;
1681
1682 return NULL;
1683 }
1684
1685 static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
1686 const struct phylink_link_state *state)
1687 {
1688 /* nothing meaningful to do */
1689 }
1690
1691 static void axienet_mac_link_down(struct phylink_config *config,
1692 unsigned int mode,
1693 phy_interface_t interface)
1694 {
1695 /* nothing meaningful to do */
1696 }
1697
1698 static void axienet_mac_link_up(struct phylink_config *config,
1699 struct phy_device *phy,
1700 unsigned int mode, phy_interface_t interface,
1701 int speed, int duplex,
1702 bool tx_pause, bool rx_pause)
1703 {
1704 struct net_device *ndev = to_net_dev(config->dev);
1705 struct axienet_local *lp = netdev_priv(ndev);
1706 u32 emmc_reg, fcc_reg;
1707
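/* Update only the link-speed field of the EMMC register, preserving
 * the other MAC configuration bits.
 */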
1708 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
1709 emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;
1710
1711 switch (speed) {
1712 case SPEED_1000:
1713 emmc_reg |= XAE_EMMC_LINKSPD_1000;
1714 break;
1715 case SPEED_100:
1716 emmc_reg |= XAE_EMMC_LINKSPD_100;
1717 break;
1718 case SPEED_10:
1719 emmc_reg |= XAE_EMMC_LINKSPD_10;
1720 break;
1721 default:
1722 dev_err(&ndev->dev,
1723 "Speed other than 10, 100 or 1000 Mbps is not supported\n");
1724 break;
1725 }
1726
1727 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
1728
1729 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
1730 if (tx_pause)
1731 fcc_reg |= XAE_FCC_FCTX_MASK;
1732 else
1733 fcc_reg &= ~XAE_FCC_FCTX_MASK;
1734 if (rx_pause)
1735 fcc_reg |= XAE_FCC_FCRX_MASK;
1736 else
1737 fcc_reg &= ~XAE_FCC_FCRX_MASK;
1738 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
1739 }
1740
1741 static const struct phylink_mac_ops axienet_phylink_ops = {
1742 .mac_select_pcs = axienet_mac_select_pcs,
1743 .mac_config = axienet_mac_config,
1744 .mac_link_down = axienet_mac_link_down,
1745 .mac_link_up = axienet_mac_link_up,
1746 };
1747
1748 /**
1749 * axienet_dma_err_handler - Work queue task for Axi DMA Error
1750 * @work: pointer to work_struct
1751 *
1752 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
1753 * Tx/Rx BDs.
1754 */
1755 static void axienet_dma_err_handler(struct work_struct *work)
1756 {
1757 u32 i;
1758 u32 axienet_status;
1759 struct axidma_bd *cur_p;
1760 struct axienet_local *lp = container_of(work, struct axienet_local,
1761 dma_err_task);
1762 struct net_device *ndev = lp->ndev;
1763
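/* Quiesce NAPI and disable the receiver/transmitter before
 * resetting the DMA engine, so no descriptor is touched while the
 * rings are being reinitialized.
 */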
1764 napi_disable(&lp->napi_tx);
1765 napi_disable(&lp->napi_rx);
1766
1767 axienet_setoptions(ndev, lp->options &
1768 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1769
1770 axienet_dma_stop(lp);
1771
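/* Unmap and free any in-flight Tx buffers, then return every Tx
 * descriptor to its initial state.
 */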
1772 for (i = 0; i < lp->tx_bd_num; i++) {
1773 cur_p = &lp->tx_bd_v[i];
1774 if (cur_p->cntrl) {
1775 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
1776
1777 dma_unmap_single(lp->dev, addr,
1778 (cur_p->cntrl &
1779 XAXIDMA_BD_CTRL_LENGTH_MASK),
1780 DMA_TO_DEVICE);
1781 }
1782 if (cur_p->skb)
1783 dev_kfree_skb_irq(cur_p->skb);
1784 cur_p->phys = 0;
1785 cur_p->phys_msb = 0;
1786 cur_p->cntrl = 0;
1787 cur_p->status = 0;
1788 cur_p->app0 = 0;
1789 cur_p->app1 = 0;
1790 cur_p->app2 = 0;
1791 cur_p->app3 = 0;
1792 cur_p->app4 = 0;
1793 cur_p->skb = NULL;
1794 }
1795
1796 for (i = 0; i < lp->rx_bd_num; i++) {
1797 cur_p = &lp->rx_bd_v[i];
1798 cur_p->status = 0;
1799 cur_p->app0 = 0;
1800 cur_p->app1 = 0;
1801 cur_p->app2 = 0;
1802 cur_p->app3 = 0;
1803 cur_p->app4 = 0;
1804 }
1805
1806 lp->tx_bd_ci = 0;
1807 lp->tx_bd_tail = 0;
1808 lp->rx_bd_ci = 0;
1809
1810 axienet_dma_start(lp);
1811
1812 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
1813 axienet_status &= ~XAE_RCW1_RX_MASK;
1814 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
1815
1816 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
1817 if (axienet_status & XAE_INT_RXRJECT_MASK)
1818 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
1819 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
1820 XAE_INT_RECV_ERROR_MASK : 0);
1821 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
1822
1823 /* Sync default options with HW but leave receiver and
1824 * transmitter disabled.
1825 */
1826 axienet_setoptions(ndev, lp->options &
1827 ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
1828 axienet_set_mac_address(ndev, NULL);
1829 axienet_set_multicast_list(ndev);
1830 napi_enable(&lp->napi_rx);
1831 napi_enable(&lp->napi_tx);
1832 axienet_setoptions(ndev, lp->options);
1833 }
1834
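/* A minimal, illustrative device-tree node wiring up the properties
 * parsed by the probe routine below. Property names are taken from
 * this file; the addresses, sizes and phandles are placeholders only:
 *
 *	ethernet@40c00000 {
 *		compatible = "xlnx,axi-ethernet-1.00.a";
 *		reg = <0x40c00000 0x40000>;
 *		xlnx,rxmem = <0x1000>;
 *		xlnx,txcsum = <1>;
 *		xlnx,rxcsum = <1>;
 *		phy-mode = "sgmii";
 *		pcs-handle = <&pcs_phy>;
 *		axistream-connected = <&axi_dma>;
 *	};
 */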
1835 /**
1836 * axienet_probe - Axi Ethernet probe function.
1837 * @pdev: Pointer to platform device structure.
1838 *
1839 * Return: 0 on success.
1840 * Negative error value on failure.
1841 *
1842 * This is the probe routine for the Axi Ethernet driver. It is called
1843 * before any other driver routines are invoked. It allocates and sets
1844 * up the Ethernet device, parses the device tree to populate the fields
1845 * of axienet_local, and registers the Ethernet device.
1846 */
1847 static int axienet_probe(struct platform_device *pdev)
1848 {
1849 int ret;
1850 struct device_node *np;
1851 struct axienet_local *lp;
1852 struct net_device *ndev;
1853 struct resource *ethres;
1854 u8 mac_addr[ETH_ALEN];
1855 int addr_width = 32;
1856 u32 value;
1857
1858 ndev = alloc_etherdev(sizeof(*lp));
1859 if (!ndev)
1860 return -ENOMEM;
1861
1862 platform_set_drvdata(pdev, ndev);
1863
1864 SET_NETDEV_DEV(ndev, &pdev->dev);
1865 ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
1866 ndev->features = NETIF_F_SG;
1867 ndev->netdev_ops = &axienet_netdev_ops;
1868 ndev->ethtool_ops = &axienet_ethtool_ops;
1869
1870 /* MTU range: 64 - 9000 */
1871 ndev->min_mtu = 64;
1872 ndev->max_mtu = XAE_JUMBO_MTU;
1873
1874 lp = netdev_priv(ndev);
1875 lp->ndev = ndev;
1876 lp->dev = &pdev->dev;
1877 lp->options = XAE_OPTION_DEFAULTS;
1878 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
1879 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
1880
1881 u64_stats_init(&lp->rx_stat_sync);
1882 u64_stats_init(&lp->tx_stat_sync);
1883
1884 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
1885 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
1886
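/* The AXI-Lite clock gates register access and must be running
 * before any MMIO; the remaining clocks are optional and are
 * managed as a bulk set below.
 */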
1887 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
1888 if (!lp->axi_clk) {
1889 /* For backward compatibility, if named AXI clock is not present,
1890 * treat the first clock specified as the AXI clock.
1891 */
1892 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
1893 }
1894 if (IS_ERR(lp->axi_clk)) {
1895 ret = PTR_ERR(lp->axi_clk);
1896 goto free_netdev;
1897 }
1898 ret = clk_prepare_enable(lp->axi_clk);
1899 if (ret) {
1900 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
1901 goto free_netdev;
1902 }
1903
1904 lp->misc_clks[0].id = "axis_clk";
1905 lp->misc_clks[1].id = "ref_clk";
1906 lp->misc_clks[2].id = "mgt_clk";
1907
1908 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1909 if (ret)
1910 goto cleanup_clk;
1911
1912 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
1913 if (ret)
1914 goto cleanup_clk;
1915
1916 /* Map device registers */
1917 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
1918 if (IS_ERR(lp->regs)) {
1919 ret = PTR_ERR(lp->regs);
1920 goto cleanup_clk;
1921 }
1922 lp->regs_start = ethres->start;
1923
1924 /* Setup checksum offload, but default to off if not specified */
1925 lp->features = 0;
1926
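/* The "xlnx,txcsum" and "xlnx,rxcsum" properties encode the
 * synthesized offload level: 1 = partial checksum offload,
 * 2 = full checksum offload, anything else = none.
 */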
1927 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
1928 if (!ret) {
1929 switch (value) {
1930 case 1:
1931 lp->csum_offload_on_tx_path =
1932 XAE_FEATURE_PARTIAL_TX_CSUM;
1933 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
1934 /* Can checksum TCP/UDP over IPv4. */
1935 ndev->features |= NETIF_F_IP_CSUM;
1936 break;
1937 case 2:
1938 lp->csum_offload_on_tx_path =
1939 XAE_FEATURE_FULL_TX_CSUM;
1940 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
1941 /* Can checksum TCP/UDP over IPv4. */
1942 ndev->features |= NETIF_F_IP_CSUM;
1943 break;
1944 default:
1945 lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
1946 }
1947 }
1948 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
1949 if (!ret) {
1950 switch (value) {
1951 case 1:
1952 lp->csum_offload_on_rx_path =
1953 XAE_FEATURE_PARTIAL_RX_CSUM;
1954 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
1955 break;
1956 case 2:
1957 lp->csum_offload_on_rx_path =
1958 XAE_FEATURE_FULL_RX_CSUM;
1959 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
1960 break;
1961 default:
1962 lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
1963 }
1964 }
1965 /* To support jumbo frames, the Axi Ethernet hardware must be
1966 * synthesized with sufficiently large Rx/Tx memories. Read the
1967 * Rx memory size from the device tree here so that the jumbo
1968 * option is only enabled when the hardware has enough buffering
1969 * for such frames.
1970 */
1971 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
1972
1973 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
1974 "xlnx,switch-x-sgmii");
1975
1976 /* Start with the proprietary, and broken phy_type */
1977 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
1978 if (!ret) {
1979 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
1980 switch (value) {
1981 case XAE_PHY_TYPE_MII:
1982 lp->phy_mode = PHY_INTERFACE_MODE_MII;
1983 break;
1984 case XAE_PHY_TYPE_GMII:
1985 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
1986 break;
1987 case XAE_PHY_TYPE_RGMII_2_0:
1988 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
1989 break;
1990 case XAE_PHY_TYPE_SGMII:
1991 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
1992 break;
1993 case XAE_PHY_TYPE_1000BASE_X:
1994 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
1995 break;
1996 default:
1997 ret = -EINVAL;
1998 goto cleanup_clk;
1999 }
2000 } else {
2001 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2002 if (ret)
2003 goto cleanup_clk;
2004 }
2005 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2006 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2007 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
2008 ret = -EINVAL;
2009 goto cleanup_clk;
2010 }
2011
2012 /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
2013 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
2014 if (np) {
2015 struct resource dmares;
2016
2017 ret = of_address_to_resource(np, 0, &dmares);
2018 if (ret) {
2019 dev_err(&pdev->dev,
2020 "unable to get DMA resource\n");
2021 of_node_put(np);
2022 goto cleanup_clk;
2023 }
2024 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2025 &dmares);
2026 lp->rx_irq = irq_of_parse_and_map(np, 1);
2027 lp->tx_irq = irq_of_parse_and_map(np, 0);
2028 of_node_put(np);
2029 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2030 } else {
2031 /* Check for these resources directly on the Ethernet node. */
2032 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2033 lp->rx_irq = platform_get_irq(pdev, 1);
2034 lp->tx_irq = platform_get_irq(pdev, 0);
2035 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2036 }
2037 if (IS_ERR(lp->dma_regs)) {
2038 dev_err(&pdev->dev, "could not map DMA regs\n");
2039 ret = PTR_ERR(lp->dma_regs);
2040 goto cleanup_clk;
2041 }
2042 if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
2043 dev_err(&pdev->dev, "could not determine irqs\n");
2044 ret = -ENODEV;
2045 goto cleanup_clk;
2046 }
2047
2048 /* Reset core now that clocks are enabled, prior to accessing MDIO */
2049 ret = __axienet_device_reset(lp);
2050 if (ret)
2051 goto cleanup_clk;
2052
2053 /* Autodetect the need for 64-bit DMA pointers.
2054 * When the IP is configured for a bus width bigger than 32 bits,
2055 * writing the MSB registers is mandatory, even if they are all 0.
2056 * We can detect this case by writing all 1's to one such register
2057 * and see if that sticks: when the IP is configured for 32 bits
2058 * only, those registers are RES0.
2059 * Those MSB registers were introduced in IP v7.1, which we check first.
2060 */
2061 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2062 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2063
2064 iowrite32(0x0, desc);
2065 if (ioread32(desc) == 0) { /* sanity check */
2066 iowrite32(0xffffffff, desc);
2067 if (ioread32(desc) > 0) {
2068 lp->features |= XAE_FEATURE_DMA_64BIT;
2069 addr_width = 64;
2070 dev_info(&pdev->dev,
2071 "autodetected 64-bit DMA range\n");
2072 }
2073 iowrite32(0x0, desc);
2074 }
2075 }
2076 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2077 dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit archecture\n");
2078 ret = -EINVAL;
2079 goto cleanup_clk;
2080 }
2081
2082 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
2083 if (ret) {
2084 dev_err(&pdev->dev, "No suitable DMA available\n");
2085 goto cleanup_clk;
2086 }
2087
2088 /* Check for Ethernet core IRQ (optional) */
2089 if (lp->eth_irq <= 0)
2090 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");
2091
2092 /* Retrieve the MAC address */
2093 ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
2094 if (!ret) {
2095 axienet_set_mac_address(ndev, mac_addr);
2096 } else {
2097 dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
2098 ret);
2099 axienet_set_mac_address(ndev, NULL);
2100 }
2101
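/* Start from the driver's default interrupt-coalescing thresholds;
 * user space may tune them later through the ethtool coalesce
 * interface implemented above.
 */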
2102 lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
2103 lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
2104 lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
2105 lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
2106
2107 ret = axienet_mdio_setup(lp);
2108 if (ret)
2109 dev_warn(&pdev->dev,
2110 "error registering MDIO bus: %d\n", ret);
2111
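/* SGMII and 1000BaseX links are driven through an internal PCS/PMA
 * PHY, referenced from the device tree via "pcs-handle" (or the
 * deprecated "phy-handle" fallback).
 */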
2112 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
2113 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
2114 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
2115 if (!np) {
2116 /* Deprecated: Always use "pcs-handle" for pcs_phy.
2117 * Falling back to "phy-handle" here is only for
2118 * backward compatibility with old device trees.
2119 */
2120 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
2121 }
2122 if (!np) {
2123 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
2124 ret = -EINVAL;
2125 goto cleanup_mdio;
2126 }
2127 lp->pcs_phy = of_mdio_find_device(np);
2128 if (!lp->pcs_phy) {
2129 ret = -EPROBE_DEFER;
2130 of_node_put(np);
2131 goto cleanup_mdio;
2132 }
2133 of_node_put(np);
2134 lp->pcs.ops = &axienet_pcs_ops;
2135 lp->pcs.neg_mode = true;
2136 lp->pcs.poll = true;
2137 }
2138
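/* Describe the MAC to phylink: symmetric/asymmetric pause and
 * 10/100/1000 full duplex, plus every interface mode the core can
 * be switched to at runtime.
 */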
2139 lp->phylink_config.dev = &ndev->dev;
2140 lp->phylink_config.type = PHYLINK_NETDEV;
2141 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
2142 MAC_10FD | MAC_100FD | MAC_1000FD;
2143
2144 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
2145 if (lp->switch_x_sgmii) {
2146 __set_bit(PHY_INTERFACE_MODE_1000BASEX,
2147 lp->phylink_config.supported_interfaces);
2148 __set_bit(PHY_INTERFACE_MODE_SGMII,
2149 lp->phylink_config.supported_interfaces);
2150 }
2151
2152 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
2153 lp->phy_mode,
2154 &axienet_phylink_ops);
2155 if (IS_ERR(lp->phylink)) {
2156 ret = PTR_ERR(lp->phylink);
2157 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
2158 goto cleanup_mdio;
2159 }
2160
2161 ret = register_netdev(lp->ndev);
2162 if (ret) {
2163 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
2164 goto cleanup_phylink;
2165 }
2166
2167 return 0;
2168
2169 cleanup_phylink:
2170 phylink_destroy(lp->phylink);
2171
2172 cleanup_mdio:
2173 if (lp->pcs_phy)
2174 put_device(&lp->pcs_phy->dev);
2175 if (lp->mii_bus)
2176 axienet_mdio_teardown(lp);
2177 cleanup_clk:
2178 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2179 clk_disable_unprepare(lp->axi_clk);
2180
2181 free_netdev:
2182 free_netdev(ndev);
2183
2184 return ret;
2185 }
2186
2187 static int axienet_remove(struct platform_device *pdev)
2188 {
2189 struct net_device *ndev = platform_get_drvdata(pdev);
2190 struct axienet_local *lp = netdev_priv(ndev);
2191
2192 unregister_netdev(ndev);
2193
2194 if (lp->phylink)
2195 phylink_destroy(lp->phylink);
2196
2197 if (lp->pcs_phy)
2198 put_device(&lp->pcs_phy->dev);
2199
2200 axienet_mdio_teardown(lp);
2201
2202 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2203 clk_disable_unprepare(lp->axi_clk);
2204
2205 free_netdev(ndev);
2206
2207 return 0;
2208 }
2209
2210 static void axienet_shutdown(struct platform_device *pdev)
2211 {
2212 struct net_device *ndev = platform_get_drvdata(pdev);
2213
2214 rtnl_lock();
2215 netif_device_detach(ndev);
2216
2217 if (netif_running(ndev))
2218 dev_close(ndev);
2219
2220 rtnl_unlock();
2221 }
2222
2223 static int axienet_suspend(struct device *dev)
2224 {
2225 struct net_device *ndev = dev_get_drvdata(dev);
2226
2227 if (!netif_running(ndev))
2228 return 0;
2229
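/* Detach the interface so the core stops using it, then stop it
 * under RTNL, which the close path expects to be held.
 */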
2230 netif_device_detach(ndev);
2231
2232 rtnl_lock();
2233 axienet_stop(ndev);
2234 rtnl_unlock();
2235
2236 return 0;
2237 }
2238
2239 static int axienet_resume(struct device *dev)
2240 {
2241 struct net_device *ndev = dev_get_drvdata(dev);
2242
2243 if (!netif_running(ndev))
2244 return 0;
2245
2246 rtnl_lock();
2247 axienet_open(ndev);
2248 rtnl_unlock();
2249
2250 netif_device_attach(ndev);
2251
2252 return 0;
2253 }
2254
2255 static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
2256 axienet_suspend, axienet_resume);
2257
2258 static struct platform_driver axienet_driver = {
2259 .probe = axienet_probe,
2260 .remove = axienet_remove,
2261 .shutdown = axienet_shutdown,
2262 .driver = {
2263 .name = "xilinx_axienet",
2264 .pm = &axienet_pm_ops,
2265 .of_match_table = axienet_of_match,
2266 },
2267 };
2268
2269 module_platform_driver(axienet_driver);
2270
2271 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
2272 MODULE_AUTHOR("Xilinx");
2273 MODULE_LICENSE("GPL");
2274