// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xilinx Axi Ethernet device driver
 *
 * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi
 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
 * Copyright (c) 2010 - 2011 PetaLogix
 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
 *
 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex-6
 * and Spartan-6.
 *
 * TODO:
 * - Add Axi Fifo support.
 * - Factor out Axi DMA code into separate driver.
 * - Test and fix basic multicast filtering.
 * - Add support for extended multicast filtering.
 * - Test basic VLAN support.
 * - Add support for extended VLAN support.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
#include <linux/math64.h>
#include <linux/phy.h>
#include <linux/mii.h>
#include <linux/ethtool.h>

#include "xilinx_axienet.h"

/* Descriptors defines for Tx and Rx DMA */
#define TX_BD_NUM_DEFAULT		128
#define RX_BD_NUM_DEFAULT		1024
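/* A worst-case sk_buff needs one Tx BD for the linear part plus one per
 * page fragment, hence the MAX_SKB_FRAGS + 1 lower bound below.
 */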
#define TX_BD_NUM_MIN			(MAX_SKB_FRAGS + 1)
#define TX_BD_NUM_MAX			4096
#define RX_BD_NUM_MAX			4096

/* Must be shorter than length of ethtool_drvinfo.driver field to fit */
#define DRIVER_NAME		"xaxienet"
#define DRIVER_DESCRIPTION	"Xilinx Axi Ethernet driver"
#define DRIVER_VERSION		"1.00a"

#define AXIENET_REGS_N		40

/* Match table for of_platform binding */
static const struct of_device_id axienet_of_match[] = {
	{ .compatible = "xlnx,axi-ethernet-1.00.a", },
	{ .compatible = "xlnx,axi-ethernet-1.01.a", },
	{ .compatible = "xlnx,axi-ethernet-2.01.a", },
	{},
};

MODULE_DEVICE_TABLE(of, axienet_of_match);

/* Option table for setting up Axi Ethernet hardware options */
static struct axienet_option axienet_options[] = {
	/* Turn on jumbo packet support for both Rx and Tx */
	{
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_JUM_MASK,
	}, {
		.opt = XAE_OPTION_JUMBO,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_JUM_MASK,
	}, { /* Turn on VLAN packet support for both Rx and Tx */
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_VLAN_MASK,
	}, {
		.opt = XAE_OPTION_VLAN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_VLAN_MASK,
	}, { /* Turn on FCS stripping on receive packets */
		.opt = XAE_OPTION_FCS_STRIP,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_FCS_MASK,
	}, { /* Turn on FCS insertion on transmit packets */
		.opt = XAE_OPTION_FCS_INSERT,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_FCS_MASK,
	}, { /* Turn off length/type field checking on receive packets */
		.opt = XAE_OPTION_LENTYPE_ERR,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_LT_DIS_MASK,
	}, { /* Turn on Rx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCRX_MASK,
	}, { /* Turn on Tx flow control */
		.opt = XAE_OPTION_FLOW_CONTROL,
		.reg = XAE_FCC_OFFSET,
		.m_or = XAE_FCC_FCTX_MASK,
	}, { /* Turn on promiscuous frame filtering */
		.opt = XAE_OPTION_PROMISC,
		.reg = XAE_FMI_OFFSET,
		.m_or = XAE_FMI_PM_MASK,
	}, { /* Enable transmitter */
		.opt = XAE_OPTION_TXEN,
		.reg = XAE_TC_OFFSET,
		.m_or = XAE_TC_TX_MASK,
	}, { /* Enable receiver */
		.opt = XAE_OPTION_RXEN,
		.reg = XAE_RCW1_OFFSET,
		.m_or = XAE_RCW1_RX_MASK,
	},
	{}
};

/**
 * axienet_dma_in32 - Memory mapped Axi DMA register read
 * @lp: Pointer to axienet local structure
 * @reg: Address offset from the base address of the Axi DMA core
 *
 * Return: The contents of the Axi DMA register
 *
 * This function returns the contents of the corresponding Axi DMA register.
 */
static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
{
	return ioread32(lp->dma_regs + reg);
}

static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
			       struct axidma_bd *desc)
{
	desc->phys = lower_32_bits(addr);
	if (lp->features & XAE_FEATURE_DMA_64BIT)
		desc->phys_msb = upper_32_bits(addr);
}

static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
				     struct axidma_bd *desc)
{
	dma_addr_t ret = desc->phys;

	if (lp->features & XAE_FEATURE_DMA_64BIT)
		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
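	/* Note: the double 16-bit shift above, rather than a single "<< 32",
	 * keeps the expression well-defined even on builds where dma_addr_t
	 * is only 32 bits wide, avoiding a shift-count-overflow warning.
	 */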

	return ret;
}

/**
 * axienet_dma_bd_release - Release buffer descriptor rings
 * @ndev: Pointer to the net_device structure
 *
 * This function is used to release the descriptors allocated in
 * axienet_dma_bd_init. axienet_dma_bd_release is called when the Axi
 * Ethernet driver's stop routine is called.
 */
static void axienet_dma_bd_release(struct net_device *ndev)
{
	int i;
	struct axienet_local *lp = netdev_priv(ndev);

	/* If we end up here, tx_bd_v must have been DMA allocated. */
	dma_free_coherent(lp->dev,
			  sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
			  lp->tx_bd_v,
			  lp->tx_bd_p);

	if (!lp->rx_bd_v)
		return;

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t phys;

		/* A NULL skb means this descriptor has not been initialised
		 * at all.
		 */
		if (!lp->rx_bd_v[i].skb)
			break;

		dev_kfree_skb(lp->rx_bd_v[i].skb);

		/* For each descriptor, we programmed cntrl with the (non-zero)
		 * descriptor size, after it had been successfully allocated.
		 * So a non-zero value in there means we need to unmap it.
		 */
		if (lp->rx_bd_v[i].cntrl) {
			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
			dma_unmap_single(lp->dev, phys,
					 lp->max_frm_size, DMA_FROM_DEVICE);
		}
	}

	dma_free_coherent(lp->dev,
			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
			  lp->rx_bd_v,
			  lp->rx_bd_p);
}

/**
 * axienet_usec_to_timer - Calculate IRQ delay timer value
 * @lp: Pointer to the axienet_local structure
 * @coalesce_usec: Microseconds to convert into timer value
 */
static u32 axienet_usec_to_timer(struct axienet_local *lp, u32 coalesce_usec)
{
	u32 result;
	u64 clk_rate = 125000000; /* arbitrary guess if no clock rate set */

	if (lp->axi_clk)
		clk_rate = clk_get_rate(lp->axi_clk);

	/* 1 Timeout Interval = 125 * (clock period of SG clock) */
	result = DIV64_U64_ROUND_CLOSEST((u64)coalesce_usec * clk_rate,
					 (u64)125000000);
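	/* Worked example (assuming a 100 MHz SG clock): coalesce_usec = 10
	 * gives round(10 * 100000000 / 125000000) = 8 timer units, and one
	 * unit is 125 clock periods = 1.25 us, so 8 units = the requested
	 * 10 us. The result is clamped below to fit the 8-bit timer field.
	 */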
	if (result > 255)
		result = 255;

	return result;
}

/**
 * axienet_dma_start - Set up DMA registers and start DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_start(struct axienet_local *lp)
{
	/* Start updating the Rx channel control register */
	lp->rx_dma_cr = (lp->coalesce_count_rx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first RX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_rx > 1)
		lp->rx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_rx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);

	/* Start updating the Tx channel control register */
	lp->tx_dma_cr = (lp->coalesce_count_tx << XAXIDMA_COALESCE_SHIFT) |
			XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_ERROR_MASK;
	/* Only set interrupt delay timer if not generating an interrupt on
	 * the first TX packet. Otherwise leave at 0 to disable delay interrupt.
	 */
	if (lp->coalesce_count_tx > 1)
		lp->tx_dma_cr |= (axienet_usec_to_timer(lp, lp->coalesce_usec_tx)
					<< XAXIDMA_DELAY_SHIFT) |
				 XAXIDMA_IRQ_DELAY_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);

	/* Populate the tail pointer and bring the Rx Axi DMA engine out of
	 * halted state. This will make the Rx side ready for reception.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
	lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
			     (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));

	/* Write to the RS (Run-stop) bit in the Tx channel control register.
	 * Tx channel is now ready to run. But only after we write to the
	 * tail pointer register that the Tx channel will start transmitting.
	 */
	axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
	lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
}

/**
 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
 * @ndev: Pointer to the net_device structure
 *
 * Return: 0 on success; -ENOMEM on failure.
 *
 * This function is called to initialize the Rx and Tx DMA descriptor
 * rings. This initializes the descriptors with required default values
 * and is called when Axi Ethernet driver reset is called.
 */
static int axienet_dma_bd_init(struct net_device *ndev)
{
	int i;
	struct sk_buff *skb;
	struct axienet_local *lp = netdev_priv(ndev);

	/* Reset the indexes which are used for accessing the BDs */
	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	/* Allocate the Tx and Rx buffer descriptors. */
	lp->tx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
					 &lp->tx_bd_p, GFP_KERNEL);
	if (!lp->tx_bd_v)
		return -ENOMEM;

	lp->rx_bd_v = dma_alloc_coherent(lp->dev,
					 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
					 &lp->rx_bd_p, GFP_KERNEL);
	if (!lp->rx_bd_v)
		goto out;

	for (i = 0; i < lp->tx_bd_num; i++) {
		dma_addr_t addr = lp->tx_bd_p +
				  sizeof(*lp->tx_bd_v) *
				  ((i + 1) % lp->tx_bd_num);
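		/* The (i + 1) % tx_bd_num wrap links the last descriptor
		 * back to the first, so the BDs form a circular ring.
		 */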

		lp->tx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		dma_addr_t addr;

		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
			((i + 1) % lp->rx_bd_num);
		lp->rx_bd_v[i].next = lower_32_bits(addr);
		if (lp->features & XAE_FEATURE_DMA_64BIT)
			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
		if (!skb)
			goto out;

		lp->rx_bd_v[i].skb = skb;
		addr = dma_map_single(lp->dev, skb->data,
				      lp->max_frm_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(lp->dev, addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			goto out;
		}
		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);

		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
	}

	axienet_dma_start(lp);

	return 0;
out:
	axienet_dma_bd_release(ndev);
	return -ENOMEM;
}

/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev: Pointer to the net_device structure
 * @address: 6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev,
				    const void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		eth_hw_addr_set(ndev, address);
	if (!is_valid_ether_addr(ndev->dev_addr))
		eth_hw_addr_random(ndev);

	/* Set up the unicast MAC address filter with the MAC address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev: Pointer to the net_device structure
 * @p: 6 byte Address to be written as MAC address
 *
 * Return: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i = 0;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it.
		 */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		netdev_for_each_mc_addr(ha, ndev) {
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;
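			/* The low byte of FMI selects which of the four CAM
			 * filter entries the AF0/AF1 write below targets.
			 */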

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			axienet_iow(lp, XAE_FFE_OFFSET, 1);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}

	for (; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
		reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
		reg |= i;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		axienet_iow(lp, XAE_FFE_OFFSET, 0);
	}
}

/**
 * axienet_setoptions - Set an Axi Ethernet option
 * @ndev: Pointer to the net_device structure
 * @options: Option to be enabled/disabled
 *
 * The Axi Ethernet core has multiple features which can be selectively turned
 * on or off. The typical options could be jumbo frame option, basic VLAN
 * option, promiscuous mode option etc. This function is used to set or clear
 * these options in the Axi Ethernet hardware. This is done through the
 * axienet_option structure.
 */
static void axienet_setoptions(struct net_device *ndev, u32 options)
{
	int reg;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axienet_option *tp = &axienet_options[0];

	while (tp->opt) {
		reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
		if (options & tp->opt)
			reg |= tp->m_or;
		axienet_iow(lp, tp->reg, reg);
		tp++;
	}

	lp->options |= options;
}

static int __axienet_device_reset(struct axienet_local *lp)
{
	u32 value;
	int ret;

	/* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
	 * process of Axi DMA takes a while to complete as all pending
	 * commands/transfers will be flushed or completed during this
	 * reset process.
	 * Note that even though both TX and RX have their own reset register,
	 * they both reset the entire DMA core, so only one needs to be used.
	 */
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
	ret = read_poll_timeout(axienet_dma_in32, value,
				!(value & XAXIDMA_CR_RESET_MASK),
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAXIDMA_TX_CR_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
		return ret;
	}

	/* Wait for PhyRstCmplt bit to be set, indicating the PHY reset has finished */
	ret = read_poll_timeout(axienet_ior, value,
				value & XAE_INT_PHYRSTCMPLT_MASK,
				DELAY_OF_ONE_MILLISEC, 50000, false, lp,
				XAE_IS_OFFSET);
	if (ret) {
		dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
		return ret;
	}

	return 0;
}

/**
 * axienet_dma_stop - Stop DMA operation
 * @lp: Pointer to the axienet_local structure
 */
static void axienet_dma_stop(struct axienet_local *lp)
{
	int count;
	u32 cr, sr;

	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
	synchronize_irq(lp->rx_irq);

	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	cr &= ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
	synchronize_irq(lp->tx_irq);

	/* Give DMAs a chance to halt gracefully (up to 5 polls, 20 ms apart,
	 * i.e. ~100 ms per channel) before forcing a reset below.
	 */
	sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	}

	sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	for (count = 0; !(sr & XAXIDMA_SR_HALT_MASK) && count < 5; ++count) {
		msleep(20);
		sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	}

	/* Do a reset to ensure DMA is really stopped */
	axienet_lock_mii(lp);
	__axienet_device_reset(lp);
	axienet_unlock_mii(lp);
}

/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev: Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 * Returns 0 on success or a negative error number otherwise.
 */
static int axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

	ret = __axienet_device_reset(lp);
	if (ret)
		return ret;

	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options |= XAE_OPTION_VLAN;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU)) {
		lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
				   XAE_TRL_SIZE;

		if (lp->max_frm_size <= lp->rxmem)
			lp->options |= XAE_OPTION_JUMBO;
	}

	ret = axienet_dma_bd_init(ndev);
	if (ret) {
		netdev_err(ndev, "%s: descriptor allocation failed\n",
			   __func__);
		return ret;
	}

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	netif_trans_update(ndev);

	return 0;
}

/**
 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
 * @lp: Pointer to the axienet_local structure
 * @first_bd: Index of first descriptor to clean up
 * @nr_bds: Max number of descriptors to clean up
 * @force: Whether to clean descriptors even if not complete
 * @sizep: Pointer to a u32 filled with the total sum of all bytes
 *	   in all cleaned-up descriptors. Ignored if NULL.
 * @budget: NAPI budget (use 0 when not called from NAPI poll)
 *
 * Would either be called after a successful transmit operation, or after
 * there was an error when setting up the chain.
 * Returns the number of packets handled.
 */
static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
				 int nr_bds, bool force, u32 *sizep, int budget)
{
	struct axidma_bd *cur_p;
	unsigned int status;
	int i, packets = 0;
	dma_addr_t phys;

	for (i = 0; i < nr_bds; i++) {
		cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
		status = cur_p->status;

		/* If force is not specified, clean up only descriptors
		 * that have been completed by the MAC.
		 */
		if (!force && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
			break;

		/* Ensure we see complete descriptor update */
		dma_rmb();
		phys = desc_get_phys_addr(lp, cur_p);
		dma_unmap_single(lp->dev, phys,
				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
				 DMA_TO_DEVICE);

		if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
			napi_consume_skb(cur_p->skb, budget);
			packets++;
		}

		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
		/* ensure our transmit path and device don't prematurely see status cleared */
		wmb();
		cur_p->cntrl = 0;
		cur_p->status = 0;

		if (sizep)
			*sizep += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
	}

	if (!force) {
		lp->tx_bd_ci += i;
		if (lp->tx_bd_ci >= lp->tx_bd_num)
			lp->tx_bd_ci %= lp->tx_bd_num;
	}

	return packets;
}

/**
 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
 * @lp: Pointer to the axienet_local structure
 * @num_frag: The number of BDs to check for
 *
 * Return: 0, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked before BDs are allocated and transmission starts.
 * This function returns 0 if a BD or group of BDs can be allocated for
 * transmission. If the BD or any of the BDs are not free the function
 * returns a busy status.
 */
static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
					    int num_frag)
{
	struct axidma_bd *cur_p;

	/* Ensure we see all descriptor updates from device or TX polling */
	rmb();
	cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
			     lp->tx_bd_num];
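	/* cntrl is non-zero from the moment a buffer is queued until
	 * axienet_free_tx_chain() clears it after completion, so it
	 * doubles as a per-descriptor busy flag.
	 */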
	if (cur_p->cntrl)
		return NETDEV_TX_BUSY;
	return 0;
}

/**
 * axienet_tx_poll - Invoked once a transmit is completed by the
 * Axi DMA Tx channel.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of TX packets to process.
 *
 * Return: Number of TX packets processed.
 *
 * This function is invoked from the NAPI processing to notify the completion
 * of transmit operation. It clears fields in the corresponding Tx BDs and
 * unmaps the corresponding buffer so that CPU can regain ownership of the
 * buffer. It finally invokes "netif_wake_queue" to restart transmission if
 * required.
 */
static int axienet_tx_poll(struct napi_struct *napi, int budget)
{
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
	struct net_device *ndev = lp->ndev;
	u32 size = 0;
	int packets;

	packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
					&size, budget);

	if (packets) {
		u64_stats_update_begin(&lp->tx_stat_sync);
		u64_stats_add(&lp->tx_packets, packets);
		u64_stats_add(&lp->tx_bytes, size);
		u64_stats_update_end(&lp->tx_stat_sync);

		/* Matches barrier in axienet_start_xmit */
		smp_mb();

		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable TX completion interrupts. This should
		 * cause an immediate interrupt if any TX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
	}
	return packets;
}

/**
 * axienet_start_xmit - Starts the transmission.
 * @skb: sk_buff pointer that contains data to be Txed.
 * @ndev: Pointer to net_device structure.
 *
 * Return: NETDEV_TX_OK, on success
 *	   NETDEV_TX_BUSY, if any of the descriptors are not free
 *
 * This function is invoked from upper layers to initiate transmission. The
 * function uses the next available free BDs and populates their fields to
 * start the transmission. Additionally if checksum offloading is supported,
 * it populates AXI Stream Control fields with appropriate values.
 */
static netdev_tx_t
axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	u32 ii;
	u32 num_frag;
	u32 csum_start_off;
	u32 csum_index_off;
	skb_frag_t *frag;
	dma_addr_t tail_p, phys;
	u32 orig_tail_ptr, new_tail_ptr;
	struct axienet_local *lp = netdev_priv(ndev);
	struct axidma_bd *cur_p;

	orig_tail_ptr = lp->tx_bd_tail;
	new_tail_ptr = orig_tail_ptr;

	num_frag = skb_shinfo(skb)->nr_frags;
	cur_p = &lp->tx_bd_v[orig_tail_ptr];

	if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
		/* Should not happen as last start_xmit call should have
		 * checked for sufficient space and queue should only be
		 * woken when sufficient space is available.
		 */
		netif_stop_queue(ndev);
		if (net_ratelimit())
			netdev_warn(ndev, "TX ring unexpectedly full\n");
		return NETDEV_TX_BUSY;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
			/* Tx Full Checksum Offload Enabled */
			cur_p->app0 |= 2;
		} else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
			csum_start_off = skb_transport_offset(skb);
			csum_index_off = csum_start_off + skb->csum_offset;
			/* Tx Partial Checksum Offload Enabled */
			cur_p->app0 |= 1;
			cur_p->app1 = (csum_start_off << 16) | csum_index_off;
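			/* Worked example (a hypothetical TCP/IPv4 frame with
			 * 14-byte Ethernet and 20-byte IP headers):
			 * csum_start_off = 34 and skb->csum_offset = 16, so
			 * app1 = (34 << 16) | 50.
			 */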
		}
	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
	}

	phys = dma_map_single(lp->dev, skb->data,
			      skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(lp->dev, phys))) {
		if (net_ratelimit())
			netdev_err(ndev, "TX DMA mapping error\n");
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	desc_set_phys_addr(lp, phys, cur_p);
	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

	for (ii = 0; ii < num_frag; ii++) {
		if (++new_tail_ptr >= lp->tx_bd_num)
			new_tail_ptr = 0;
		cur_p = &lp->tx_bd_v[new_tail_ptr];
		frag = &skb_shinfo(skb)->frags[ii];
		phys = dma_map_single(lp->dev,
				      skb_frag_address(frag),
				      skb_frag_size(frag),
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(ndev, "TX DMA mapping error\n");
			ndev->stats.tx_dropped++;
			axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
					      true, NULL, 0);
			return NETDEV_TX_OK;
		}
		desc_set_phys_addr(lp, phys, cur_p);
		cur_p->cntrl = skb_frag_size(frag);
	}

	cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK;
	cur_p->skb = skb;

	tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
	if (++new_tail_ptr >= lp->tx_bd_num)
		new_tail_ptr = 0;
	WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);

	/* Start the transfer */
	axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);

	/* Stop queue if next transmit may not have space */
	if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
		netif_stop_queue(ndev);

		/* Matches barrier in axienet_tx_poll */
		smp_mb();

		/* Space might have just been freed - check again */
		if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
			netif_wake_queue(ndev);
	}

	return NETDEV_TX_OK;
}

/**
 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
 * @napi: Pointer to NAPI structure.
 * @budget: Max number of RX packets to process.
 *
 * Return: Number of RX packets processed.
 */
static int axienet_rx_poll(struct napi_struct *napi, int budget)
{
	u32 length;
	u32 csumstatus;
	u32 size = 0;
	int packets = 0;
	dma_addr_t tail_p = 0;
	struct axidma_bd *cur_p;
	struct sk_buff *skb, *new_skb;
	struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);

	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

	while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
		dma_addr_t phys;

		/* Ensure we see complete descriptor update */
		dma_rmb();

		skb = cur_p->skb;
		cur_p->skb = NULL;

		/* skb could be NULL if a previous pass already received the
		 * packet for this slot in the ring, but failed to refill it
		 * with a newly allocated buffer. In this case, don't try to
		 * receive it again.
		 */
		if (likely(skb)) {
			length = cur_p->app4 & 0x0000FFFF;

			phys = desc_get_phys_addr(lp, cur_p);
			dma_unmap_single(lp->dev, phys, lp->max_frm_size,
					 DMA_FROM_DEVICE);

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, lp->ndev);
			/*skb_checksum_none_assert(skb);*/
			skb->ip_summed = CHECKSUM_NONE;

			/* if we're doing Rx csum offload, set it up */
			if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
				csumstatus = (cur_p->app2 &
					      XAE_FULL_CSUM_STATUS_MASK) >> 3;
				if (csumstatus == XAE_IP_TCP_CSUM_VALIDATED ||
				    csumstatus == XAE_IP_UDP_CSUM_VALIDATED) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
			} else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 &&
				   skb->protocol == htons(ETH_P_IP) &&
				   skb->len > 64) {
				skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}

			napi_gro_receive(napi, skb);

			size += length;
			packets++;
		}

		new_skb = napi_alloc_skb(napi, lp->max_frm_size);
		if (!new_skb)
			break;

		phys = dma_map_single(lp->dev, new_skb->data,
				      lp->max_frm_size,
				      DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(lp->dev, phys))) {
			if (net_ratelimit())
				netdev_err(lp->ndev, "RX DMA mapping error\n");
			dev_kfree_skb(new_skb);
			break;
		}
		desc_set_phys_addr(lp, phys, cur_p);

		cur_p->cntrl = lp->max_frm_size;
		cur_p->status = 0;
		cur_p->skb = new_skb;

		/* Only update tail_p to mark this slot as usable after it has
		 * been successfully refilled.
		 */
		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

		if (++lp->rx_bd_ci >= lp->rx_bd_num)
			lp->rx_bd_ci = 0;
		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
	}

	u64_stats_update_begin(&lp->rx_stat_sync);
	u64_stats_add(&lp->rx_packets, packets);
	u64_stats_add(&lp->rx_bytes, size);
	u64_stats_update_end(&lp->rx_stat_sync);

	if (tail_p)
		axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);

	if (packets < budget && napi_complete_done(napi, packets)) {
		/* Re-enable RX completion interrupts. This should
		 * cause an immediate interrupt if any RX packets are
		 * already pending.
		 */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
	}
	return packets;
}

/**
 * axienet_tx_irq - Tx Done Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a TX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
 * TX BD processing.
 */
static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Tx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
			   (lp->tx_bd_v[lp->tx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further TX completion interrupts and schedule
		 * NAPI to handle the completions.
		 */
		u32 cr = lp->tx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_tx)) {
			axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_tx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_rx_irq - Rx Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a RX interrupt, IRQ_NONE otherwise.
 *
 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
 * processing.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);

	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		return IRQ_NONE;

	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);

	if (unlikely(status & XAXIDMA_IRQ_ERROR_MASK)) {
		netdev_err(ndev, "DMA Rx error 0x%x\n", status);
		netdev_err(ndev, "Current BD is at: 0x%x%08x\n",
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
			   (lp->rx_bd_v[lp->rx_bd_ci]).phys);
		schedule_work(&lp->dma_err_task);
	} else {
		/* Disable further RX completion interrupts and schedule
		 * NAPI receive.
		 */
		u32 cr = lp->rx_dma_cr;

		cr &= ~(XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK);
		if (napi_schedule_prep(&lp->napi_rx)) {
			axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
			__napi_schedule(&lp->napi_rx);
		}
	}

	return IRQ_HANDLED;
}

/**
 * axienet_eth_irq - Ethernet core Isr.
 * @irq: irq number
 * @_ndev: net_device pointer
 *
 * Return: IRQ_HANDLED if device generated a core interrupt, IRQ_NONE otherwise.
 *
 * Handle miscellaneous conditions indicated by Ethernet core IRQ.
 */
static irqreturn_t axienet_eth_irq(int irq, void *_ndev)
{
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	unsigned int pending;

	pending = axienet_ior(lp, XAE_IP_OFFSET);
	if (!pending)
		return IRQ_NONE;

	if (pending & XAE_INT_RXFIFOOVR_MASK)
		ndev->stats.rx_missed_errors++;

	if (pending & XAE_INT_RXRJECT_MASK)
		ndev->stats.rx_frame_errors++;

	axienet_iow(lp, XAE_IS_OFFSET, pending);
	return IRQ_HANDLED;
}

static void axienet_dma_err_handler(struct work_struct *work);

/**
 * axienet_open - Driver open routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *	   non-zero error value on failure
 *
 * This is the driver open routine. It calls phylink_start to start the
 * PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	/* When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. MDIO must be disabled before resetting.
	 * Hold MDIO bus lock to avoid MDIO accesses during the reset.
	 */
	axienet_lock_mii(lp);
	ret = axienet_device_reset(ndev);
	axienet_unlock_mii(lp);
	if (ret)
		return ret;

	ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
	if (ret) {
		dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
		return ret;
	}

	phylink_start(lp->phylink);

	/* Enable worker thread for Axi DMA error handling */
	lp->stopping = false;
	INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);

	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
			  ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable interrupts for Axi Ethernet core (if defined) */
	if (lp->eth_irq > 0) {
		ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
				  ndev->name, ndev);
		if (ret)
			goto err_eth_irq;
	}

	return 0;

err_eth_irq:
	free_irq(lp->rx_irq, ndev);
err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);
	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);
	cancel_work_sync(&lp->dma_err_task);
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev: Pointer to net_device structure
 *
 * Return: 0, on success.
 *
 * This is the driver stop routine. It calls phylink_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	WRITE_ONCE(lp->stopping, true);
	flush_work(&lp->dma_err_task);

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	phylink_stop(lp->phylink);
	phylink_disconnect_phy(lp->phylink);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

	axienet_iow(lp, XAE_IE_OFFSET, 0);

	cancel_work_sync(&lp->dma_err_task);

	if (lp->eth_irq > 0)
		free_irq(lp->eth_irq, ndev);
	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	axienet_dma_bd_release(ndev);
	return 0;
}

/**
 * axienet_change_mtu - Driver change mtu routine.
 * @ndev: Pointer to net_device structure
 * @new_mtu: New mtu value to be applied
 *
 * Return: 0 on success; -EBUSY if the interface is up; -EINVAL if the new
 * MTU would not fit in the receive buffer memory.
 *
 * This is the change mtu driver routine. It checks if the Axi Ethernet
 * hardware supports jumbo frames before changing the mtu. This can be
 * called only when the device is not up.
 */
static int axienet_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev))
		return -EBUSY;

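	/* The full frame must fit in the Rx buffer memory: e.g. (using the
	 * usual values VLAN_ETH_HLEN = 18 and XAE_TRL_SIZE = 4) an MTU of
	 * 9000 needs at least 9022 bytes of rxmem.
	 */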
	if ((new_mtu + VLAN_ETH_HLEN + XAE_TRL_SIZE) > lp->rxmem)
		return -EINVAL;

	ndev->mtu = new_mtu;

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * axienet_poll_controller - Axi Ethernet poll mechanism.
 * @ndev: Pointer to net_device structure
 *
 * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior
 * to polling the ISRs and are enabled back after the polling is done.
 */
static void axienet_poll_controller(struct net_device *ndev)
{
	struct axienet_local *lp = netdev_priv(ndev);

	disable_irq(lp->tx_irq);
	disable_irq(lp->rx_irq);
	axienet_rx_irq(lp->rx_irq, ndev);
	axienet_tx_irq(lp->tx_irq, ndev);
	enable_irq(lp->tx_irq);
	enable_irq(lp->rx_irq);
}
#endif

static int axienet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct axienet_local *lp = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;

	return phylink_mii_ioctl(lp->phylink, rq, cmd);
}

static void
axienet_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct axienet_local *lp = netdev_priv(dev);
	unsigned int start;

	netdev_stats_to_stats64(stats, &dev->stats);

	do {
		start = u64_stats_fetch_begin(&lp->rx_stat_sync);
		stats->rx_packets = u64_stats_read(&lp->rx_packets);
		stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
	} while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));

	do {
		start = u64_stats_fetch_begin(&lp->tx_stat_sync);
		stats->tx_packets = u64_stats_read(&lp->tx_packets);
		stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
	} while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
}

static const struct net_device_ops axienet_netdev_ops = {
	.ndo_open = axienet_open,
	.ndo_stop = axienet_stop,
	.ndo_start_xmit = axienet_start_xmit,
	.ndo_get_stats64 = axienet_get_stats64,
	.ndo_change_mtu = axienet_change_mtu,
	.ndo_set_mac_address = netdev_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_eth_ioctl = axienet_ioctl,
	.ndo_set_rx_mode = axienet_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = axienet_poll_controller,
#endif
};

/**
 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
 * @ndev: Pointer to net_device structure
 * @ed: Pointer to ethtool_drvinfo structure
 *
 * This implements ethtool command for getting the driver information.
 * Issue "ethtool -i ethX" under linux prompt to execute this function.
 */
static void axienet_ethtools_get_drvinfo(struct net_device *ndev,
					 struct ethtool_drvinfo *ed)
{
	strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver));
	strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
}

/**
 * axienet_ethtools_get_regs_len - Get the total regs length present in the
 *				   AxiEthernet core.
 * @ndev: Pointer to net_device structure
 *
 * This implements ethtool command for getting the total register length
 * information.
 *
 * Return: the total regs length
 */
static int axienet_ethtools_get_regs_len(struct net_device *ndev)
{
	return sizeof(u32) * AXIENET_REGS_N;
}

/**
 * axienet_ethtools_get_regs - Dump the contents of all registers present
 *			       in AxiEthernet core.
 * @ndev: Pointer to net_device structure
 * @regs: Pointer to ethtool_regs structure
 * @ret: Void pointer used to return the contents of the registers.
 *
 * This implements ethtool command for getting the Axi Ethernet register dump.
 * Issue "ethtool -d ethX" to execute this function.
 */
static void axienet_ethtools_get_regs(struct net_device *ndev,
				      struct ethtool_regs *regs, void *ret)
{
	u32 *data = (u32 *)ret;
	size_t len = sizeof(u32) * AXIENET_REGS_N;
	struct axienet_local *lp = netdev_priv(ndev);

	regs->version = 0;
	regs->len = len;

	memset(data, 0, len);
	data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
	data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
	data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
	data[3] = axienet_ior(lp, XAE_IS_OFFSET);
	data[4] = axienet_ior(lp, XAE_IP_OFFSET);
	data[5] = axienet_ior(lp, XAE_IE_OFFSET);
	data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
	data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
	data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
	data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
	data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
	data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
	data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
	data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
	data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
	data[15] = axienet_ior(lp, XAE_TC_OFFSET);
	data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
	data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
	data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
	data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
	data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
	data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
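	/* Indices 23-26 are deliberately skipped; the memset() above leaves
	 * them zeroed, as no register is exported in those slots.
	 */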
	data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
	data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
	data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
	data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
	data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
	data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
	data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
	data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
	data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
	data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
}

static void
axienet_ethtools_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ering->rx_max_pending = RX_BD_NUM_MAX;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;
	ering->tx_max_pending = TX_BD_NUM_MAX;
	ering->rx_pending = lp->rx_bd_num;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;
	ering->tx_pending = lp->tx_bd_num;
}

static int
axienet_ethtools_set_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (ering->rx_pending > RX_BD_NUM_MAX ||
	    ering->rx_mini_pending ||
	    ering->rx_jumbo_pending ||
	    ering->tx_pending < TX_BD_NUM_MIN ||
	    ering->tx_pending > TX_BD_NUM_MAX)
		return -EINVAL;

	if (netif_running(ndev))
		return -EBUSY;

	lp->rx_bd_num = ering->rx_pending;
	lp->tx_bd_num = ering->tx_pending;
	return 0;
}

/**
 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
 *				     Tx and Rx paths.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure.
 *
 * This implements ethtool command for getting axi ethernet pause frame
 * setting. Issue "ethtool -a ethX" to execute this function.
 */
static void
axienet_ethtools_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
 *				     settings.
 * @ndev: Pointer to net_device structure
 * @epauseparm: Pointer to ethtool_pauseparam structure
 *
 * This implements ethtool command for enabling flow control on Rx and Tx
 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
 * function.
 *
 * Return: 0 on success, or a negative error code propagated from phylink.
 */
static int
axienet_ethtools_set_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *epauseparm)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
}

/**
 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for getting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
 * execute this function.
 *
 * Return: 0 always
 */
static int
axienet_ethtools_get_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	ecoalesce->rx_max_coalesced_frames = lp->coalesce_count_rx;
	ecoalesce->rx_coalesce_usecs = lp->coalesce_usec_rx;
	ecoalesce->tx_max_coalesced_frames = lp->coalesce_count_tx;
	ecoalesce->tx_coalesce_usecs = lp->coalesce_usec_tx;
	return 0;
}

/**
 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
 * @ndev: Pointer to net_device structure
 * @ecoalesce: Pointer to ethtool_coalesce structure
 * @kernel_coal: ethtool CQE mode setting structure
 * @extack: extack for reporting error messages
 *
 * This implements ethtool command for setting the DMA interrupt coalescing
 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
 * prompt to execute this function.
 *
 * Return: 0, on success, Non-zero error value on failure.
 */
static int
axienet_ethtools_set_coalesce(struct net_device *ndev,
			      struct ethtool_coalesce *ecoalesce,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (netif_running(ndev)) {
		netdev_err(ndev,
			   "Please stop netif before applying configuration\n");
		return -EFAULT;
	}

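	/* A zero in any field below is treated as "leave the current
	 * setting unchanged" rather than as a literal value of zero.
	 */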
	if (ecoalesce->rx_max_coalesced_frames)
		lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
	if (ecoalesce->rx_coalesce_usecs)
		lp->coalesce_usec_rx = ecoalesce->rx_coalesce_usecs;
	if (ecoalesce->tx_max_coalesced_frames)
		lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames;
	if (ecoalesce->tx_coalesce_usecs)
		lp->coalesce_usec_tx = ecoalesce->tx_coalesce_usecs;

	return 0;
}

static int
axienet_ethtools_get_link_ksettings(struct net_device *ndev,
				    struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_get(lp->phylink, cmd);
}

static int
axienet_ethtools_set_link_ksettings(struct net_device *ndev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct axienet_local *lp = netdev_priv(ndev);

	return phylink_ethtool_ksettings_set(lp->phylink, cmd);
}

static int axienet_ethtools_nway_reset(struct net_device *dev)
{
	struct axienet_local *lp = netdev_priv(dev);

	return phylink_ethtool_nway_reset(lp->phylink);
}

static const struct ethtool_ops axienet_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES |
				     ETHTOOL_COALESCE_USECS,
	.get_drvinfo = axienet_ethtools_get_drvinfo,
	.get_regs_len = axienet_ethtools_get_regs_len,
	.get_regs = axienet_ethtools_get_regs,
	.get_link = ethtool_op_get_link,
	.get_ringparam = axienet_ethtools_get_ringparam,
	.set_ringparam = axienet_ethtools_set_ringparam,
	.get_pauseparam = axienet_ethtools_get_pauseparam,
	.set_pauseparam = axienet_ethtools_set_pauseparam,
	.get_coalesce = axienet_ethtools_get_coalesce,
	.set_coalesce = axienet_ethtools_set_coalesce,
	.get_link_ksettings = axienet_ethtools_get_link_ksettings,
	.set_link_ksettings = axienet_ethtools_set_link_ksettings,
	.nway_reset = axienet_ethtools_nway_reset,
};

static struct axienet_local *pcs_to_axienet_local(struct phylink_pcs *pcs)
{
	return container_of(pcs, struct axienet_local, pcs);
}

static void axienet_pcs_get_state(struct phylink_pcs *pcs,
				  struct phylink_link_state *state)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_get_state(pcs_phy, state);
}

static void axienet_pcs_an_restart(struct phylink_pcs *pcs)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;

	phylink_mii_c22_pcs_an_restart(pcs_phy);
}

static int axienet_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
			      phy_interface_t interface,
			      const unsigned long *advertising,
			      bool permit_pause_to_mac)
{
	struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy;
	struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev;
	struct axienet_local *lp = netdev_priv(ndev);
	int ret;

1657 if (lp->switch_x_sgmii) {
1658 ret = mdiodev_write(pcs_phy, XLNX_MII_STD_SELECT_REG,
1659 interface == PHY_INTERFACE_MODE_SGMII ?
1660 XLNX_MII_STD_SELECT_SGMII : 0);
1661 if (ret < 0) {
1662 netdev_warn(ndev,
1663 "Failed to switch PHY interface: %d\n",
1664 ret);
1665 return ret;
1666 }
1667 }
1668
1669 ret = phylink_mii_c22_pcs_config(pcs_phy, interface, advertising,
1670 neg_mode);
1671 if (ret < 0)
1672 netdev_warn(ndev, "Failed to configure PCS: %d\n", ret);
1673
1674 return ret;
1675 }
1676
1677 static const struct phylink_pcs_ops axienet_pcs_ops = {
1678 .pcs_get_state = axienet_pcs_get_state,
1679 .pcs_config = axienet_pcs_config,
1680 .pcs_an_restart = axienet_pcs_an_restart,
1681 };
1682
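/* phylink calls this to pick the PCS instance for a given interface
 * mode; only 1000BASE-X and SGMII are driven through the internal
 * PCS/PMA core, so every other mode gets no PCS at all.
 */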
static struct phylink_pcs *axienet_mac_select_pcs(struct phylink_config *config,
						  phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_1000BASEX ||
	    interface == PHY_INTERFACE_MODE_SGMII)
		return &lp->pcs;

	return NULL;
}

static void axienet_mac_config(struct phylink_config *config, unsigned int mode,
			       const struct phylink_link_state *state)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_down(struct phylink_config *config,
				  unsigned int mode,
				  phy_interface_t interface)
{
	/* nothing meaningful to do */
}

static void axienet_mac_link_up(struct phylink_config *config,
				struct phy_device *phy,
				unsigned int mode, phy_interface_t interface,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct axienet_local *lp = netdev_priv(ndev);
	u32 emmc_reg, fcc_reg;

	emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
	emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK;

	switch (speed) {
	case SPEED_1000:
		emmc_reg |= XAE_EMMC_LINKSPD_1000;
		break;
	case SPEED_100:
		emmc_reg |= XAE_EMMC_LINKSPD_100;
		break;
	case SPEED_10:
		emmc_reg |= XAE_EMMC_LINKSPD_10;
		break;
	default:
		dev_err(&ndev->dev,
			"Speed other than 10, 100 or 1000 Mbps is not supported\n");
		break;
	}

	axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);

	fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
	if (tx_pause)
		fcc_reg |= XAE_FCC_FCTX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCTX_MASK;
	if (rx_pause)
		fcc_reg |= XAE_FCC_FCRX_MASK;
	else
		fcc_reg &= ~XAE_FCC_FCRX_MASK;
	axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
}

static const struct phylink_mac_ops axienet_phylink_ops = {
	.mac_select_pcs = axienet_mac_select_pcs,
	.mac_config = axienet_mac_config,
	.mac_link_down = axienet_mac_link_down,
	.mac_link_up = axienet_mac_link_up,
};

/**
 * axienet_dma_err_handler - Work queue task for Axi DMA Error
 * @work: pointer to work_struct
 *
 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
 * Tx/Rx BDs.
 */
static void axienet_dma_err_handler(struct work_struct *work)
{
	u32 i;
	u32 axienet_status;
	struct axidma_bd *cur_p;
	struct axienet_local *lp = container_of(work, struct axienet_local,
						dma_err_task);
	struct net_device *ndev = lp->ndev;

	/* Don't bother if we are going to stop anyway */
	if (READ_ONCE(lp->stopping))
		return;

	napi_disable(&lp->napi_tx);
	napi_disable(&lp->napi_rx);

	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	axienet_dma_stop(lp);

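	/* With NAPI quiesced and the DMA engine halted, walk the Tx ring:
	 * unmap and free any buffers the hardware still owned, then zero
	 * every descriptor field so the ring can be reused from scratch.
	 */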
	for (i = 0; i < lp->tx_bd_num; i++) {
		cur_p = &lp->tx_bd_v[i];
		if (cur_p->cntrl) {
			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);

			dma_unmap_single(lp->dev, addr,
					 (cur_p->cntrl &
					  XAXIDMA_BD_CTRL_LENGTH_MASK),
					 DMA_TO_DEVICE);
		}
		if (cur_p->skb)
			dev_kfree_skb_irq(cur_p->skb);
		cur_p->phys = 0;
		cur_p->phys_msb = 0;
		cur_p->cntrl = 0;
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
		cur_p->skb = NULL;
	}

	for (i = 0; i < lp->rx_bd_num; i++) {
		cur_p = &lp->rx_bd_v[i];
		cur_p->status = 0;
		cur_p->app0 = 0;
		cur_p->app1 = 0;
		cur_p->app2 = 0;
		cur_p->app3 = 0;
		cur_p->app4 = 0;
	}

	lp->tx_bd_ci = 0;
	lp->tx_bd_tail = 0;
	lp->rx_bd_ci = 0;

	axienet_dma_start(lp);

	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
	axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
		    XAE_INT_RECV_ERROR_MASK : 0);
	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.
	 */
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	napi_enable(&lp->napi_rx);
	napi_enable(&lp->napi_tx);
	axienet_setoptions(ndev, lp->options);
}

/**
 * axienet_probe - Axi Ethernet probe function.
 * @pdev: Pointer to platform device structure.
 *
 * Return: 0 on success, non-zero error value on failure.
 *
 * This is the probe routine for the Axi Ethernet driver. It is called
 * before any other driver routines are invoked. It allocates and sets up
 * the Ethernet device, parses the device tree to populate the fields of
 * axienet_local, and registers the Ethernet device.
 */
static int axienet_probe(struct platform_device *pdev)
{
	int ret;
	struct device_node *np;
	struct axienet_local *lp;
	struct net_device *ndev;
	struct resource *ethres;
	u8 mac_addr[ETH_ALEN];
	int addr_width = 32;
	u32 value;

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->flags &= ~IFF_MULTICAST;  /* clear multicast */
	ndev->features = NETIF_F_SG;
	ndev->netdev_ops = &axienet_netdev_ops;
	ndev->ethtool_ops = &axienet_ethtool_ops;

	/* MTU range: 64 - 9000 */
	ndev->min_mtu = 64;
	ndev->max_mtu = XAE_JUMBO_MTU;

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->dev = &pdev->dev;
	lp->options = XAE_OPTION_DEFAULTS;
	lp->rx_bd_num = RX_BD_NUM_DEFAULT;
	lp->tx_bd_num = TX_BD_NUM_DEFAULT;

	u64_stats_init(&lp->rx_stat_sync);
	u64_stats_init(&lp->tx_stat_sync);

	netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
	netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);

	lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
	if (!lp->axi_clk) {
		/* For backward compatibility, if named AXI clock is not present,
		 * treat the first clock specified as the AXI clock.
		 */
		lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
	}
	if (IS_ERR(lp->axi_clk)) {
		ret = PTR_ERR(lp->axi_clk);
		goto free_netdev;
	}
	ret = clk_prepare_enable(lp->axi_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret);
		goto free_netdev;
	}

	lp->misc_clks[0].id = "axis_clk";
	lp->misc_clks[1].id = "ref_clk";
	lp->misc_clks[2].id = "mgt_clk";

	ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	if (ret)
		goto cleanup_clk;

	/* Map device registers */
	lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
	if (IS_ERR(lp->regs)) {
		ret = PTR_ERR(lp->regs);
		goto cleanup_clk;
	}
	lp->regs_start = ethres->start;
	/* Setup checksum offload, but default to off if not specified */
	lp->features = 0;

	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_PARTIAL_TX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		case 2:
			lp->csum_offload_on_tx_path =
				XAE_FEATURE_FULL_TX_CSUM;
			lp->features |= XAE_FEATURE_FULL_TX_CSUM;
			/* Can checksum TCP/UDP over IPv4. */
			ndev->features |= NETIF_F_IP_CSUM;
			break;
		default:
			lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value);
	if (!ret) {
		switch (value) {
		case 1:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_PARTIAL_RX_CSUM;
			lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
			break;
		case 2:
			lp->csum_offload_on_rx_path =
				XAE_FEATURE_FULL_RX_CSUM;
			lp->features |= XAE_FEATURE_FULL_RX_CSUM;
			break;
		default:
			lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD;
		}
	}
	/* To support jumbo frames, the Axi Ethernet hardware must be built
	 * with sufficiently large Rx/Tx memory. Read the Rx memory size the
	 * hardware was configured with from the device tree so the jumbo
	 * option can be enabled accordingly.
	 */
	of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);

	lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
						   "xlnx,switch-x-sgmii");

	/* Start with the proprietary, and broken, phy_type property */
	ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value);
	if (!ret) {
		netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode");
		switch (value) {
		case XAE_PHY_TYPE_MII:
			lp->phy_mode = PHY_INTERFACE_MODE_MII;
			break;
		case XAE_PHY_TYPE_GMII:
			lp->phy_mode = PHY_INTERFACE_MODE_GMII;
			break;
		case XAE_PHY_TYPE_RGMII_2_0:
			lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
			break;
		case XAE_PHY_TYPE_SGMII:
			lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
			break;
		case XAE_PHY_TYPE_1000BASE_X:
			lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
			break;
		default:
			ret = -EINVAL;
			goto cleanup_clk;
		}
	} else {
		ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
		if (ret)
			goto cleanup_clk;
	}
	if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
	    lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
		dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}
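
	/* A hypothetical device-tree fragment selecting the MAC-to-PHY
	 * interface with the preferred generic phy-mode property instead
	 * of the deprecated xlnx,phy-type (node and label names are made
	 * up for illustration):
	 *
	 *   ethernet@40c00000 {
	 *           compatible = "xlnx,axi-ethernet-1.00.a";
	 *           phy-mode = "sgmii";
	 *           phy-handle = <&phy0>;
	 *   };
	 */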

	/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
	np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
	if (np) {
		struct resource dmares;

		ret = of_address_to_resource(np, 0, &dmares);
		if (ret) {
			dev_err(&pdev->dev,
				"unable to get DMA resource\n");
			of_node_put(np);
			goto cleanup_clk;
		}
		lp->dma_regs = devm_ioremap_resource(&pdev->dev,
						     &dmares);
		lp->rx_irq = irq_of_parse_and_map(np, 1);
		lp->tx_irq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
		lp->eth_irq = platform_get_irq_optional(pdev, 0);
	} else {
		/* Check for these resources directly on the Ethernet node. */
		lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
		lp->rx_irq = platform_get_irq(pdev, 1);
		lp->tx_irq = platform_get_irq(pdev, 0);
		lp->eth_irq = platform_get_irq_optional(pdev, 2);
	}
	if (IS_ERR(lp->dma_regs)) {
		dev_err(&pdev->dev, "could not map DMA regs\n");
		ret = PTR_ERR(lp->dma_regs);
		goto cleanup_clk;
	}
	if ((lp->rx_irq <= 0) || (lp->tx_irq <= 0)) {
		dev_err(&pdev->dev, "could not determine irqs\n");
		ret = -ENOMEM;
		goto cleanup_clk;
	}

	/* Reset core now that clocks are enabled, prior to accessing MDIO */
	ret = __axienet_device_reset(lp);
	if (ret)
		goto cleanup_clk;

	/* Autodetect the need for 64-bit DMA pointers.
	 * When the IP is configured for a bus width bigger than 32 bits,
	 * writing the MSB registers is mandatory, even if they are all 0.
	 * We can detect this case by writing all 1's to one such register
	 * and see if that sticks: when the IP is configured for 32 bits
	 * only, those registers are RES0.
	 * Those MSB registers were introduced in IP v7.1, which we check first.
	 */
	if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
		void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;

		iowrite32(0x0, desc);
		if (ioread32(desc) == 0) {	/* sanity check */
			iowrite32(0xffffffff, desc);
			if (ioread32(desc) > 0) {
				lp->features |= XAE_FEATURE_DMA_64BIT;
				addr_width = 64;
				dev_info(&pdev->dev,
					 "autodetected 64-bit DMA range\n");
			}
			iowrite32(0x0, desc);
		}
	}
	if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n");
		ret = -EINVAL;
		goto cleanup_clk;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width));
	if (ret) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto cleanup_clk;
	}

	/* Check for Ethernet core IRQ (optional) */
	if (lp->eth_irq <= 0)
		dev_info(&pdev->dev, "Ethernet core IRQ not defined\n");

	/* Retrieve the MAC address */
	ret = of_get_mac_address(pdev->dev.of_node, mac_addr);
	if (!ret) {
		axienet_set_mac_address(ndev, mac_addr);
	} else {
		dev_warn(&pdev->dev, "could not find MAC address property: %d\n",
			 ret);
		axienet_set_mac_address(ndev, NULL);
	}

	lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD;
	lp->coalesce_usec_rx = XAXIDMA_DFT_RX_USEC;
	lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD;
	lp->coalesce_usec_tx = XAXIDMA_DFT_TX_USEC;
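	/* These are only the boot-time defaults; they can be overridden at
	 * run time through the ethtool -C interface handled by
	 * axienet_ethtools_set_coalesce() above.
	 */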

	ret = axienet_mdio_setup(lp);
	if (ret)
		dev_warn(&pdev->dev,
			 "error registering MDIO bus: %d\n", ret);

	if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
	    lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
		np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0);
		if (!np) {
			/* Deprecated: Always use "pcs-handle" for pcs_phy.
			 * Falling back to "phy-handle" here is only for
			 * backward compatibility with old device trees.
			 */
			np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
		}
		if (!np) {
			dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n");
			ret = -EINVAL;
			goto cleanup_mdio;
		}
		lp->pcs_phy = of_mdio_find_device(np);
		if (!lp->pcs_phy) {
			ret = -EPROBE_DEFER;
			of_node_put(np);
			goto cleanup_mdio;
		}
		of_node_put(np);
		lp->pcs.ops = &axienet_pcs_ops;
		lp->pcs.neg_mode = true;
		lp->pcs.poll = true;
	}
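
	/* A hypothetical device-tree fragment wiring up the PCS/PMA PHY
	 * with the preferred pcs-handle property (labels are made up for
	 * illustration):
	 *
	 *   &axi_ethernet {
	 *           phy-mode = "1000base-x";
	 *           pcs-handle = <&pcs_phy0>;
	 *   };
	 */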

	lp->phylink_config.dev = &ndev->dev;
	lp->phylink_config.type = PHYLINK_NETDEV;
	lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
		MAC_10FD | MAC_100FD | MAC_1000FD;

	__set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
	if (lp->switch_x_sgmii) {
		__set_bit(PHY_INTERFACE_MODE_1000BASEX,
			  lp->phylink_config.supported_interfaces);
		__set_bit(PHY_INTERFACE_MODE_SGMII,
			  lp->phylink_config.supported_interfaces);
	}

	lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
				     lp->phy_mode,
				     &axienet_phylink_ops);
	if (IS_ERR(lp->phylink)) {
		ret = PTR_ERR(lp->phylink);
		dev_err(&pdev->dev, "phylink_create error (%i)\n", ret);
		goto cleanup_mdio;
	}

	ret = register_netdev(lp->ndev);
	if (ret) {
		dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
		goto cleanup_phylink;
	}

	return 0;

cleanup_phylink:
	phylink_destroy(lp->phylink);

cleanup_mdio:
	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);
	if (lp->mii_bus)
		axienet_mdio_teardown(lp);
cleanup_clk:
	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

free_netdev:
	free_netdev(ndev);

	return ret;
}

static int axienet_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct axienet_local *lp = netdev_priv(ndev);

	unregister_netdev(ndev);

	if (lp->phylink)
		phylink_destroy(lp->phylink);

	if (lp->pcs_phy)
		put_device(&lp->pcs_phy->dev);

	axienet_mdio_teardown(lp);

	clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
	clk_disable_unprepare(lp->axi_clk);

	free_netdev(ndev);

	return 0;
}

static void axienet_shutdown(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	rtnl_lock();
	netif_device_detach(ndev);

	if (netif_running(ndev))
		dev_close(ndev);

	rtnl_unlock();
}

static int axienet_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	netif_device_detach(ndev);

	rtnl_lock();
	axienet_stop(ndev);
	rtnl_unlock();

	return 0;
}

static int axienet_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (!netif_running(ndev))
		return 0;

	rtnl_lock();
	axienet_open(ndev);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
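
/* Suspend simply detaches and closes a running interface; resume reopens
 * it, so all MAC/DMA state is rebuilt from scratch by axienet_open()
 * rather than being saved and restored register by register.
 */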

static DEFINE_SIMPLE_DEV_PM_OPS(axienet_pm_ops,
				axienet_suspend, axienet_resume);

static struct platform_driver axienet_driver = {
	.probe = axienet_probe,
	.remove = axienet_remove,
	.shutdown = axienet_shutdown,
	.driver = {
		.name = "xilinx_axienet",
		.pm = &axienet_pm_ops,
		.of_match_table = axienet_of_match,
	},
};

module_platform_driver(axienet_driver);

MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");
MODULE_AUTHOR("Xilinx");
MODULE_LICENSE("GPL");