// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Actions Semi Owl SoCs Ethernet MAC driver
 *
 * Copyright (c) 2012 Actions Semi Inc.
 * Copyright (c) 2021 Cristian Ciocaltea <cristian.ciocaltea@gmail.com>
 */

#include <linux/circ_buf.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/reset.h>

#include "owl-emac.h"

#define OWL_EMAC_DEFAULT_MSG_ENABLE	(NETIF_MSG_DRV | \
					 NETIF_MSG_PROBE | \
					 NETIF_MSG_LINK)

static u32 owl_emac_reg_read(struct owl_emac_priv *priv, u32 reg)
{
	return readl(priv->base + reg);
}

static void owl_emac_reg_write(struct owl_emac_priv *priv, u32 reg, u32 data)
{
	writel(data, priv->base + reg);
}

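/*
 * Read-modify-write helper: returns the previous value of the masked bits,
 * allowing callers to save a piece of hardware state (e.g. the CSR6 DMA
 * start/stop bits) and restore it later via another update call.
 */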
static u32 owl_emac_reg_update(struct owl_emac_priv *priv,
			       u32 reg, u32 mask, u32 val)
{
	u32 data, old_val;

	data = owl_emac_reg_read(priv, reg);
	old_val = data & mask;

	data &= ~mask;
	data |= val & mask;

	owl_emac_reg_write(priv, reg, data);

	return old_val;
}

static void owl_emac_reg_set(struct owl_emac_priv *priv, u32 reg, u32 bits)
{
	owl_emac_reg_update(priv, reg, bits, bits);
}

static void owl_emac_reg_clear(struct owl_emac_priv *priv, u32 reg, u32 bits)
{
	owl_emac_reg_update(priv, reg, bits, 0);
}

static struct device *owl_emac_get_dev(struct owl_emac_priv *priv)
{
	return priv->netdev->dev.parent;
}

static void owl_emac_irq_enable(struct owl_emac_priv *priv)
{
	/* Enable all interrupts except TU.
	 *
	 * Note the NIE and AIE bits shall also be set in order to actually
	 * enable the selected interrupts.
	 */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
			   OWL_EMAC_BIT_MAC_CSR7_NIE |
			   OWL_EMAC_BIT_MAC_CSR7_AIE |
			   OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
}

static void owl_emac_irq_disable(struct owl_emac_priv *priv)
{
	/* Disable all interrupts.
	 *
	 * WARNING: Unset only the NIE and AIE bits in CSR7 to work around an
	 * unexpected side effect (MAC hardware bug?!) where some bits in the
	 * status register (CSR5) are cleared automatically before being able
	 * to read them via owl_emac_irq_clear().
	 */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR7,
			   OWL_EMAC_BIT_MAC_CSR7_ALL_NOT_TUE);
}

static u32 owl_emac_irq_status(struct owl_emac_priv *priv)
{
	return owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR5);
}

static u32 owl_emac_irq_clear(struct owl_emac_priv *priv)
{
	u32 val = owl_emac_irq_status(priv);

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR5, val);

	return val;
}

static dma_addr_t owl_emac_dma_map_rx(struct owl_emac_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = owl_emac_get_dev(priv);

	/* Buffer pointer for the RX DMA descriptor must be word aligned. */
	return dma_map_single(dev, skb_tail_pointer(skb),
			      skb_tailroom(skb), DMA_FROM_DEVICE);
}

static void owl_emac_dma_unmap_rx(struct owl_emac_priv *priv,
				  struct sk_buff *skb, dma_addr_t dma_addr)
{
	struct device *dev = owl_emac_get_dev(priv);

	dma_unmap_single(dev, dma_addr, skb_tailroom(skb), DMA_FROM_DEVICE);
}

static dma_addr_t owl_emac_dma_map_tx(struct owl_emac_priv *priv,
				      struct sk_buff *skb)
{
	struct device *dev = owl_emac_get_dev(priv);

	return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
}

static void owl_emac_dma_unmap_tx(struct owl_emac_priv *priv,
				  struct sk_buff *skb, dma_addr_t dma_addr)
{
	struct device *dev = owl_emac_get_dev(priv);

	dma_unmap_single(dev, dma_addr, skb_headlen(skb), DMA_TO_DEVICE);
}

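/*
 * The RX/TX rings are simple head/tail circular buffers: the head is where
 * the driver queues new descriptors and the tail is the oldest descriptor
 * waiting to be completed by the hardware.  The index arithmetic below
 * assumes the ring size is a power of two.
 */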
static unsigned int owl_emac_ring_num_unused(struct owl_emac_ring *ring)
{
	return CIRC_SPACE(ring->head, ring->tail, ring->size);
}

static unsigned int owl_emac_ring_get_next(struct owl_emac_ring *ring,
					   unsigned int cur)
{
	return (cur + 1) & (ring->size - 1);
}

static void owl_emac_ring_push_head(struct owl_emac_ring *ring)
{
	ring->head = owl_emac_ring_get_next(ring, ring->head);
}

static void owl_emac_ring_pop_tail(struct owl_emac_ring *ring)
{
	ring->tail = owl_emac_ring_get_next(ring, ring->tail);
}

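/*
 * Allocate an skb large enough for a maximum-sized RX frame, with extra
 * OWL_EMAC_SKB_RESERVE bytes of room so that skb->data can be aligned to
 * OWL_EMAC_SKB_ALIGN as required by the DMA descriptors.
 */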
static struct sk_buff *owl_emac_alloc_skb(struct net_device *netdev)
{
	struct sk_buff *skb;
	int offset;

	skb = netdev_alloc_skb(netdev, OWL_EMAC_RX_FRAME_MAX_LEN +
			       OWL_EMAC_SKB_RESERVE);
	if (unlikely(!skb))
		return NULL;

	/* Ensure 4-byte DMA alignment. */
	offset = ((uintptr_t)skb->data) & (OWL_EMAC_SKB_ALIGN - 1);
	if (unlikely(offset))
		skb_reserve(skb, OWL_EMAC_SKB_ALIGN - offset);

	return skb;
}

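/*
 * Populate the RX ring: every descriptor gets a freshly allocated and DMA
 * mapped skb, its buffer size is written to RDES1 and ownership is handed
 * to the MAC via RDES0_OWN.  The last descriptor is tagged with RDES1_RER
 * to mark the end of the ring.
 */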
static int owl_emac_ring_prepare_rx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	struct device *dev = owl_emac_get_dev(priv);
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int i;

	for (i = 0; i < ring->size; i++) {
		skb = owl_emac_alloc_skb(netdev);
		if (!skb)
			return -ENOMEM;

		dma_addr = owl_emac_dma_map_rx(priv, skb);
		if (dma_mapping_error(dev, dma_addr)) {
			dev_kfree_skb(skb);
			return -ENOMEM;
		}

		desc = &ring->descs[i];
		desc->status = OWL_EMAC_BIT_RDES0_OWN;
		desc->control = skb_tailroom(skb) & OWL_EMAC_MSK_RDES1_RBS1;
		desc->buf_addr = dma_addr;
		desc->reserved = 0;

		ring->skbs[i] = skb;
		ring->skbs_dma[i] = dma_addr;
	}

	desc->control |= OWL_EMAC_BIT_RDES1_RER;

	ring->head = 0;
	ring->tail = 0;

	return 0;
}

static void owl_emac_ring_prepare_tx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct owl_emac_ring_desc *desc;
	int i;

	for (i = 0; i < ring->size; i++) {
		desc = &ring->descs[i];

		desc->status = 0;
		desc->control = OWL_EMAC_BIT_TDES1_IC;
		desc->buf_addr = 0;
		desc->reserved = 0;
	}

	desc->control |= OWL_EMAC_BIT_TDES1_TER;

	memset(ring->skbs_dma, 0, sizeof(dma_addr_t) * ring->size);

	ring->head = 0;
	ring->tail = 0;
}

static void owl_emac_ring_unprepare_rx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		ring->descs[i].status = 0;

		if (!ring->skbs_dma[i])
			continue;

		owl_emac_dma_unmap_rx(priv, ring->skbs[i], ring->skbs_dma[i]);
		ring->skbs_dma[i] = 0;

		dev_kfree_skb(ring->skbs[i]);
		ring->skbs[i] = NULL;
	}
}

static void owl_emac_ring_unprepare_tx(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	int i;

	for (i = 0; i < ring->size; i++) {
		ring->descs[i].status = 0;

		if (!ring->skbs_dma[i])
			continue;

		owl_emac_dma_unmap_tx(priv, ring->skbs[i], ring->skbs_dma[i]);
		ring->skbs_dma[i] = 0;

		dev_kfree_skb(ring->skbs[i]);
		ring->skbs[i] = NULL;
	}
}

static int owl_emac_ring_alloc(struct device *dev, struct owl_emac_ring *ring,
			       unsigned int size)
{
	ring->descs = dmam_alloc_coherent(dev,
					  sizeof(struct owl_emac_ring_desc) * size,
					  &ring->descs_dma, GFP_KERNEL);
	if (!ring->descs)
		return -ENOMEM;

	ring->skbs = devm_kcalloc(dev, size, sizeof(struct sk_buff *),
				  GFP_KERNEL);
	if (!ring->skbs)
		return -ENOMEM;

	ring->skbs_dma = devm_kcalloc(dev, size, sizeof(dma_addr_t),
				      GFP_KERNEL);
	if (!ring->skbs_dma)
		return -ENOMEM;

	ring->size = size;

	return 0;
}

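/*
 * DMA command helpers: writing the poll demand values to CSR2/CSR1 asks the
 * MAC to resume RX/TX descriptor processing, while the CSR6 helpers toggle
 * the start/stop bits and return the previous state so callers can restore
 * it afterwards.
 */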
static void owl_emac_dma_cmd_resume_rx(struct owl_emac_priv *priv)
{
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR2,
			   OWL_EMAC_VAL_MAC_CSR2_RPD);
}

static void owl_emac_dma_cmd_resume_tx(struct owl_emac_priv *priv)
{
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR1,
			   OWL_EMAC_VAL_MAC_CSR1_TPD);
}

static u32 owl_emac_dma_cmd_set_tx(struct owl_emac_priv *priv, u32 status)
{
	return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
				   OWL_EMAC_BIT_MAC_CSR6_ST, status);
}

static u32 owl_emac_dma_cmd_start_tx(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set_tx(priv, ~0);
}

static u32 owl_emac_dma_cmd_set(struct owl_emac_priv *priv, u32 status)
{
	return owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
				   OWL_EMAC_MSK_MAC_CSR6_STSR, status);
}

static u32 owl_emac_dma_cmd_start(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set(priv, ~0);
}

static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
{
	return owl_emac_dma_cmd_set(priv, 0);
}

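/*
 * Program the interface MAC address into the hardware: the first two octets
 * go into CSR17 and the remaining four into CSR16.
 */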
static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	u8 *mac_addr = netdev->dev_addr;
	u32 addr_high, addr_low;

	addr_high = mac_addr[0] << 8 | mac_addr[1];
	addr_low = mac_addr[2] << 24 | mac_addr[3] << 16 |
		   mac_addr[4] << 8 | mac_addr[5];

	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR17, addr_high);
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR16, addr_low);
}

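/*
 * Propagate the PHY-resolved link parameters to the MAC: program the CSR20
 * flow control bits when pause has been negotiated, then briefly stop both
 * DMA engines while the CSR6 speed/duplex bits are updated and restore the
 * previous DMA state afterwards.
 */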
static void owl_emac_update_link_state(struct owl_emac_priv *priv)
{
	u32 val, status;

	if (priv->pause) {
		val = OWL_EMAC_BIT_MAC_CSR20_FCE | OWL_EMAC_BIT_MAC_CSR20_TUE;
		val |= OWL_EMAC_BIT_MAC_CSR20_TPE | OWL_EMAC_BIT_MAC_CSR20_RPE;
		val |= OWL_EMAC_BIT_MAC_CSR20_BPE;
	} else {
		val = 0;
	}

	/* Update flow control. */
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR20, val);

	val = (priv->speed == SPEED_100) ? OWL_EMAC_VAL_MAC_CSR6_SPEED_100M :
					   OWL_EMAC_VAL_MAC_CSR6_SPEED_10M;
	val <<= OWL_EMAC_OFF_MAC_CSR6_SPEED;

	if (priv->duplex == DUPLEX_FULL)
		val |= OWL_EMAC_BIT_MAC_CSR6_FD;

	spin_lock_bh(&priv->lock);

	/* Temporarily stop DMA TX & RX. */
	status = owl_emac_dma_cmd_stop(priv);

	/* Update operation modes. */
	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
			    OWL_EMAC_MSK_MAC_CSR6_SPEED |
			    OWL_EMAC_BIT_MAC_CSR6_FD, val);

	/* Restore DMA TX & RX status. */
	owl_emac_dma_cmd_set(priv, status);

	spin_unlock_bh(&priv->lock);
}

static void owl_emac_adjust_link(struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	bool state_changed = false;

	if (phydev->link) {
		if (!priv->link) {
			priv->link = phydev->link;
			state_changed = true;
		}

		if (priv->speed != phydev->speed) {
			priv->speed = phydev->speed;
			state_changed = true;
		}

		if (priv->duplex != phydev->duplex) {
			priv->duplex = phydev->duplex;
			state_changed = true;
		}

		if (priv->pause != phydev->pause) {
			priv->pause = phydev->pause;
			state_changed = true;
		}
	} else {
		if (priv->link) {
			priv->link = phydev->link;
			state_changed = true;
		}
	}

	if (state_changed) {
		if (phydev->link)
			owl_emac_update_link_state(priv);

		if (netif_msg_link(priv))
			phy_print_status(phydev);
	}
}

static irqreturn_t owl_emac_handle_irq(int irq, void *data)
{
	struct net_device *netdev = data;
	struct owl_emac_priv *priv = netdev_priv(netdev);

	if (netif_running(netdev)) {
		owl_emac_irq_disable(priv);
		napi_schedule(&priv->napi);
	}

	return IRQ_HANDLED;
}

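/*
 * Expand a 6-byte Ethernet address into the layout expected inside a setup
 * frame: three 32-bit words, each holding consecutive 16 bits of the address
 * in its lower half word, i.e. 12 bytes per address.
 */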
static void owl_emac_ether_addr_push(u8 **dst, const u8 *src)
{
	u32 *a = (u32 *)(*dst);
	const u16 *b = (const u16 *)src;

	a[0] = b[0];
	a[1] = b[1];
	a[2] = b[2];

	*dst += 12;
}

static void
owl_emac_setup_frame_prepare(struct owl_emac_priv *priv, struct sk_buff *skb)
{
	const u8 bcast_addr[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	const u8 *mac_addr = priv->netdev->dev_addr;
	u8 *frame;
	int i;

	skb_put(skb, OWL_EMAC_SETUP_FRAME_LEN);

	frame = skb->data;
	memset(frame, 0, skb->len);

	owl_emac_ether_addr_push(&frame, mac_addr);
	owl_emac_ether_addr_push(&frame, bcast_addr);

	/* Fill multicast addresses. */
	WARN_ON(priv->mcaddr_list.count >= OWL_EMAC_MAX_MULTICAST_ADDRS);
	for (i = 0; i < priv->mcaddr_list.count; i++) {
		mac_addr = priv->mcaddr_list.addrs[i];
		owl_emac_ether_addr_push(&frame, mac_addr);
	}
}

/* The setup frame is a special descriptor which is used to provide physical
 * addresses (i.e. mac, broadcast and multicast) to the MAC hardware for
 * filtering purposes. To be recognized as a setup frame, the TDES1_SET bit
 * must be set in the TX descriptor control field.
 */
static int owl_emac_setup_frame_xmit(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	unsigned int tx_head;
	u32 status, control;
	dma_addr_t dma_addr;
	int ret;

	skb = owl_emac_alloc_skb(netdev);
	if (!skb)
		return -ENOMEM;

	owl_emac_setup_frame_prepare(priv, skb);

	dma_addr = owl_emac_dma_map_tx(priv, skb);
	if (dma_mapping_error(owl_emac_get_dev(priv), dma_addr)) {
		ret = -ENOMEM;
		goto err_free_skb;
	}

	spin_lock_bh(&priv->lock);

	tx_head = ring->head;
	desc = &ring->descs[tx_head];

	status = READ_ONCE(desc->status);
	control = READ_ONCE(desc->control);
	dma_rmb(); /* Ensure data has been read before used. */

	if (unlikely(status & OWL_EMAC_BIT_TDES0_OWN) ||
	    !owl_emac_ring_num_unused(ring)) {
		spin_unlock_bh(&priv->lock);
		owl_emac_dma_unmap_tx(priv, skb, dma_addr);
		ret = -EBUSY;
		goto err_free_skb;
	}

	ring->skbs[tx_head] = skb;
	ring->skbs_dma[tx_head] = dma_addr;

	control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
	control |= OWL_EMAC_BIT_TDES1_SET;
	control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;

	WRITE_ONCE(desc->control, control);
	WRITE_ONCE(desc->buf_addr, dma_addr);
	dma_wmb(); /* Flush descriptor before changing ownership. */
	WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);

	owl_emac_ring_push_head(ring);

	/* Temporarily enable DMA TX. */
	status = owl_emac_dma_cmd_start_tx(priv);

	/* Trigger setup frame processing. */
	owl_emac_dma_cmd_resume_tx(priv);

	/* Restore DMA TX status. */
	owl_emac_dma_cmd_set_tx(priv, status);

	/* Stop regular TX until setup frame is processed. */
	netif_stop_queue(netdev);

	spin_unlock_bh(&priv->lock);

	return 0;

err_free_skb:
	dev_kfree_skb(skb);
	return ret;
}

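/*
 * Regular TX path: map the skb, claim the descriptor at the ring head, set
 * FS/LS and the buffer length in TDES1, hand ownership to the MAC through
 * TDES0_OWN and issue a TX poll demand.  See the FIXME below for why the
 * queue is stopped after every frame.
 */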
static netdev_tx_t owl_emac_ndo_start_xmit(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct owl_emac_priv *priv = netdev_priv(netdev);
	struct device *dev = owl_emac_get_dev(priv);
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct owl_emac_ring_desc *desc;
	unsigned int tx_head;
	u32 status, control;
	dma_addr_t dma_addr;

	dma_addr = owl_emac_dma_map_tx(priv, skb);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err_ratelimited(&netdev->dev, "TX DMA mapping failed\n");
		dev_kfree_skb(skb);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	spin_lock_bh(&priv->lock);

	tx_head = ring->head;
	desc = &ring->descs[tx_head];

	status = READ_ONCE(desc->status);
	control = READ_ONCE(desc->control);
	dma_rmb(); /* Ensure data has been read before used. */

	if (!owl_emac_ring_num_unused(ring) ||
	    unlikely(status & OWL_EMAC_BIT_TDES0_OWN)) {
		netif_stop_queue(netdev);
		spin_unlock_bh(&priv->lock);

		dev_dbg_ratelimited(&netdev->dev, "TX buffer full, status=0x%08x\n",
				    owl_emac_irq_status(priv));
		owl_emac_dma_unmap_tx(priv, skb, dma_addr);
		netdev->stats.tx_dropped++;
		return NETDEV_TX_BUSY;
	}

	ring->skbs[tx_head] = skb;
	ring->skbs_dma[tx_head] = dma_addr;

	control &= OWL_EMAC_BIT_TDES1_IC | OWL_EMAC_BIT_TDES1_TER; /* Maintain bits */
	control |= OWL_EMAC_BIT_TDES1_FS | OWL_EMAC_BIT_TDES1_LS;
	control |= OWL_EMAC_MSK_TDES1_TBS1 & skb->len;

	WRITE_ONCE(desc->control, control);
	WRITE_ONCE(desc->buf_addr, dma_addr);
	dma_wmb(); /* Flush descriptor before changing ownership. */
	WRITE_ONCE(desc->status, OWL_EMAC_BIT_TDES0_OWN);

	owl_emac_dma_cmd_resume_tx(priv);
	owl_emac_ring_push_head(ring);

	/* FIXME: The transmission is currently restricted to a single frame
	 * at a time as a workaround for a MAC hardware bug that causes random
	 * freeze of the TX queue processor.
	 */
	netif_stop_queue(netdev);

	spin_unlock_bh(&priv->lock);

	return NETDEV_TX_OK;
}

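/*
 * Reclaim the oldest TX descriptor once the MAC has cleared its OWN bit:
 * update the error/success counters based on TDES0, unmap and free the skb,
 * advance the ring tail and wake the queue.  Returns false if the descriptor
 * is still owned by the hardware.
 */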
static bool owl_emac_tx_complete_tail(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *skb;
	unsigned int tx_tail;
	u32 status;

	tx_tail = ring->tail;
	desc = &ring->descs[tx_tail];

	status = READ_ONCE(desc->status);
	dma_rmb(); /* Ensure data has been read before used. */

	if (status & OWL_EMAC_BIT_TDES0_OWN)
		return false;

	/* Check for errors. */
	if (status & OWL_EMAC_BIT_TDES0_ES) {
		dev_dbg_ratelimited(&netdev->dev,
				    "TX complete error status: 0x%08x\n",
				    status);

		netdev->stats.tx_errors++;

		if (status & OWL_EMAC_BIT_TDES0_UF)
			netdev->stats.tx_fifo_errors++;

		if (status & OWL_EMAC_BIT_TDES0_EC)
			netdev->stats.tx_aborted_errors++;

		if (status & OWL_EMAC_BIT_TDES0_LC)
			netdev->stats.tx_window_errors++;

		if (status & OWL_EMAC_BIT_TDES0_NC)
			netdev->stats.tx_heartbeat_errors++;

		if (status & OWL_EMAC_BIT_TDES0_LO)
			netdev->stats.tx_carrier_errors++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += ring->skbs[tx_tail]->len;
	}

	/* Some collisions occurred, but the packet has been transmitted. */
	if (status & OWL_EMAC_BIT_TDES0_DE)
		netdev->stats.collisions++;

	skb = ring->skbs[tx_tail];
	owl_emac_dma_unmap_tx(priv, skb, ring->skbs_dma[tx_tail]);
	dev_kfree_skb(skb);

	ring->skbs[tx_tail] = NULL;
	ring->skbs_dma[tx_tail] = 0;

	owl_emac_ring_pop_tail(ring);

	if (unlikely(netif_queue_stopped(netdev)))
		netif_wake_queue(netdev);

	return true;
}

static void owl_emac_tx_complete(struct owl_emac_priv *priv)
{
	struct owl_emac_ring *ring = &priv->tx_ring;
	struct net_device *netdev = priv->netdev;
	unsigned int tx_next;
	u32 status;

	spin_lock(&priv->lock);

	while (ring->tail != ring->head) {
		if (!owl_emac_tx_complete_tail(priv))
			break;
	}

	/* FIXME: This is a workaround for a MAC hardware bug not clearing
	 * (sometimes) the OWN bit for a transmitted frame descriptor.
	 *
	 * At this point, when TX queue is full, the tail descriptor has the
	 * OWN bit set, which normally means the frame has not been processed
	 * or transmitted yet. But if there is at least one descriptor in the
	 * queue having the OWN bit cleared, we can safely assume the tail
	 * frame has been also processed by the MAC hardware.
	 *
	 * If that's the case, let's force the frame completion by manually
	 * clearing the OWN bit.
	 */
	if (unlikely(!owl_emac_ring_num_unused(ring))) {
		tx_next = ring->tail;

		while ((tx_next = owl_emac_ring_get_next(ring, tx_next)) != ring->head) {
			status = READ_ONCE(ring->descs[tx_next].status);
			dma_rmb(); /* Ensure data has been read before used. */

			if (status & OWL_EMAC_BIT_TDES0_OWN)
				continue;

			netdev_dbg(netdev, "Found uncleared TX desc OWN bit\n");

			status = READ_ONCE(ring->descs[ring->tail].status);
			dma_rmb(); /* Ensure data has been read before used. */
			status &= ~OWL_EMAC_BIT_TDES0_OWN;
			WRITE_ONCE(ring->descs[ring->tail].status, status);

			owl_emac_tx_complete_tail(priv);
			break;
		}
	}

	spin_unlock(&priv->lock);
}

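/*
 * RX processing: for each completed descriptor a replacement skb is allocated
 * and DMA mapped before the received one is handed up the stack, so the ring
 * can always be refilled.  On frame errors or allocation/mapping failures the
 * current buffer is simply recycled back into the ring.
 */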
static int owl_emac_rx_process(struct owl_emac_priv *priv, int budget)
{
	struct owl_emac_ring *ring = &priv->rx_ring;
	struct device *dev = owl_emac_get_dev(priv);
	struct net_device *netdev = priv->netdev;
	struct owl_emac_ring_desc *desc;
	struct sk_buff *curr_skb, *new_skb;
	dma_addr_t curr_dma, new_dma;
	unsigned int rx_tail, len;
	u32 status;
	int recv = 0;

	while (recv < budget) {
		spin_lock(&priv->lock);

		rx_tail = ring->tail;
		desc = &ring->descs[rx_tail];

		status = READ_ONCE(desc->status);
		dma_rmb(); /* Ensure data has been read before used. */

		if (status & OWL_EMAC_BIT_RDES0_OWN) {
			spin_unlock(&priv->lock);
			break;
		}

		curr_skb = ring->skbs[rx_tail];
		curr_dma = ring->skbs_dma[rx_tail];
		owl_emac_ring_pop_tail(ring);

		spin_unlock(&priv->lock);

		if (status & (OWL_EMAC_BIT_RDES0_DE | OWL_EMAC_BIT_RDES0_RF |
		    OWL_EMAC_BIT_RDES0_TL | OWL_EMAC_BIT_RDES0_CS |
		    OWL_EMAC_BIT_RDES0_DB | OWL_EMAC_BIT_RDES0_CE |
		    OWL_EMAC_BIT_RDES0_ZERO)) {
			dev_dbg_ratelimited(&netdev->dev,
					    "RX desc error status: 0x%08x\n",
					    status);

			if (status & OWL_EMAC_BIT_RDES0_DE)
				netdev->stats.rx_over_errors++;

			if (status & (OWL_EMAC_BIT_RDES0_RF | OWL_EMAC_BIT_RDES0_DB))
				netdev->stats.rx_frame_errors++;

			if (status & OWL_EMAC_BIT_RDES0_TL)
				netdev->stats.rx_length_errors++;

			if (status & OWL_EMAC_BIT_RDES0_CS)
				netdev->stats.collisions++;

			if (status & OWL_EMAC_BIT_RDES0_CE)
				netdev->stats.rx_crc_errors++;

			if (status & OWL_EMAC_BIT_RDES0_ZERO)
				netdev->stats.rx_fifo_errors++;

			goto drop_skb;
		}

		len = (status & OWL_EMAC_MSK_RDES0_FL) >> OWL_EMAC_OFF_RDES0_FL;
		if (unlikely(len > OWL_EMAC_RX_FRAME_MAX_LEN)) {
			netdev->stats.rx_length_errors++;
			netdev_err(netdev, "invalid RX frame len: %u\n", len);
			goto drop_skb;
		}

		/* Prepare new skb before receiving the current one. */
		new_skb = owl_emac_alloc_skb(netdev);
		if (unlikely(!new_skb))
			goto drop_skb;

		new_dma = owl_emac_dma_map_rx(priv, new_skb);
		if (dma_mapping_error(dev, new_dma)) {
			dev_kfree_skb(new_skb);
			netdev_err(netdev, "RX DMA mapping failed\n");
			goto drop_skb;
		}

		owl_emac_dma_unmap_rx(priv, curr_skb, curr_dma);

		skb_put(curr_skb, len - ETH_FCS_LEN);
		curr_skb->ip_summed = CHECKSUM_NONE;
		curr_skb->protocol = eth_type_trans(curr_skb, netdev);
		curr_skb->dev = netdev;

		netif_receive_skb(curr_skb);

		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += len;
		recv++;
		goto push_skb;

drop_skb:
		netdev->stats.rx_dropped++;
		netdev->stats.rx_errors++;
		/* Reuse the current skb. */
		new_skb = curr_skb;
		new_dma = curr_dma;

push_skb:
		spin_lock(&priv->lock);

		ring->skbs[ring->head] = new_skb;
		ring->skbs_dma[ring->head] = new_dma;

		WRITE_ONCE(desc->buf_addr, new_dma);
		dma_wmb(); /* Flush descriptor before changing ownership. */
		WRITE_ONCE(desc->status, OWL_EMAC_BIT_RDES0_OWN);

		owl_emac_ring_push_head(ring);

		spin_unlock(&priv->lock);
	}

	return recv;
}

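/*
 * NAPI poll handler: acknowledge and drain the CSR5 status bits, completing
 * TX work on TI/ETI and receiving frames on RI.  RU (RX buffer unavailable)
 * events are handled with a bounded number of RX poll demands, and a MAC
 * reset is scheduled if the internal TX/RX state machines keep reporting
 * errors.
 */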
static int owl_emac_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0, ru_cnt = 0, recv;
	static int tx_err_cnt, rx_err_cnt;
	struct owl_emac_priv *priv;
	u32 status, proc_status;

	priv = container_of(napi, struct owl_emac_priv, napi);

	while ((status = owl_emac_irq_clear(priv)) &
	       (OWL_EMAC_BIT_MAC_CSR5_NIS | OWL_EMAC_BIT_MAC_CSR5_AIS)) {
		recv = 0;

		/* TX setup frame raises ETI instead of TI. */
		if (status & (OWL_EMAC_BIT_MAC_CSR5_TI | OWL_EMAC_BIT_MAC_CSR5_ETI)) {
			owl_emac_tx_complete(priv);
			tx_err_cnt = 0;

			/* Count MAC internal RX errors. */
			proc_status = status & OWL_EMAC_MSK_MAC_CSR5_RS;
			proc_status >>= OWL_EMAC_OFF_MAC_CSR5_RS;
			if (proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_DATA ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_CDES ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_RS_FDES)
				rx_err_cnt++;
		}

		if (status & OWL_EMAC_BIT_MAC_CSR5_RI) {
			recv = owl_emac_rx_process(priv, budget - work_done);
			rx_err_cnt = 0;

			/* Count MAC internal TX errors. */
			proc_status = status & OWL_EMAC_MSK_MAC_CSR5_TS;
			proc_status >>= OWL_EMAC_OFF_MAC_CSR5_TS;
			if (proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_DATA ||
			    proc_status == OWL_EMAC_VAL_MAC_CSR5_TS_CDES)
				tx_err_cnt++;
		} else if (status & OWL_EMAC_BIT_MAC_CSR5_RU) {
			/* MAC AHB is in suspended state, will return to RX
			 * descriptor processing when the host changes ownership
			 * of the descriptor and either an RX poll demand CMD is
			 * issued or a new frame is recognized by the MAC AHB.
			 */
			if (++ru_cnt == 2)
				owl_emac_dma_cmd_resume_rx(priv);

			recv = owl_emac_rx_process(priv, budget - work_done);

			/* Guard against too many RU interrupts. */
			if (ru_cnt > 3)
				break;
		}

		work_done += recv;
		if (work_done >= budget)
			break;
	}

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		owl_emac_irq_enable(priv);
	}

	/* Reset MAC when getting too many internal TX or RX errors. */
	if (tx_err_cnt > 10 || rx_err_cnt > 10) {
		netdev_dbg(priv->netdev, "%s error status: 0x%08x\n",
			   tx_err_cnt > 10 ? "TX" : "RX", status);
		rx_err_cnt = 0;
		tx_err_cnt = 0;
		schedule_work(&priv->mac_reset_task);
	}

	return work_done;
}

static void owl_emac_mdio_clock_enable(struct owl_emac_priv *priv)
{
	u32 val;

	/* Enable MDC clock generation by adjusting CLKDIV according to
	 * the vendor implementation of the original driver.
	 */
	val = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
	val &= OWL_EMAC_MSK_MAC_CSR10_CLKDIV;
	val |= OWL_EMAC_VAL_MAC_CSR10_CLKDIV_128 << OWL_EMAC_OFF_MAC_CSR10_CLKDIV;

	val |= OWL_EMAC_BIT_MAC_CSR10_SB;
	val |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_CDS << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, val);
}

static void owl_emac_core_hw_reset(struct owl_emac_priv *priv)
{
	/* Trigger hardware reset. */
	reset_control_assert(priv->reset);
	usleep_range(10, 20);
	reset_control_deassert(priv->reset);
	usleep_range(100, 200);
}

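/*
 * Software reset and base configuration of the MAC core: set the SWR bit in
 * CSR0 and wait for it to self-clear, then reprogram the PHY interface mode,
 * MDC clock, FIFO thresholds, pause quanta, interrupt mitigation, descriptor
 * ring base addresses and a default 100M full-duplex operation mode.
 */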
static int owl_emac_core_sw_reset(struct owl_emac_priv *priv)
{
	u32 val;
	int ret;

	/* Trigger software reset. */
	owl_emac_reg_set(priv, OWL_EMAC_REG_MAC_CSR0, OWL_EMAC_BIT_MAC_CSR0_SWR);
	ret = readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR0,
				 val, !(val & OWL_EMAC_BIT_MAC_CSR0_SWR),
				 OWL_EMAC_POLL_DELAY_USEC,
				 OWL_EMAC_RESET_POLL_TIMEOUT_USEC);
	if (ret)
		return ret;

	if (priv->phy_mode == PHY_INTERFACE_MODE_RMII) {
		/* Enable RMII and use the 50MHz rmii clk as output to PHY. */
		val = 0;
	} else {
		/* Enable SMII and use the 125MHz rmii clk as output to PHY.
		 * Additionally set SMII SYNC delay to 4 half cycles.
		 */
		val = 0x04 << OWL_EMAC_OFF_MAC_CTRL_SSDC;
		val |= OWL_EMAC_BIT_MAC_CTRL_RSIS;
	}
	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CTRL, val);
990*de6e0b19SCristian Ciocaltea 
991*de6e0b19SCristian Ciocaltea 	/* MDC is disabled after reset. */
992*de6e0b19SCristian Ciocaltea 	owl_emac_mdio_clock_enable(priv);
993*de6e0b19SCristian Ciocaltea 
994*de6e0b19SCristian Ciocaltea 	/* Set FIFO pause & restart threshold levels. */
995*de6e0b19SCristian Ciocaltea 	val = 0x40 << OWL_EMAC_OFF_MAC_CSR19_FPTL;
996*de6e0b19SCristian Ciocaltea 	val |= 0x10 << OWL_EMAC_OFF_MAC_CSR19_FRTL;
997*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR19, val);
998*de6e0b19SCristian Ciocaltea 
999*de6e0b19SCristian Ciocaltea 	/* Set flow control pause quanta time to ~100 ms (at 100 Mbit/s). */
1000*de6e0b19SCristian Ciocaltea 	val = 0x4FFF << OWL_EMAC_OFF_MAC_CSR18_PQT;
1001*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR18, val);
1002*de6e0b19SCristian Ciocaltea 
1003*de6e0b19SCristian Ciocaltea 	/* Set up interrupt mitigation. */
1004*de6e0b19SCristian Ciocaltea 	val = 7 << OWL_EMAC_OFF_MAC_CSR11_NRP;
1005*de6e0b19SCristian Ciocaltea 	val |= 4 << OWL_EMAC_OFF_MAC_CSR11_RT;
1006*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR11, val);
1007*de6e0b19SCristian Ciocaltea 
1008*de6e0b19SCristian Ciocaltea 	/* Set RX/TX rings base addresses. */
1009*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR3,
1010*de6e0b19SCristian Ciocaltea 			   (u32)(priv->rx_ring.descs_dma));
1011*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR4,
1012*de6e0b19SCristian Ciocaltea 			   (u32)(priv->tx_ring.descs_dma));
1013*de6e0b19SCristian Ciocaltea 
1014*de6e0b19SCristian Ciocaltea 	/* Set up the initial operation mode. */
1015*de6e0b19SCristian Ciocaltea 	val = OWL_EMAC_VAL_MAC_CSR6_SPEED_100M << OWL_EMAC_OFF_MAC_CSR6_SPEED;
1016*de6e0b19SCristian Ciocaltea 	val |= OWL_EMAC_BIT_MAC_CSR6_FD;
1017*de6e0b19SCristian Ciocaltea 	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
1018*de6e0b19SCristian Ciocaltea 			    OWL_EMAC_MSK_MAC_CSR6_SPEED |
1019*de6e0b19SCristian Ciocaltea 			    OWL_EMAC_BIT_MAC_CSR6_FD, val);
1020*de6e0b19SCristian Ciocaltea 	owl_emac_reg_clear(priv, OWL_EMAC_REG_MAC_CSR6,
1021*de6e0b19SCristian Ciocaltea 			   OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM);
1022*de6e0b19SCristian Ciocaltea 
1023*de6e0b19SCristian Ciocaltea 	priv->link = 0;
1024*de6e0b19SCristian Ciocaltea 	priv->speed = SPEED_UNKNOWN;
1025*de6e0b19SCristian Ciocaltea 	priv->duplex = DUPLEX_UNKNOWN;
1026*de6e0b19SCristian Ciocaltea 	priv->pause = 0;
1027*de6e0b19SCristian Ciocaltea 	priv->mcaddr_list.count = 0;
1028*de6e0b19SCristian Ciocaltea 
1029*de6e0b19SCristian Ciocaltea 	return 0;
1030*de6e0b19SCristian Ciocaltea }
1031*de6e0b19SCristian Ciocaltea 
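/* (Re)start the MAC: stop the DMA engine, prepare the RX/TX rings, soft
 * reset the core, program the MAC address and multicast filter, then
 * enable NAPI, interrupts, DMA and the transmit queue. start_phy is false
 * when called from the MAC reset worker, so the attached PHY is left
 * untouched across the reset.
 */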
1032*de6e0b19SCristian Ciocaltea static int owl_emac_enable(struct net_device *netdev, bool start_phy)
1033*de6e0b19SCristian Ciocaltea {
1034*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1035*de6e0b19SCristian Ciocaltea 	int ret;
1036*de6e0b19SCristian Ciocaltea 
1037*de6e0b19SCristian Ciocaltea 	owl_emac_dma_cmd_stop(priv);
1038*de6e0b19SCristian Ciocaltea 	owl_emac_irq_disable(priv);
1039*de6e0b19SCristian Ciocaltea 	owl_emac_irq_clear(priv);
1040*de6e0b19SCristian Ciocaltea 
1041*de6e0b19SCristian Ciocaltea 	owl_emac_ring_prepare_tx(priv);
1042*de6e0b19SCristian Ciocaltea 	ret = owl_emac_ring_prepare_rx(priv);
1043*de6e0b19SCristian Ciocaltea 	if (ret)
1044*de6e0b19SCristian Ciocaltea 		goto err_unprep;
1045*de6e0b19SCristian Ciocaltea 
1046*de6e0b19SCristian Ciocaltea 	ret = owl_emac_core_sw_reset(priv);
1047*de6e0b19SCristian Ciocaltea 	if (ret) {
1048*de6e0b19SCristian Ciocaltea 		netdev_err(netdev, "failed to soft reset MAC core: %d\n", ret);
1049*de6e0b19SCristian Ciocaltea 		goto err_unprep;
1050*de6e0b19SCristian Ciocaltea 	}
1051*de6e0b19SCristian Ciocaltea 
1052*de6e0b19SCristian Ciocaltea 	owl_emac_set_hw_mac_addr(netdev);
1053*de6e0b19SCristian Ciocaltea 	owl_emac_setup_frame_xmit(priv);
1054*de6e0b19SCristian Ciocaltea 
1055*de6e0b19SCristian Ciocaltea 	netdev_reset_queue(netdev);
1056*de6e0b19SCristian Ciocaltea 	napi_enable(&priv->napi);
1057*de6e0b19SCristian Ciocaltea 
1058*de6e0b19SCristian Ciocaltea 	owl_emac_irq_enable(priv);
1059*de6e0b19SCristian Ciocaltea 	owl_emac_dma_cmd_start(priv);
1060*de6e0b19SCristian Ciocaltea 
1061*de6e0b19SCristian Ciocaltea 	if (start_phy)
1062*de6e0b19SCristian Ciocaltea 		phy_start(netdev->phydev);
1063*de6e0b19SCristian Ciocaltea 
1064*de6e0b19SCristian Ciocaltea 	netif_start_queue(netdev);
1065*de6e0b19SCristian Ciocaltea 
1066*de6e0b19SCristian Ciocaltea 	return 0;
1067*de6e0b19SCristian Ciocaltea 
1068*de6e0b19SCristian Ciocaltea err_unprep:
1069*de6e0b19SCristian Ciocaltea 	owl_emac_ring_unprepare_rx(priv);
1070*de6e0b19SCristian Ciocaltea 	owl_emac_ring_unprepare_tx(priv);
1071*de6e0b19SCristian Ciocaltea 
1072*de6e0b19SCristian Ciocaltea 	return ret;
1073*de6e0b19SCristian Ciocaltea }
1074*de6e0b19SCristian Ciocaltea 
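/* Counterpart of owl_emac_enable(): stop DMA and interrupts, quiesce the
 * transmit queue and NAPI, optionally stop the PHY and release the RX/TX
 * ring buffers.
 */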
1075*de6e0b19SCristian Ciocaltea static void owl_emac_disable(struct net_device *netdev, bool stop_phy)
1076*de6e0b19SCristian Ciocaltea {
1077*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1078*de6e0b19SCristian Ciocaltea 
1079*de6e0b19SCristian Ciocaltea 	owl_emac_dma_cmd_stop(priv);
1080*de6e0b19SCristian Ciocaltea 	owl_emac_irq_disable(priv);
1081*de6e0b19SCristian Ciocaltea 
1082*de6e0b19SCristian Ciocaltea 	netif_stop_queue(netdev);
1083*de6e0b19SCristian Ciocaltea 	napi_disable(&priv->napi);
1084*de6e0b19SCristian Ciocaltea 
1085*de6e0b19SCristian Ciocaltea 	if (stop_phy)
1086*de6e0b19SCristian Ciocaltea 		phy_stop(netdev->phydev);
1087*de6e0b19SCristian Ciocaltea 
1088*de6e0b19SCristian Ciocaltea 	owl_emac_ring_unprepare_rx(priv);
1089*de6e0b19SCristian Ciocaltea 	owl_emac_ring_unprepare_tx(priv);
1090*de6e0b19SCristian Ciocaltea }
1091*de6e0b19SCristian Ciocaltea 
1092*de6e0b19SCristian Ciocaltea static int owl_emac_ndo_open(struct net_device *netdev)
1093*de6e0b19SCristian Ciocaltea {
1094*de6e0b19SCristian Ciocaltea 	return owl_emac_enable(netdev, true);
1095*de6e0b19SCristian Ciocaltea }
1096*de6e0b19SCristian Ciocaltea 
1097*de6e0b19SCristian Ciocaltea static int owl_emac_ndo_stop(struct net_device *netdev)
1098*de6e0b19SCristian Ciocaltea {
1099*de6e0b19SCristian Ciocaltea 	owl_emac_disable(netdev, true);
1100*de6e0b19SCristian Ciocaltea 
1101*de6e0b19SCristian Ciocaltea 	return 0;
1102*de6e0b19SCristian Ciocaltea }
1103*de6e0b19SCristian Ciocaltea 
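/* Store up to OWL_EMAC_MAX_MULTICAST_ADDRS multicast addresses from the
 * netdev in the private filter list and program them into the MAC via a
 * setup frame. A non-positive count simply clears the list.
 */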
1104*de6e0b19SCristian Ciocaltea static void owl_emac_set_multicast(struct net_device *netdev, int count)
1105*de6e0b19SCristian Ciocaltea {
1106*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1107*de6e0b19SCristian Ciocaltea 	struct netdev_hw_addr *ha;
1108*de6e0b19SCristian Ciocaltea 	int index = 0;
1109*de6e0b19SCristian Ciocaltea 
1110*de6e0b19SCristian Ciocaltea 	if (count <= 0) {
1111*de6e0b19SCristian Ciocaltea 		priv->mcaddr_list.count = 0;
1112*de6e0b19SCristian Ciocaltea 		return;
1113*de6e0b19SCristian Ciocaltea 	}
1114*de6e0b19SCristian Ciocaltea 
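	/* The caller caps count at OWL_EMAC_MAX_MULTICAST_ADDRS and
	 * ndo_set_rx_mode() runs under the netdev address list lock, so the
	 * WARN_ON() below is purely defensive.
	 */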
1115*de6e0b19SCristian Ciocaltea 	netdev_for_each_mc_addr(ha, netdev) {
1116*de6e0b19SCristian Ciocaltea 		if (!is_multicast_ether_addr(ha->addr))
1117*de6e0b19SCristian Ciocaltea 			continue;
1118*de6e0b19SCristian Ciocaltea 
1119*de6e0b19SCristian Ciocaltea 		WARN_ON(index >= OWL_EMAC_MAX_MULTICAST_ADDRS);
1120*de6e0b19SCristian Ciocaltea 		ether_addr_copy(priv->mcaddr_list.addrs[index++], ha->addr);
1121*de6e0b19SCristian Ciocaltea 	}
1122*de6e0b19SCristian Ciocaltea 
1123*de6e0b19SCristian Ciocaltea 	priv->mcaddr_list.count = index;
1124*de6e0b19SCristian Ciocaltea 
1125*de6e0b19SCristian Ciocaltea 	owl_emac_setup_frame_xmit(priv);
1126*de6e0b19SCristian Ciocaltea }
1127*de6e0b19SCristian Ciocaltea 
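/* Select the RX filtering mode: promiscuous, all-multicast, or an explicit
 * list of at most OWL_EMAC_MAX_MULTICAST_ADDRS multicast addresses. DMA is
 * temporarily stopped while the CSR6 mode bits are updated.
 */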
1128*de6e0b19SCristian Ciocaltea static void owl_emac_ndo_set_rx_mode(struct net_device *netdev)
1129*de6e0b19SCristian Ciocaltea {
1130*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1131*de6e0b19SCristian Ciocaltea 	u32 status, val = 0;
1132*de6e0b19SCristian Ciocaltea 	int mcast_count = 0;
1133*de6e0b19SCristian Ciocaltea 
1134*de6e0b19SCristian Ciocaltea 	if (netdev->flags & IFF_PROMISC) {
1135*de6e0b19SCristian Ciocaltea 		val = OWL_EMAC_BIT_MAC_CSR6_PR;
1136*de6e0b19SCristian Ciocaltea 	} else if (netdev->flags & IFF_ALLMULTI) {
1137*de6e0b19SCristian Ciocaltea 		val = OWL_EMAC_BIT_MAC_CSR6_PM;
1138*de6e0b19SCristian Ciocaltea 	} else if (netdev->flags & IFF_MULTICAST) {
1139*de6e0b19SCristian Ciocaltea 		mcast_count = netdev_mc_count(netdev);
1140*de6e0b19SCristian Ciocaltea 
1141*de6e0b19SCristian Ciocaltea 		if (mcast_count > OWL_EMAC_MAX_MULTICAST_ADDRS) {
1142*de6e0b19SCristian Ciocaltea 			val = OWL_EMAC_BIT_MAC_CSR6_PM;
1143*de6e0b19SCristian Ciocaltea 			mcast_count = 0;
1144*de6e0b19SCristian Ciocaltea 		}
1145*de6e0b19SCristian Ciocaltea 	}
1146*de6e0b19SCristian Ciocaltea 
1147*de6e0b19SCristian Ciocaltea 	spin_lock_bh(&priv->lock);
1148*de6e0b19SCristian Ciocaltea 
1149*de6e0b19SCristian Ciocaltea 	/* Temporarily stop DMA TX & RX. */
1150*de6e0b19SCristian Ciocaltea 	status = owl_emac_dma_cmd_stop(priv);
1151*de6e0b19SCristian Ciocaltea 
1152*de6e0b19SCristian Ciocaltea 	/* Update operation modes. */
1153*de6e0b19SCristian Ciocaltea 	owl_emac_reg_update(priv, OWL_EMAC_REG_MAC_CSR6,
1154*de6e0b19SCristian Ciocaltea 			    OWL_EMAC_BIT_MAC_CSR6_PR | OWL_EMAC_BIT_MAC_CSR6_PM,
1155*de6e0b19SCristian Ciocaltea 			    val);
1156*de6e0b19SCristian Ciocaltea 
1157*de6e0b19SCristian Ciocaltea 	/* Restore DMA TX & RX status. */
1158*de6e0b19SCristian Ciocaltea 	owl_emac_dma_cmd_set(priv, status);
1159*de6e0b19SCristian Ciocaltea 
1160*de6e0b19SCristian Ciocaltea 	spin_unlock_bh(&priv->lock);
1161*de6e0b19SCristian Ciocaltea 
1162*de6e0b19SCristian Ciocaltea 	/* Set/reset multicast addr list. */
1163*de6e0b19SCristian Ciocaltea 	owl_emac_set_multicast(netdev, mcast_count);
1164*de6e0b19SCristian Ciocaltea }
1165*de6e0b19SCristian Ciocaltea 
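/* Change the MAC address; only permitted while the interface is down. The
 * new address is written to the hardware and propagated to the address
 * filter via a setup frame.
 */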
1166*de6e0b19SCristian Ciocaltea static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
1167*de6e0b19SCristian Ciocaltea {
1168*de6e0b19SCristian Ciocaltea 	struct sockaddr *skaddr = addr;
1169*de6e0b19SCristian Ciocaltea 
1170*de6e0b19SCristian Ciocaltea 	if (!is_valid_ether_addr(skaddr->sa_data))
1171*de6e0b19SCristian Ciocaltea 		return -EADDRNOTAVAIL;
1172*de6e0b19SCristian Ciocaltea 
1173*de6e0b19SCristian Ciocaltea 	if (netif_running(netdev))
1174*de6e0b19SCristian Ciocaltea 		return -EBUSY;
1175*de6e0b19SCristian Ciocaltea 
1176*de6e0b19SCristian Ciocaltea 	memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
1177*de6e0b19SCristian Ciocaltea 	owl_emac_set_hw_mac_addr(netdev);
1178*de6e0b19SCristian Ciocaltea 
1179*de6e0b19SCristian Ciocaltea 	return owl_emac_setup_frame_xmit(netdev_priv(netdev));
1180*de6e0b19SCristian Ciocaltea }
1181*de6e0b19SCristian Ciocaltea 
1182*de6e0b19SCristian Ciocaltea static int owl_emac_ndo_do_ioctl(struct net_device *netdev,
1183*de6e0b19SCristian Ciocaltea 				 struct ifreq *req, int cmd)
1184*de6e0b19SCristian Ciocaltea {
1185*de6e0b19SCristian Ciocaltea 	if (!netif_running(netdev))
1186*de6e0b19SCristian Ciocaltea 		return -EINVAL;
1187*de6e0b19SCristian Ciocaltea 
1188*de6e0b19SCristian Ciocaltea 	return phy_mii_ioctl(netdev->phydev, req, cmd);
1189*de6e0b19SCristian Ciocaltea }
1190*de6e0b19SCristian Ciocaltea 
1191*de6e0b19SCristian Ciocaltea static void owl_emac_ndo_tx_timeout(struct net_device *netdev,
1192*de6e0b19SCristian Ciocaltea 				    unsigned int txqueue)
1193*de6e0b19SCristian Ciocaltea {
1194*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1195*de6e0b19SCristian Ciocaltea 
1196*de6e0b19SCristian Ciocaltea 	schedule_work(&priv->mac_reset_task);
1197*de6e0b19SCristian Ciocaltea }
1198*de6e0b19SCristian Ciocaltea 
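/* Worker scheduled on TX timeout or on repeated TX/RX errors: recover the
 * MAC with a full disable/enable cycle while leaving the PHY running.
 */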
1199*de6e0b19SCristian Ciocaltea static void owl_emac_reset_task(struct work_struct *work)
1200*de6e0b19SCristian Ciocaltea {
1201*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv;
1202*de6e0b19SCristian Ciocaltea 
1203*de6e0b19SCristian Ciocaltea 	priv = container_of(work, struct owl_emac_priv, mac_reset_task);
1204*de6e0b19SCristian Ciocaltea 
1205*de6e0b19SCristian Ciocaltea 	netdev_dbg(priv->netdev, "resetting MAC\n");
1206*de6e0b19SCristian Ciocaltea 	owl_emac_disable(priv->netdev, false);
1207*de6e0b19SCristian Ciocaltea 	owl_emac_enable(priv->netdev, false);
1208*de6e0b19SCristian Ciocaltea }
1209*de6e0b19SCristian Ciocaltea 
1210*de6e0b19SCristian Ciocaltea static struct net_device_stats *
1211*de6e0b19SCristian Ciocaltea owl_emac_ndo_get_stats(struct net_device *netdev)
1212*de6e0b19SCristian Ciocaltea {
1213*de6e0b19SCristian Ciocaltea 	/* FIXME: If possible, try to get stats from MAC hardware registers
1214*de6e0b19SCristian Ciocaltea 	 * instead of tracking them manually in the driver.
1215*de6e0b19SCristian Ciocaltea 	 */
1216*de6e0b19SCristian Ciocaltea 
1217*de6e0b19SCristian Ciocaltea 	return &netdev->stats;
1218*de6e0b19SCristian Ciocaltea }
1219*de6e0b19SCristian Ciocaltea 
1220*de6e0b19SCristian Ciocaltea static const struct net_device_ops owl_emac_netdev_ops = {
1221*de6e0b19SCristian Ciocaltea 	.ndo_open		= owl_emac_ndo_open,
1222*de6e0b19SCristian Ciocaltea 	.ndo_stop		= owl_emac_ndo_stop,
1223*de6e0b19SCristian Ciocaltea 	.ndo_start_xmit		= owl_emac_ndo_start_xmit,
1224*de6e0b19SCristian Ciocaltea 	.ndo_set_rx_mode	= owl_emac_ndo_set_rx_mode,
1225*de6e0b19SCristian Ciocaltea 	.ndo_set_mac_address	= owl_emac_ndo_set_mac_addr,
1226*de6e0b19SCristian Ciocaltea 	.ndo_validate_addr	= eth_validate_addr,
1227*de6e0b19SCristian Ciocaltea 	.ndo_do_ioctl		= owl_emac_ndo_do_ioctl,
1228*de6e0b19SCristian Ciocaltea 	.ndo_tx_timeout         = owl_emac_ndo_tx_timeout,
1229*de6e0b19SCristian Ciocaltea 	.ndo_get_stats		= owl_emac_ndo_get_stats,
1230*de6e0b19SCristian Ciocaltea };
1231*de6e0b19SCristian Ciocaltea 
1232*de6e0b19SCristian Ciocaltea static void owl_emac_ethtool_get_drvinfo(struct net_device *dev,
1233*de6e0b19SCristian Ciocaltea 					 struct ethtool_drvinfo *info)
1234*de6e0b19SCristian Ciocaltea {
1235*de6e0b19SCristian Ciocaltea 	strscpy(info->driver, OWL_EMAC_DRVNAME, sizeof(info->driver));
1236*de6e0b19SCristian Ciocaltea }
1237*de6e0b19SCristian Ciocaltea 
1238*de6e0b19SCristian Ciocaltea static u32 owl_emac_ethtool_get_msglevel(struct net_device *netdev)
1239*de6e0b19SCristian Ciocaltea {
1240*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1241*de6e0b19SCristian Ciocaltea 
1242*de6e0b19SCristian Ciocaltea 	return priv->msg_enable;
1243*de6e0b19SCristian Ciocaltea }
1244*de6e0b19SCristian Ciocaltea 
1245*de6e0b19SCristian Ciocaltea static void owl_emac_ethtool_set_msglevel(struct net_device *ndev, u32 val)
1246*de6e0b19SCristian Ciocaltea {
1247*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(ndev);
1248*de6e0b19SCristian Ciocaltea 
1249*de6e0b19SCristian Ciocaltea 	priv->msg_enable = val;
1250*de6e0b19SCristian Ciocaltea }
1251*de6e0b19SCristian Ciocaltea 
1252*de6e0b19SCristian Ciocaltea static const struct ethtool_ops owl_emac_ethtool_ops = {
1253*de6e0b19SCristian Ciocaltea 	.get_drvinfo		= owl_emac_ethtool_get_drvinfo,
1254*de6e0b19SCristian Ciocaltea 	.get_link		= ethtool_op_get_link,
1255*de6e0b19SCristian Ciocaltea 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
1256*de6e0b19SCristian Ciocaltea 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
1257*de6e0b19SCristian Ciocaltea 	.get_msglevel		= owl_emac_ethtool_get_msglevel,
1258*de6e0b19SCristian Ciocaltea 	.set_msglevel		= owl_emac_ethtool_set_msglevel,
1259*de6e0b19SCristian Ciocaltea };
1260*de6e0b19SCristian Ciocaltea 
1261*de6e0b19SCristian Ciocaltea static int owl_emac_mdio_wait(struct owl_emac_priv *priv)
1262*de6e0b19SCristian Ciocaltea {
1263*de6e0b19SCristian Ciocaltea 	u32 val;
1264*de6e0b19SCristian Ciocaltea 
1265*de6e0b19SCristian Ciocaltea 	/* Wait while data transfer is in progress. */
1266*de6e0b19SCristian Ciocaltea 	return readl_poll_timeout(priv->base + OWL_EMAC_REG_MAC_CSR10,
1267*de6e0b19SCristian Ciocaltea 				  val, !(val & OWL_EMAC_BIT_MAC_CSR10_SB),
1268*de6e0b19SCristian Ciocaltea 				  OWL_EMAC_POLL_DELAY_USEC,
1269*de6e0b19SCristian Ciocaltea 				  OWL_EMAC_MDIO_POLL_TIMEOUT_USEC);
1270*de6e0b19SCristian Ciocaltea }
1271*de6e0b19SCristian Ciocaltea 
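/* Read a Clause 22 PHY register by issuing a read opcode through MAC_CSR10
 * and polling until the start/busy bit clears.
 */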
1272*de6e0b19SCristian Ciocaltea static int owl_emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
1273*de6e0b19SCristian Ciocaltea {
1274*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = bus->priv;
1275*de6e0b19SCristian Ciocaltea 	u32 data, tmp;
1276*de6e0b19SCristian Ciocaltea 	int ret;
1277*de6e0b19SCristian Ciocaltea 
1278*de6e0b19SCristian Ciocaltea 	if (regnum & MII_ADDR_C45)
1279*de6e0b19SCristian Ciocaltea 		return -EOPNOTSUPP;
1280*de6e0b19SCristian Ciocaltea 
1281*de6e0b19SCristian Ciocaltea 	data = OWL_EMAC_BIT_MAC_CSR10_SB;
1282*de6e0b19SCristian Ciocaltea 	data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_RD << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
1283*de6e0b19SCristian Ciocaltea 
1284*de6e0b19SCristian Ciocaltea 	tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
1285*de6e0b19SCristian Ciocaltea 	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;
1286*de6e0b19SCristian Ciocaltea 
1287*de6e0b19SCristian Ciocaltea 	tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
1288*de6e0b19SCristian Ciocaltea 	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;
1289*de6e0b19SCristian Ciocaltea 
1290*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
1291*de6e0b19SCristian Ciocaltea 
1292*de6e0b19SCristian Ciocaltea 	ret = owl_emac_mdio_wait(priv);
1293*de6e0b19SCristian Ciocaltea 	if (ret)
1294*de6e0b19SCristian Ciocaltea 		return ret;
1295*de6e0b19SCristian Ciocaltea 
1296*de6e0b19SCristian Ciocaltea 	data = owl_emac_reg_read(priv, OWL_EMAC_REG_MAC_CSR10);
1297*de6e0b19SCristian Ciocaltea 	data &= OWL_EMAC_MSK_MAC_CSR10_DATA;
1298*de6e0b19SCristian Ciocaltea 
1299*de6e0b19SCristian Ciocaltea 	return data;
1300*de6e0b19SCristian Ciocaltea }
1301*de6e0b19SCristian Ciocaltea 
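/* Write a Clause 22 PHY register through MAC_CSR10 and wait for the
 * transfer to complete.
 */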
1302*de6e0b19SCristian Ciocaltea static int
1303*de6e0b19SCristian Ciocaltea owl_emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
1304*de6e0b19SCristian Ciocaltea {
1305*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = bus->priv;
1306*de6e0b19SCristian Ciocaltea 	u32 data, tmp;
1307*de6e0b19SCristian Ciocaltea 
1308*de6e0b19SCristian Ciocaltea 	if (regnum & MII_ADDR_C45)
1309*de6e0b19SCristian Ciocaltea 		return -EOPNOTSUPP;
1310*de6e0b19SCristian Ciocaltea 
1311*de6e0b19SCristian Ciocaltea 	data = OWL_EMAC_BIT_MAC_CSR10_SB;
1312*de6e0b19SCristian Ciocaltea 	data |= OWL_EMAC_VAL_MAC_CSR10_OPCODE_WR << OWL_EMAC_OFF_MAC_CSR10_OPCODE;
1313*de6e0b19SCristian Ciocaltea 
1314*de6e0b19SCristian Ciocaltea 	tmp = addr << OWL_EMAC_OFF_MAC_CSR10_PHYADD;
1315*de6e0b19SCristian Ciocaltea 	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_PHYADD;
1316*de6e0b19SCristian Ciocaltea 
1317*de6e0b19SCristian Ciocaltea 	tmp = regnum << OWL_EMAC_OFF_MAC_CSR10_REGADD;
1318*de6e0b19SCristian Ciocaltea 	data |= tmp & OWL_EMAC_MSK_MAC_CSR10_REGADD;
1319*de6e0b19SCristian Ciocaltea 
1320*de6e0b19SCristian Ciocaltea 	data |= val & OWL_EMAC_MSK_MAC_CSR10_DATA;
1321*de6e0b19SCristian Ciocaltea 
1322*de6e0b19SCristian Ciocaltea 	owl_emac_reg_write(priv, OWL_EMAC_REG_MAC_CSR10, data);
1323*de6e0b19SCristian Ciocaltea 
1324*de6e0b19SCristian Ciocaltea 	return owl_emac_mdio_wait(priv);
1325*de6e0b19SCristian Ciocaltea }
1326*de6e0b19SCristian Ciocaltea 
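/* Register the MDIO bus described by the "mdio" child node. PHY auto
 * probing is masked out since the PHY is looked up from the device tree
 * in owl_emac_phy_init().
 */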
1327*de6e0b19SCristian Ciocaltea static int owl_emac_mdio_init(struct net_device *netdev)
1328*de6e0b19SCristian Ciocaltea {
1329*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1330*de6e0b19SCristian Ciocaltea 	struct device *dev = owl_emac_get_dev(priv);
1331*de6e0b19SCristian Ciocaltea 	struct device_node *mdio_node;
1332*de6e0b19SCristian Ciocaltea 	int ret;
1333*de6e0b19SCristian Ciocaltea 
1334*de6e0b19SCristian Ciocaltea 	mdio_node = of_get_child_by_name(dev->of_node, "mdio");
1335*de6e0b19SCristian Ciocaltea 	if (!mdio_node)
1336*de6e0b19SCristian Ciocaltea 		return -ENODEV;
1337*de6e0b19SCristian Ciocaltea 
1338*de6e0b19SCristian Ciocaltea 	if (!of_device_is_available(mdio_node)) {
1339*de6e0b19SCristian Ciocaltea 		ret = -ENODEV;
1340*de6e0b19SCristian Ciocaltea 		goto err_put_node;
1341*de6e0b19SCristian Ciocaltea 	}
1342*de6e0b19SCristian Ciocaltea 
1343*de6e0b19SCristian Ciocaltea 	priv->mii = devm_mdiobus_alloc(dev);
1344*de6e0b19SCristian Ciocaltea 	if (!priv->mii) {
1345*de6e0b19SCristian Ciocaltea 		ret = -ENOMEM;
1346*de6e0b19SCristian Ciocaltea 		goto err_put_node;
1347*de6e0b19SCristian Ciocaltea 	}
1348*de6e0b19SCristian Ciocaltea 
1349*de6e0b19SCristian Ciocaltea 	snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
1350*de6e0b19SCristian Ciocaltea 	priv->mii->name = "owl-emac-mdio";
1351*de6e0b19SCristian Ciocaltea 	priv->mii->parent = dev;
1352*de6e0b19SCristian Ciocaltea 	priv->mii->read = owl_emac_mdio_read;
1353*de6e0b19SCristian Ciocaltea 	priv->mii->write = owl_emac_mdio_write;
1354*de6e0b19SCristian Ciocaltea 	priv->mii->phy_mask = ~0; /* Mask out all PHYs from auto probing. */
1355*de6e0b19SCristian Ciocaltea 	priv->mii->priv = priv;
1356*de6e0b19SCristian Ciocaltea 
1357*de6e0b19SCristian Ciocaltea 	ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
1358*de6e0b19SCristian Ciocaltea 
1359*de6e0b19SCristian Ciocaltea err_put_node:
1360*de6e0b19SCristian Ciocaltea 	of_node_put(mdio_node);
1361*de6e0b19SCristian Ciocaltea 	return ret;
1362*de6e0b19SCristian Ciocaltea }
1363*de6e0b19SCristian Ciocaltea 
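/* Connect the PHY referenced by the device tree and advertise support for
 * symmetric pause frames.
 */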
1364*de6e0b19SCristian Ciocaltea static int owl_emac_phy_init(struct net_device *netdev)
1365*de6e0b19SCristian Ciocaltea {
1366*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1367*de6e0b19SCristian Ciocaltea 	struct device *dev = owl_emac_get_dev(priv);
1368*de6e0b19SCristian Ciocaltea 	struct phy_device *phy;
1369*de6e0b19SCristian Ciocaltea 
1370*de6e0b19SCristian Ciocaltea 	phy = of_phy_get_and_connect(netdev, dev->of_node,
1371*de6e0b19SCristian Ciocaltea 				     owl_emac_adjust_link);
1372*de6e0b19SCristian Ciocaltea 	if (!phy)
1373*de6e0b19SCristian Ciocaltea 		return -ENODEV;
1374*de6e0b19SCristian Ciocaltea 
1375*de6e0b19SCristian Ciocaltea 	phy_set_sym_pause(phy, true, true, true);
1376*de6e0b19SCristian Ciocaltea 
1377*de6e0b19SCristian Ciocaltea 	if (netif_msg_link(priv))
1378*de6e0b19SCristian Ciocaltea 		phy_attached_info(phy);
1379*de6e0b19SCristian Ciocaltea 
1380*de6e0b19SCristian Ciocaltea 	return 0;
1381*de6e0b19SCristian Ciocaltea }
1382*de6e0b19SCristian Ciocaltea 
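/* Obtain the MAC address from platform/firmware data, falling back to a
 * random address if none is available.
 */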
1383*de6e0b19SCristian Ciocaltea static void owl_emac_get_mac_addr(struct net_device *netdev)
1384*de6e0b19SCristian Ciocaltea {
1385*de6e0b19SCristian Ciocaltea 	struct device *dev = netdev->dev.parent;
1386*de6e0b19SCristian Ciocaltea 	int ret;
1387*de6e0b19SCristian Ciocaltea 
1388*de6e0b19SCristian Ciocaltea 	ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
1389*de6e0b19SCristian Ciocaltea 	if (!ret && is_valid_ether_addr(netdev->dev_addr))
1390*de6e0b19SCristian Ciocaltea 		return;
1391*de6e0b19SCristian Ciocaltea 
1392*de6e0b19SCristian Ciocaltea 	eth_hw_addr_random(netdev);
1393*de6e0b19SCristian Ciocaltea 	dev_warn(dev, "using random MAC address %pM\n", netdev->dev_addr);
1394*de6e0b19SCristian Ciocaltea }
1395*de6e0b19SCristian Ciocaltea 
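/* Stop the interface (if running) and gate the MAC clocks on system
 * suspend.
 */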
1396*de6e0b19SCristian Ciocaltea static __maybe_unused int owl_emac_suspend(struct device *dev)
1397*de6e0b19SCristian Ciocaltea {
1398*de6e0b19SCristian Ciocaltea 	struct net_device *netdev = dev_get_drvdata(dev);
1399*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1400*de6e0b19SCristian Ciocaltea 
1401*de6e0b19SCristian Ciocaltea 	disable_irq(netdev->irq);
1402*de6e0b19SCristian Ciocaltea 
1403*de6e0b19SCristian Ciocaltea 	if (netif_running(netdev)) {
1404*de6e0b19SCristian Ciocaltea 		owl_emac_disable(netdev, true);
1405*de6e0b19SCristian Ciocaltea 		netif_device_detach(netdev);
1406*de6e0b19SCristian Ciocaltea 	}
1407*de6e0b19SCristian Ciocaltea 
1408*de6e0b19SCristian Ciocaltea 	clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1409*de6e0b19SCristian Ciocaltea 
1410*de6e0b19SCristian Ciocaltea 	return 0;
1411*de6e0b19SCristian Ciocaltea }
1412*de6e0b19SCristian Ciocaltea 
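/* Ungate the MAC clocks and, if the interface was running, reset and fully
 * reconfigure the MAC before reattaching it on system resume.
 */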
1413*de6e0b19SCristian Ciocaltea static __maybe_unused int owl_emac_resume(struct device *dev)
1414*de6e0b19SCristian Ciocaltea {
1415*de6e0b19SCristian Ciocaltea 	struct net_device *netdev = dev_get_drvdata(dev);
1416*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(netdev);
1417*de6e0b19SCristian Ciocaltea 	int ret;
1418*de6e0b19SCristian Ciocaltea 
1419*de6e0b19SCristian Ciocaltea 	ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
1420*de6e0b19SCristian Ciocaltea 	if (ret)
1421*de6e0b19SCristian Ciocaltea 		return ret;
1422*de6e0b19SCristian Ciocaltea 
1423*de6e0b19SCristian Ciocaltea 	if (netif_running(netdev)) {
1424*de6e0b19SCristian Ciocaltea 		owl_emac_core_hw_reset(priv);
1425*de6e0b19SCristian Ciocaltea 		owl_emac_core_sw_reset(priv);
1426*de6e0b19SCristian Ciocaltea 
1427*de6e0b19SCristian Ciocaltea 		ret = owl_emac_enable(netdev, true);
1428*de6e0b19SCristian Ciocaltea 		if (ret) {
1429*de6e0b19SCristian Ciocaltea 			clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1430*de6e0b19SCristian Ciocaltea 			return ret;
1431*de6e0b19SCristian Ciocaltea 		}
1432*de6e0b19SCristian Ciocaltea 
1433*de6e0b19SCristian Ciocaltea 		netif_device_attach(netdev);
1434*de6e0b19SCristian Ciocaltea 	}
1435*de6e0b19SCristian Ciocaltea 
1436*de6e0b19SCristian Ciocaltea 	enable_irq(netdev->irq);
1437*de6e0b19SCristian Ciocaltea 
1438*de6e0b19SCristian Ciocaltea 	return 0;
1439*de6e0b19SCristian Ciocaltea }
1440*de6e0b19SCristian Ciocaltea 
1441*de6e0b19SCristian Ciocaltea static void owl_emac_clk_disable_unprepare(void *data)
1442*de6e0b19SCristian Ciocaltea {
1443*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = data;
1444*de6e0b19SCristian Ciocaltea 
1445*de6e0b19SCristian Ciocaltea 	clk_bulk_disable_unprepare(OWL_EMAC_NCLKS, priv->clks);
1446*de6e0b19SCristian Ciocaltea }
1447*de6e0b19SCristian Ciocaltea 
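/* Set the RMII reference clock rate required by the PHY interface mode:
 * 50 MHz for RMII, 125 MHz for SMII.
 */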
1448*de6e0b19SCristian Ciocaltea static int owl_emac_clk_set_rate(struct owl_emac_priv *priv)
1449*de6e0b19SCristian Ciocaltea {
1450*de6e0b19SCristian Ciocaltea 	struct device *dev = owl_emac_get_dev(priv);
1451*de6e0b19SCristian Ciocaltea 	unsigned long rate;
1452*de6e0b19SCristian Ciocaltea 	int ret;
1453*de6e0b19SCristian Ciocaltea 
1454*de6e0b19SCristian Ciocaltea 	switch (priv->phy_mode) {
1455*de6e0b19SCristian Ciocaltea 	case PHY_INTERFACE_MODE_RMII:
1456*de6e0b19SCristian Ciocaltea 		rate = 50000000;
1457*de6e0b19SCristian Ciocaltea 		break;
1458*de6e0b19SCristian Ciocaltea 
1459*de6e0b19SCristian Ciocaltea 	case PHY_INTERFACE_MODE_SMII:
1460*de6e0b19SCristian Ciocaltea 		rate = 125000000;
1461*de6e0b19SCristian Ciocaltea 		break;
1462*de6e0b19SCristian Ciocaltea 
1463*de6e0b19SCristian Ciocaltea 	default:
1464*de6e0b19SCristian Ciocaltea 		dev_err(dev, "unsupported phy interface mode %d\n",
1465*de6e0b19SCristian Ciocaltea 			priv->phy_mode);
1466*de6e0b19SCristian Ciocaltea 		return -EOPNOTSUPP;
1467*de6e0b19SCristian Ciocaltea 	}
1468*de6e0b19SCristian Ciocaltea 
1469*de6e0b19SCristian Ciocaltea 	ret = clk_set_rate(priv->clks[OWL_EMAC_CLK_RMII].clk, rate);
1470*de6e0b19SCristian Ciocaltea 	if (ret)
1471*de6e0b19SCristian Ciocaltea 		dev_err(dev, "failed to set RMII clock rate: %d\n", ret);
1472*de6e0b19SCristian Ciocaltea 
1473*de6e0b19SCristian Ciocaltea 	return ret;
1474*de6e0b19SCristian Ciocaltea }
1475*de6e0b19SCristian Ciocaltea 
1476*de6e0b19SCristian Ciocaltea static int owl_emac_probe(struct platform_device *pdev)
1477*de6e0b19SCristian Ciocaltea {
1478*de6e0b19SCristian Ciocaltea 	struct device *dev = &pdev->dev;
1479*de6e0b19SCristian Ciocaltea 	struct net_device *netdev;
1480*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv;
1481*de6e0b19SCristian Ciocaltea 	int ret, i;
1482*de6e0b19SCristian Ciocaltea 
1483*de6e0b19SCristian Ciocaltea 	netdev = devm_alloc_etherdev(dev, sizeof(*priv));
1484*de6e0b19SCristian Ciocaltea 	if (!netdev)
1485*de6e0b19SCristian Ciocaltea 		return -ENOMEM;
1486*de6e0b19SCristian Ciocaltea 
1487*de6e0b19SCristian Ciocaltea 	platform_set_drvdata(pdev, netdev);
1488*de6e0b19SCristian Ciocaltea 	SET_NETDEV_DEV(netdev, dev);
1489*de6e0b19SCristian Ciocaltea 
1490*de6e0b19SCristian Ciocaltea 	priv = netdev_priv(netdev);
1491*de6e0b19SCristian Ciocaltea 	priv->netdev = netdev;
1492*de6e0b19SCristian Ciocaltea 	priv->msg_enable = netif_msg_init(-1, OWL_EMAC_DEFAULT_MSG_ENABLE);
1493*de6e0b19SCristian Ciocaltea 
1494*de6e0b19SCristian Ciocaltea 	ret = of_get_phy_mode(dev->of_node, &priv->phy_mode);
1495*de6e0b19SCristian Ciocaltea 	if (ret) {
1496*de6e0b19SCristian Ciocaltea 		dev_err(dev, "failed to get phy mode: %d\n", ret);
1497*de6e0b19SCristian Ciocaltea 		return ret;
1498*de6e0b19SCristian Ciocaltea 	}
1499*de6e0b19SCristian Ciocaltea 
1500*de6e0b19SCristian Ciocaltea 	spin_lock_init(&priv->lock);
1501*de6e0b19SCristian Ciocaltea 
1502*de6e0b19SCristian Ciocaltea 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1503*de6e0b19SCristian Ciocaltea 	if (ret) {
1504*de6e0b19SCristian Ciocaltea 		dev_err(dev, "unsupported DMA mask\n");
1505*de6e0b19SCristian Ciocaltea 		return ret;
1506*de6e0b19SCristian Ciocaltea 	}
1507*de6e0b19SCristian Ciocaltea 
1508*de6e0b19SCristian Ciocaltea 	ret = owl_emac_ring_alloc(dev, &priv->rx_ring, OWL_EMAC_RX_RING_SIZE);
1509*de6e0b19SCristian Ciocaltea 	if (ret)
1510*de6e0b19SCristian Ciocaltea 		return ret;
1511*de6e0b19SCristian Ciocaltea 
1512*de6e0b19SCristian Ciocaltea 	ret = owl_emac_ring_alloc(dev, &priv->tx_ring, OWL_EMAC_TX_RING_SIZE);
1513*de6e0b19SCristian Ciocaltea 	if (ret)
1514*de6e0b19SCristian Ciocaltea 		return ret;
1515*de6e0b19SCristian Ciocaltea 
1516*de6e0b19SCristian Ciocaltea 	priv->base = devm_platform_ioremap_resource(pdev, 0);
1517*de6e0b19SCristian Ciocaltea 	if (IS_ERR(priv->base))
1518*de6e0b19SCristian Ciocaltea 		return PTR_ERR(priv->base);
1519*de6e0b19SCristian Ciocaltea 
1520*de6e0b19SCristian Ciocaltea 	netdev->irq = platform_get_irq(pdev, 0);
1521*de6e0b19SCristian Ciocaltea 	if (netdev->irq < 0)
1522*de6e0b19SCristian Ciocaltea 		return netdev->irq;
1523*de6e0b19SCristian Ciocaltea 
1524*de6e0b19SCristian Ciocaltea 	ret = devm_request_irq(dev, netdev->irq, owl_emac_handle_irq,
1525*de6e0b19SCristian Ciocaltea 			       IRQF_SHARED, netdev->name, netdev);
1526*de6e0b19SCristian Ciocaltea 	if (ret) {
1527*de6e0b19SCristian Ciocaltea 		dev_err(dev, "failed to request irq %d: %d\n", netdev->irq, ret);
1528*de6e0b19SCristian Ciocaltea 		return ret;
1529*de6e0b19SCristian Ciocaltea 	}
1530*de6e0b19SCristian Ciocaltea 
1531*de6e0b19SCristian Ciocaltea 	for (i = 0; i < OWL_EMAC_NCLKS; i++)
1532*de6e0b19SCristian Ciocaltea 		priv->clks[i].id = owl_emac_clk_names[i];
1533*de6e0b19SCristian Ciocaltea 
1534*de6e0b19SCristian Ciocaltea 	ret = devm_clk_bulk_get(dev, OWL_EMAC_NCLKS, priv->clks);
1535*de6e0b19SCristian Ciocaltea 	if (ret)
1536*de6e0b19SCristian Ciocaltea 		return ret;
1537*de6e0b19SCristian Ciocaltea 
1538*de6e0b19SCristian Ciocaltea 	ret = clk_bulk_prepare_enable(OWL_EMAC_NCLKS, priv->clks);
1539*de6e0b19SCristian Ciocaltea 	if (ret)
1540*de6e0b19SCristian Ciocaltea 		return ret;
1541*de6e0b19SCristian Ciocaltea 
1542*de6e0b19SCristian Ciocaltea 	ret = devm_add_action_or_reset(dev, owl_emac_clk_disable_unprepare, priv);
1543*de6e0b19SCristian Ciocaltea 	if (ret)
1544*de6e0b19SCristian Ciocaltea 		return ret;
1545*de6e0b19SCristian Ciocaltea 
1546*de6e0b19SCristian Ciocaltea 	ret = owl_emac_clk_set_rate(priv);
1547*de6e0b19SCristian Ciocaltea 	if (ret)
1548*de6e0b19SCristian Ciocaltea 		return ret;
1549*de6e0b19SCristian Ciocaltea 
1550*de6e0b19SCristian Ciocaltea 	priv->reset = devm_reset_control_get_exclusive(dev, NULL);
1551*de6e0b19SCristian Ciocaltea 	if (IS_ERR(priv->reset))
1552*de6e0b19SCristian Ciocaltea 		return dev_err_probe(dev, PTR_ERR(priv->reset),
1553*de6e0b19SCristian Ciocaltea 				     "failed to get reset control\n");
1554*de6e0b19SCristian Ciocaltea 
1555*de6e0b19SCristian Ciocaltea 	owl_emac_get_mac_addr(netdev);
1556*de6e0b19SCristian Ciocaltea 
1557*de6e0b19SCristian Ciocaltea 	owl_emac_core_hw_reset(priv);
1558*de6e0b19SCristian Ciocaltea 	owl_emac_mdio_clock_enable(priv);
1559*de6e0b19SCristian Ciocaltea 
1560*de6e0b19SCristian Ciocaltea 	ret = owl_emac_mdio_init(netdev);
1561*de6e0b19SCristian Ciocaltea 	if (ret) {
1562*de6e0b19SCristian Ciocaltea 		dev_err(dev, "failed to initialize MDIO bus\n");
1563*de6e0b19SCristian Ciocaltea 		return ret;
1564*de6e0b19SCristian Ciocaltea 	}
1565*de6e0b19SCristian Ciocaltea 
1566*de6e0b19SCristian Ciocaltea 	ret = owl_emac_phy_init(netdev);
1567*de6e0b19SCristian Ciocaltea 	if (ret) {
1568*de6e0b19SCristian Ciocaltea 		dev_err(dev, "failed to initialize PHY\n");
1569*de6e0b19SCristian Ciocaltea 		return ret;
1570*de6e0b19SCristian Ciocaltea 	}
1571*de6e0b19SCristian Ciocaltea 
1572*de6e0b19SCristian Ciocaltea 	INIT_WORK(&priv->mac_reset_task, owl_emac_reset_task);
1573*de6e0b19SCristian Ciocaltea 
1574*de6e0b19SCristian Ciocaltea 	netdev->min_mtu = OWL_EMAC_MTU_MIN;
1575*de6e0b19SCristian Ciocaltea 	netdev->max_mtu = OWL_EMAC_MTU_MAX;
1576*de6e0b19SCristian Ciocaltea 	netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
1577*de6e0b19SCristian Ciocaltea 	netdev->netdev_ops = &owl_emac_netdev_ops;
1578*de6e0b19SCristian Ciocaltea 	netdev->ethtool_ops = &owl_emac_ethtool_ops;
1579*de6e0b19SCristian Ciocaltea 	netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
1580*de6e0b19SCristian Ciocaltea 
1581*de6e0b19SCristian Ciocaltea 	ret = devm_register_netdev(dev, netdev);
1582*de6e0b19SCristian Ciocaltea 	if (ret) {
1583*de6e0b19SCristian Ciocaltea 		netif_napi_del(&priv->napi);
1584*de6e0b19SCristian Ciocaltea 		phy_disconnect(netdev->phydev);
1585*de6e0b19SCristian Ciocaltea 		return ret;
1586*de6e0b19SCristian Ciocaltea 	}
1587*de6e0b19SCristian Ciocaltea 
1588*de6e0b19SCristian Ciocaltea 	return 0;
1589*de6e0b19SCristian Ciocaltea }
1590*de6e0b19SCristian Ciocaltea 
1591*de6e0b19SCristian Ciocaltea static int owl_emac_remove(struct platform_device *pdev)
1592*de6e0b19SCristian Ciocaltea {
1593*de6e0b19SCristian Ciocaltea 	struct owl_emac_priv *priv = netdev_priv(platform_get_drvdata(pdev));
1594*de6e0b19SCristian Ciocaltea 
1595*de6e0b19SCristian Ciocaltea 	netif_napi_del(&priv->napi);
1596*de6e0b19SCristian Ciocaltea 	phy_disconnect(priv->netdev->phydev);
1597*de6e0b19SCristian Ciocaltea 	cancel_work_sync(&priv->mac_reset_task);
1598*de6e0b19SCristian Ciocaltea 
1599*de6e0b19SCristian Ciocaltea 	return 0;
1600*de6e0b19SCristian Ciocaltea }
1601*de6e0b19SCristian Ciocaltea 
1602*de6e0b19SCristian Ciocaltea static const struct of_device_id owl_emac_of_match[] = {
1603*de6e0b19SCristian Ciocaltea 	{ .compatible = "actions,owl-emac", },
1604*de6e0b19SCristian Ciocaltea 	{ }
1605*de6e0b19SCristian Ciocaltea };
1606*de6e0b19SCristian Ciocaltea MODULE_DEVICE_TABLE(of, owl_emac_of_match);
1607*de6e0b19SCristian Ciocaltea 
1608*de6e0b19SCristian Ciocaltea static SIMPLE_DEV_PM_OPS(owl_emac_pm_ops,
1609*de6e0b19SCristian Ciocaltea 			 owl_emac_suspend, owl_emac_resume);
1610*de6e0b19SCristian Ciocaltea 
1611*de6e0b19SCristian Ciocaltea static struct platform_driver owl_emac_driver = {
1612*de6e0b19SCristian Ciocaltea 	.driver = {
1613*de6e0b19SCristian Ciocaltea 		.name = OWL_EMAC_DRVNAME,
1614*de6e0b19SCristian Ciocaltea 		.of_match_table = owl_emac_of_match,
1615*de6e0b19SCristian Ciocaltea 		.pm = &owl_emac_pm_ops,
1616*de6e0b19SCristian Ciocaltea 	},
1617*de6e0b19SCristian Ciocaltea 	.probe = owl_emac_probe,
1618*de6e0b19SCristian Ciocaltea 	.remove = owl_emac_remove,
1619*de6e0b19SCristian Ciocaltea };
1620*de6e0b19SCristian Ciocaltea module_platform_driver(owl_emac_driver);
1621*de6e0b19SCristian Ciocaltea 
1622*de6e0b19SCristian Ciocaltea MODULE_DESCRIPTION("Actions Semi Owl SoCs Ethernet MAC Driver");
1623*de6e0b19SCristian Ciocaltea MODULE_AUTHOR("Actions Semi Inc.");
1624*de6e0b19SCristian Ciocaltea MODULE_AUTHOR("Cristian Ciocaltea <cristian.ciocaltea@gmail.com>");
1625*de6e0b19SCristian Ciocaltea MODULE_LICENSE("GPL");
1626