1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Broadcom GENET (Gigabit Ethernet) controller driver
4  *
5  * Copyright (c) 2014-2024 Broadcom
6  */
7 
8 #define pr_fmt(fmt)				"bcmgenet: " fmt
9 
10 #include <linux/acpi.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/sched.h>
14 #include <linux/types.h>
15 #include <linux/fcntl.h>
16 #include <linux/interrupt.h>
17 #include <linux/string.h>
18 #include <linux/if_ether.h>
19 #include <linux/init.h>
20 #include <linux/errno.h>
21 #include <linux/delay.h>
22 #include <linux/platform_device.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/pm.h>
25 #include <linux/clk.h>
26 #include <net/arp.h>
27 
28 #include <linux/mii.h>
29 #include <linux/ethtool.h>
30 #include <linux/netdevice.h>
31 #include <linux/inetdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/in.h>
35 #include <linux/ip.h>
36 #include <linux/ipv6.h>
37 #include <linux/phy.h>
38 #include <linux/platform_data/bcmgenet.h>
39 
40 #include <asm/unaligned.h>
41 
42 #include "bcmgenet.h"
43 
44 /* Maximum number of hardware queues, downsized if needed */
45 #define GENET_MAX_MQ_CNT	4
46 
47 /* Default highest priority queue for multi queue support */
48 #define GENET_Q0_PRIORITY	0
49 
50 #define GENET_Q16_RX_BD_CNT	\
51 	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
52 #define GENET_Q16_TX_BD_CNT	\
53 	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)
54 
55 #define RX_BUF_LENGTH		2048
56 #define SKB_ALIGNMENT		32
57 
58 /* Tx/Rx DMA register offset, skip 256 descriptors */
59 #define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
60 #define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))
61 
62 #define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
63 				TOTAL_DESC * DMA_DESC_SIZE)
64 
65 #define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
66 				TOTAL_DESC * DMA_DESC_SIZE)
67 
68 /* Forward declarations */
69 static void bcmgenet_set_rx_mode(struct net_device *dev);
70 
71 static inline void bcmgenet_writel(u32 value, void __iomem *offset)
72 {
73 	/* MIPS chips strapped for BE will automagically configure the
74 	 * peripheral registers for CPU-native byte order.
75 	 */
76 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
77 		__raw_writel(value, offset);
78 	else
79 		writel_relaxed(value, offset);
80 }
81 
82 static inline u32 bcmgenet_readl(void __iomem *offset)
83 {
84 	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
85 		return __raw_readl(offset);
86 	else
87 		return readl_relaxed(offset);
88 }
89 
90 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
91 					     void __iomem *d, u32 value)
92 {
93 	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
94 }
95 
96 static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
97 				    void __iomem *d,
98 				    dma_addr_t addr)
99 {
100 	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
101 
102 	/* Register writes to GISB bus can take couple hundred nanoseconds
103 	 * and are done for each packet, save these expensive writes unless
104 	 * the platform is explicitly configured for 64-bits/LPAE.
105 	 */
106 #ifdef CONFIG_PHYS_ADDR_T_64BIT
107 	if (priv->hw_params->flags & GENET_HAS_40BITS)
108 		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
109 #endif
110 }
111 
112 /* Combined address + length/status setter */
113 static inline void dmadesc_set(struct bcmgenet_priv *priv,
114 			       void __iomem *d, dma_addr_t addr, u32 val)
115 {
116 	dmadesc_set_addr(priv, d, addr);
117 	dmadesc_set_length_status(priv, d, val);
118 }
119 
120 #define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"
121 
122 #define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
123 				NETIF_MSG_LINK)
124 
125 static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
126 {
127 	if (GENET_IS_V1(priv))
128 		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
129 	else
130 		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
131 }
132 
133 static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
134 {
135 	if (GENET_IS_V1(priv))
136 		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
137 	else
138 		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
139 }
140 
141 /* These macros are defined to deal with register map change
142  * between GENET1.1 and GENET2. Only those currently being used
143  * by driver are defined.
144  */
145 static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
146 {
147 	if (GENET_IS_V1(priv))
148 		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
149 	else
150 		return bcmgenet_readl(priv->base +
151 				      priv->hw_params->tbuf_offset + TBUF_CTRL);
152 }
153 
154 static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
155 {
156 	if (GENET_IS_V1(priv))
157 		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
158 	else
159 		bcmgenet_writel(val, priv->base +
160 				priv->hw_params->tbuf_offset + TBUF_CTRL);
161 }
162 
163 static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
164 {
165 	if (GENET_IS_V1(priv))
166 		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
167 	else
168 		return bcmgenet_readl(priv->base +
169 				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
170 }
171 
172 static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
173 {
174 	if (GENET_IS_V1(priv))
175 		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
176 	else
177 		bcmgenet_writel(val, priv->base +
178 				priv->hw_params->tbuf_offset + TBUF_BP_MC);
179 }
180 
181 /* RX/TX DMA register accessors */
182 enum dma_reg {
183 	DMA_RING_CFG = 0,
184 	DMA_CTRL,
185 	DMA_STATUS,
186 	DMA_SCB_BURST_SIZE,
187 	DMA_ARB_CTRL,
188 	DMA_PRIORITY_0,
189 	DMA_PRIORITY_1,
190 	DMA_PRIORITY_2,
191 	DMA_INDEX2RING_0,
192 	DMA_INDEX2RING_1,
193 	DMA_INDEX2RING_2,
194 	DMA_INDEX2RING_3,
195 	DMA_INDEX2RING_4,
196 	DMA_INDEX2RING_5,
197 	DMA_INDEX2RING_6,
198 	DMA_INDEX2RING_7,
199 	DMA_RING0_TIMEOUT,
200 	DMA_RING1_TIMEOUT,
201 	DMA_RING2_TIMEOUT,
202 	DMA_RING3_TIMEOUT,
203 	DMA_RING4_TIMEOUT,
204 	DMA_RING5_TIMEOUT,
205 	DMA_RING6_TIMEOUT,
206 	DMA_RING7_TIMEOUT,
207 	DMA_RING8_TIMEOUT,
208 	DMA_RING9_TIMEOUT,
209 	DMA_RING10_TIMEOUT,
210 	DMA_RING11_TIMEOUT,
211 	DMA_RING12_TIMEOUT,
212 	DMA_RING13_TIMEOUT,
213 	DMA_RING14_TIMEOUT,
214 	DMA_RING15_TIMEOUT,
215 	DMA_RING16_TIMEOUT,
216 };
217 
218 static const u8 bcmgenet_dma_regs_v3plus[] = {
219 	[DMA_RING_CFG]		= 0x00,
220 	[DMA_CTRL]		= 0x04,
221 	[DMA_STATUS]		= 0x08,
222 	[DMA_SCB_BURST_SIZE]	= 0x0C,
223 	[DMA_ARB_CTRL]		= 0x2C,
224 	[DMA_PRIORITY_0]	= 0x30,
225 	[DMA_PRIORITY_1]	= 0x34,
226 	[DMA_PRIORITY_2]	= 0x38,
227 	[DMA_RING0_TIMEOUT]	= 0x2C,
228 	[DMA_RING1_TIMEOUT]	= 0x30,
229 	[DMA_RING2_TIMEOUT]	= 0x34,
230 	[DMA_RING3_TIMEOUT]	= 0x38,
231 	[DMA_RING4_TIMEOUT]	= 0x3c,
232 	[DMA_RING5_TIMEOUT]	= 0x40,
233 	[DMA_RING6_TIMEOUT]	= 0x44,
234 	[DMA_RING7_TIMEOUT]	= 0x48,
235 	[DMA_RING8_TIMEOUT]	= 0x4c,
236 	[DMA_RING9_TIMEOUT]	= 0x50,
237 	[DMA_RING10_TIMEOUT]	= 0x54,
238 	[DMA_RING11_TIMEOUT]	= 0x58,
239 	[DMA_RING12_TIMEOUT]	= 0x5c,
240 	[DMA_RING13_TIMEOUT]	= 0x60,
241 	[DMA_RING14_TIMEOUT]	= 0x64,
242 	[DMA_RING15_TIMEOUT]	= 0x68,
243 	[DMA_RING16_TIMEOUT]	= 0x6C,
244 	[DMA_INDEX2RING_0]	= 0x70,
245 	[DMA_INDEX2RING_1]	= 0x74,
246 	[DMA_INDEX2RING_2]	= 0x78,
247 	[DMA_INDEX2RING_3]	= 0x7C,
248 	[DMA_INDEX2RING_4]	= 0x80,
249 	[DMA_INDEX2RING_5]	= 0x84,
250 	[DMA_INDEX2RING_6]	= 0x88,
251 	[DMA_INDEX2RING_7]	= 0x8C,
252 };
253 
254 static const u8 bcmgenet_dma_regs_v2[] = {
255 	[DMA_RING_CFG]		= 0x00,
256 	[DMA_CTRL]		= 0x04,
257 	[DMA_STATUS]		= 0x08,
258 	[DMA_SCB_BURST_SIZE]	= 0x0C,
259 	[DMA_ARB_CTRL]		= 0x30,
260 	[DMA_PRIORITY_0]	= 0x34,
261 	[DMA_PRIORITY_1]	= 0x38,
262 	[DMA_PRIORITY_2]	= 0x3C,
263 	[DMA_RING0_TIMEOUT]	= 0x2C,
264 	[DMA_RING1_TIMEOUT]	= 0x30,
265 	[DMA_RING2_TIMEOUT]	= 0x34,
266 	[DMA_RING3_TIMEOUT]	= 0x38,
267 	[DMA_RING4_TIMEOUT]	= 0x3c,
268 	[DMA_RING5_TIMEOUT]	= 0x40,
269 	[DMA_RING6_TIMEOUT]	= 0x44,
270 	[DMA_RING7_TIMEOUT]	= 0x48,
271 	[DMA_RING8_TIMEOUT]	= 0x4c,
272 	[DMA_RING9_TIMEOUT]	= 0x50,
273 	[DMA_RING10_TIMEOUT]	= 0x54,
274 	[DMA_RING11_TIMEOUT]	= 0x58,
275 	[DMA_RING12_TIMEOUT]	= 0x5c,
276 	[DMA_RING13_TIMEOUT]	= 0x60,
277 	[DMA_RING14_TIMEOUT]	= 0x64,
278 	[DMA_RING15_TIMEOUT]	= 0x68,
279 	[DMA_RING16_TIMEOUT]	= 0x6C,
280 };
281 
282 static const u8 bcmgenet_dma_regs_v1[] = {
283 	[DMA_CTRL]		= 0x00,
284 	[DMA_STATUS]		= 0x04,
285 	[DMA_SCB_BURST_SIZE]	= 0x0C,
286 	[DMA_ARB_CTRL]		= 0x30,
287 	[DMA_PRIORITY_0]	= 0x34,
288 	[DMA_PRIORITY_1]	= 0x38,
289 	[DMA_PRIORITY_2]	= 0x3C,
290 	[DMA_RING0_TIMEOUT]	= 0x2C,
291 	[DMA_RING1_TIMEOUT]	= 0x30,
292 	[DMA_RING2_TIMEOUT]	= 0x34,
293 	[DMA_RING3_TIMEOUT]	= 0x38,
294 	[DMA_RING4_TIMEOUT]	= 0x3c,
295 	[DMA_RING5_TIMEOUT]	= 0x40,
296 	[DMA_RING6_TIMEOUT]	= 0x44,
297 	[DMA_RING7_TIMEOUT]	= 0x48,
298 	[DMA_RING8_TIMEOUT]	= 0x4c,
299 	[DMA_RING9_TIMEOUT]	= 0x50,
300 	[DMA_RING10_TIMEOUT]	= 0x54,
301 	[DMA_RING11_TIMEOUT]	= 0x58,
302 	[DMA_RING12_TIMEOUT]	= 0x5c,
303 	[DMA_RING13_TIMEOUT]	= 0x60,
304 	[DMA_RING14_TIMEOUT]	= 0x64,
305 	[DMA_RING15_TIMEOUT]	= 0x68,
306 	[DMA_RING16_TIMEOUT]	= 0x6C,
307 };
308 
309 /* Set at runtime once bcmgenet version is known */
310 static const u8 *bcmgenet_dma_regs;
311 
312 static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
313 {
314 	return netdev_priv(dev_get_drvdata(dev));
315 }
316 
317 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
318 				      enum dma_reg r)
319 {
320 	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
321 			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
322 }
323 
324 static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
325 					u32 val, enum dma_reg r)
326 {
327 	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
328 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
329 }
330 
331 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
332 				      enum dma_reg r)
333 {
334 	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
335 			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
336 }
337 
338 static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
339 					u32 val, enum dma_reg r)
340 {
341 	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
342 			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
343 }
344 
345 /* RDMA/TDMA ring registers and accessors
346  * we merge the common fields and just prefix with T/D the registers
347  * having different meaning depending on the direction
348  */
349 enum dma_ring_reg {
350 	TDMA_READ_PTR = 0,
351 	RDMA_WRITE_PTR = TDMA_READ_PTR,
352 	TDMA_READ_PTR_HI,
353 	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
354 	TDMA_CONS_INDEX,
355 	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
356 	TDMA_PROD_INDEX,
357 	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
358 	DMA_RING_BUF_SIZE,
359 	DMA_START_ADDR,
360 	DMA_START_ADDR_HI,
361 	DMA_END_ADDR,
362 	DMA_END_ADDR_HI,
363 	DMA_MBUF_DONE_THRESH,
364 	TDMA_FLOW_PERIOD,
365 	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
366 	TDMA_WRITE_PTR,
367 	RDMA_READ_PTR = TDMA_WRITE_PTR,
368 	TDMA_WRITE_PTR_HI,
369 	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
370 };
371 
372 /* GENET v4 supports 40-bits pointer addressing
373  * for obvious reasons the LO and HI word parts
374  * are contiguous, but this offsets the other
375  * registers.
376  */
377 static const u8 genet_dma_ring_regs_v4[] = {
378 	[TDMA_READ_PTR]			= 0x00,
379 	[TDMA_READ_PTR_HI]		= 0x04,
380 	[TDMA_CONS_INDEX]		= 0x08,
381 	[TDMA_PROD_INDEX]		= 0x0C,
382 	[DMA_RING_BUF_SIZE]		= 0x10,
383 	[DMA_START_ADDR]		= 0x14,
384 	[DMA_START_ADDR_HI]		= 0x18,
385 	[DMA_END_ADDR]			= 0x1C,
386 	[DMA_END_ADDR_HI]		= 0x20,
387 	[DMA_MBUF_DONE_THRESH]		= 0x24,
388 	[TDMA_FLOW_PERIOD]		= 0x28,
389 	[TDMA_WRITE_PTR]		= 0x2C,
390 	[TDMA_WRITE_PTR_HI]		= 0x30,
391 };
392 
393 static const u8 genet_dma_ring_regs_v123[] = {
394 	[TDMA_READ_PTR]			= 0x00,
395 	[TDMA_CONS_INDEX]		= 0x04,
396 	[TDMA_PROD_INDEX]		= 0x08,
397 	[DMA_RING_BUF_SIZE]		= 0x0C,
398 	[DMA_START_ADDR]		= 0x10,
399 	[DMA_END_ADDR]			= 0x14,
400 	[DMA_MBUF_DONE_THRESH]		= 0x18,
401 	[TDMA_FLOW_PERIOD]		= 0x1C,
402 	[TDMA_WRITE_PTR]		= 0x20,
403 };
404 
405 /* Set at runtime once GENET version is known */
406 static const u8 *genet_dma_ring_regs;
407 
408 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
409 					   unsigned int ring,
410 					   enum dma_ring_reg r)
411 {
412 	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
413 			      (DMA_RING_SIZE * ring) +
414 			      genet_dma_ring_regs[r]);
415 }
416 
417 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
418 					     unsigned int ring, u32 val,
419 					     enum dma_ring_reg r)
420 {
421 	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
422 			(DMA_RING_SIZE * ring) +
423 			genet_dma_ring_regs[r]);
424 }
425 
426 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
427 					   unsigned int ring,
428 					   enum dma_ring_reg r)
429 {
430 	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
431 			      (DMA_RING_SIZE * ring) +
432 			      genet_dma_ring_regs[r]);
433 }
434 
435 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
436 					     unsigned int ring, u32 val,
437 					     enum dma_ring_reg r)
438 {
439 	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
440 			(DMA_RING_SIZE * ring) +
441 			genet_dma_ring_regs[r]);
442 }
443 
444 static void bcmgenet_hfb_enable_filter(struct bcmgenet_priv *priv, u32 f_index)
445 {
446 	u32 offset;
447 	u32 reg;
448 
449 	offset = HFB_FLT_ENABLE_V3PLUS + (f_index < 32) * sizeof(u32);
450 	reg = bcmgenet_hfb_reg_readl(priv, offset);
451 	reg |= (1 << (f_index % 32));
452 	bcmgenet_hfb_reg_writel(priv, reg, offset);
453 	reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
454 	reg |= RBUF_HFB_EN;
455 	bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
456 }
457 
458 static void bcmgenet_hfb_disable_filter(struct bcmgenet_priv *priv, u32 f_index)
459 {
460 	u32 offset, reg, reg1;
461 
462 	offset = HFB_FLT_ENABLE_V3PLUS;
463 	reg = bcmgenet_hfb_reg_readl(priv, offset);
464 	reg1 = bcmgenet_hfb_reg_readl(priv, offset + sizeof(u32));
465 	if  (f_index < 32) {
466 		reg1 &= ~(1 << (f_index % 32));
467 		bcmgenet_hfb_reg_writel(priv, reg1, offset + sizeof(u32));
468 	} else {
469 		reg &= ~(1 << (f_index % 32));
470 		bcmgenet_hfb_reg_writel(priv, reg, offset);
471 	}
472 	if (!reg && !reg1) {
473 		reg = bcmgenet_hfb_reg_readl(priv, HFB_CTRL);
474 		reg &= ~RBUF_HFB_EN;
475 		bcmgenet_hfb_reg_writel(priv, reg, HFB_CTRL);
476 	}
477 }
478 
479 static void bcmgenet_hfb_set_filter_rx_queue_mapping(struct bcmgenet_priv *priv,
480 						     u32 f_index, u32 rx_queue)
481 {
482 	u32 offset;
483 	u32 reg;
484 
485 	offset = f_index / 8;
486 	reg = bcmgenet_rdma_readl(priv, DMA_INDEX2RING_0 + offset);
487 	reg &= ~(0xF << (4 * (f_index % 8)));
488 	reg |= ((rx_queue & 0xF) << (4 * (f_index % 8)));
489 	bcmgenet_rdma_writel(priv, reg, DMA_INDEX2RING_0 + offset);
490 }
491 
492 static void bcmgenet_hfb_set_filter_length(struct bcmgenet_priv *priv,
493 					   u32 f_index, u32 f_length)
494 {
495 	u32 offset;
496 	u32 reg;
497 
498 	offset = HFB_FLT_LEN_V3PLUS +
499 		 ((priv->hw_params->hfb_filter_cnt - 1 - f_index) / 4) *
500 		 sizeof(u32);
501 	reg = bcmgenet_hfb_reg_readl(priv, offset);
502 	reg &= ~(0xFF << (8 * (f_index % 4)));
503 	reg |= ((f_length & 0xFF) << (8 * (f_index % 4)));
504 	bcmgenet_hfb_reg_writel(priv, reg, offset);
505 }
506 
507 static int bcmgenet_hfb_validate_mask(void *mask, size_t size)
508 {
509 	while (size) {
510 		switch (*(unsigned char *)mask++) {
511 		case 0x00:
512 		case 0x0f:
513 		case 0xf0:
514 		case 0xff:
515 			size--;
516 			continue;
517 		default:
518 			return -EINVAL;
519 		}
520 	}
521 
522 	return 0;
523 }
524 
525 #define VALIDATE_MASK(x) \
526 	bcmgenet_hfb_validate_mask(&(x), sizeof(x))
527 
528 static int bcmgenet_hfb_insert_data(struct bcmgenet_priv *priv, u32 f_index,
529 				    u32 offset, void *val, void *mask,
530 				    size_t size)
531 {
532 	u32 index, tmp;
533 
534 	index = f_index * priv->hw_params->hfb_filter_size + offset / 2;
535 	tmp = bcmgenet_hfb_readl(priv, index * sizeof(u32));
536 
537 	while (size--) {
538 		if (offset++ & 1) {
539 			tmp &= ~0x300FF;
540 			tmp |= (*(unsigned char *)val++);
541 			switch ((*(unsigned char *)mask++)) {
542 			case 0xFF:
543 				tmp |= 0x30000;
544 				break;
545 			case 0xF0:
546 				tmp |= 0x20000;
547 				break;
548 			case 0x0F:
549 				tmp |= 0x10000;
550 				break;
551 			}
552 			bcmgenet_hfb_writel(priv, tmp, index++ * sizeof(u32));
553 			if (size)
554 				tmp = bcmgenet_hfb_readl(priv,
555 							 index * sizeof(u32));
556 		} else {
557 			tmp &= ~0xCFF00;
558 			tmp |= (*(unsigned char *)val++) << 8;
559 			switch ((*(unsigned char *)mask++)) {
560 			case 0xFF:
561 				tmp |= 0xC0000;
562 				break;
563 			case 0xF0:
564 				tmp |= 0x80000;
565 				break;
566 			case 0x0F:
567 				tmp |= 0x40000;
568 				break;
569 			}
570 			if (!size)
571 				bcmgenet_hfb_writel(priv, tmp, index * sizeof(u32));
572 		}
573 	}
574 
575 	return 0;
576 }
577 
578 static void bcmgenet_hfb_create_rxnfc_filter(struct bcmgenet_priv *priv,
579 					     struct bcmgenet_rxnfc_rule *rule)
580 {
581 	struct ethtool_rx_flow_spec *fs = &rule->fs;
582 	u32 offset = 0, f_length = 0, f;
583 	u8 val_8, mask_8;
584 	__be16 val_16;
585 	u16 mask_16;
586 	size_t size;
587 
588 	f = fs->location;
589 	if (fs->flow_type & FLOW_MAC_EXT) {
590 		bcmgenet_hfb_insert_data(priv, f, 0,
591 					 &fs->h_ext.h_dest, &fs->m_ext.h_dest,
592 					 sizeof(fs->h_ext.h_dest));
593 	}
594 
595 	if (fs->flow_type & FLOW_EXT) {
596 		if (fs->m_ext.vlan_etype ||
597 		    fs->m_ext.vlan_tci) {
598 			bcmgenet_hfb_insert_data(priv, f, 12,
599 						 &fs->h_ext.vlan_etype,
600 						 &fs->m_ext.vlan_etype,
601 						 sizeof(fs->h_ext.vlan_etype));
602 			bcmgenet_hfb_insert_data(priv, f, 14,
603 						 &fs->h_ext.vlan_tci,
604 						 &fs->m_ext.vlan_tci,
605 						 sizeof(fs->h_ext.vlan_tci));
606 			offset += VLAN_HLEN;
607 			f_length += DIV_ROUND_UP(VLAN_HLEN, 2);
608 		}
609 	}
610 
611 	switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
612 	case ETHER_FLOW:
613 		f_length += DIV_ROUND_UP(ETH_HLEN, 2);
614 		bcmgenet_hfb_insert_data(priv, f, 0,
615 					 &fs->h_u.ether_spec.h_dest,
616 					 &fs->m_u.ether_spec.h_dest,
617 					 sizeof(fs->h_u.ether_spec.h_dest));
618 		bcmgenet_hfb_insert_data(priv, f, ETH_ALEN,
619 					 &fs->h_u.ether_spec.h_source,
620 					 &fs->m_u.ether_spec.h_source,
621 					 sizeof(fs->h_u.ether_spec.h_source));
622 		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
623 					 &fs->h_u.ether_spec.h_proto,
624 					 &fs->m_u.ether_spec.h_proto,
625 					 sizeof(fs->h_u.ether_spec.h_proto));
626 		break;
627 	case IP_USER_FLOW:
628 		f_length += DIV_ROUND_UP(ETH_HLEN + 20, 2);
629 		/* Specify IP Ether Type */
630 		val_16 = htons(ETH_P_IP);
631 		mask_16 = 0xFFFF;
632 		bcmgenet_hfb_insert_data(priv, f, (2 * ETH_ALEN) + offset,
633 					 &val_16, &mask_16, sizeof(val_16));
634 		bcmgenet_hfb_insert_data(priv, f, 15 + offset,
635 					 &fs->h_u.usr_ip4_spec.tos,
636 					 &fs->m_u.usr_ip4_spec.tos,
637 					 sizeof(fs->h_u.usr_ip4_spec.tos));
638 		bcmgenet_hfb_insert_data(priv, f, 23 + offset,
639 					 &fs->h_u.usr_ip4_spec.proto,
640 					 &fs->m_u.usr_ip4_spec.proto,
641 					 sizeof(fs->h_u.usr_ip4_spec.proto));
642 		bcmgenet_hfb_insert_data(priv, f, 26 + offset,
643 					 &fs->h_u.usr_ip4_spec.ip4src,
644 					 &fs->m_u.usr_ip4_spec.ip4src,
645 					 sizeof(fs->h_u.usr_ip4_spec.ip4src));
646 		bcmgenet_hfb_insert_data(priv, f, 30 + offset,
647 					 &fs->h_u.usr_ip4_spec.ip4dst,
648 					 &fs->m_u.usr_ip4_spec.ip4dst,
649 					 sizeof(fs->h_u.usr_ip4_spec.ip4dst));
650 		if (!fs->m_u.usr_ip4_spec.l4_4_bytes)
651 			break;
652 
653 		/* Only supports 20 byte IPv4 header */
654 		val_8 = 0x45;
655 		mask_8 = 0xFF;
656 		bcmgenet_hfb_insert_data(priv, f, ETH_HLEN + offset,
657 					 &val_8, &mask_8,
658 					 sizeof(val_8));
659 		size = sizeof(fs->h_u.usr_ip4_spec.l4_4_bytes);
660 		bcmgenet_hfb_insert_data(priv, f,
661 					 ETH_HLEN + 20 + offset,
662 					 &fs->h_u.usr_ip4_spec.l4_4_bytes,
663 					 &fs->m_u.usr_ip4_spec.l4_4_bytes,
664 					 size);
665 		f_length += DIV_ROUND_UP(size, 2);
666 		break;
667 	}
668 
669 	bcmgenet_hfb_set_filter_length(priv, f, 2 * f_length);
670 	if (!fs->ring_cookie || fs->ring_cookie == RX_CLS_FLOW_WAKE) {
671 		/* Ring 0 flows can be handled by the default Descriptor Ring
672 		 * We'll map them to ring 0, but don't enable the filter
673 		 */
674 		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f, 0);
675 		rule->state = BCMGENET_RXNFC_STATE_DISABLED;
676 	} else {
677 		/* Other Rx rings are direct mapped here */
678 		bcmgenet_hfb_set_filter_rx_queue_mapping(priv, f,
679 							 fs->ring_cookie);
680 		bcmgenet_hfb_enable_filter(priv, f);
681 		rule->state = BCMGENET_RXNFC_STATE_ENABLED;
682 	}
683 }
684 
685 /* bcmgenet_hfb_clear
686  *
687  * Clear Hardware Filter Block and disable all filtering.
688  */
689 static void bcmgenet_hfb_clear_filter(struct bcmgenet_priv *priv, u32 f_index)
690 {
691 	u32 base, i;
692 
693 	base = f_index * priv->hw_params->hfb_filter_size;
694 	for (i = 0; i < priv->hw_params->hfb_filter_size; i++)
695 		bcmgenet_hfb_writel(priv, 0x0, (base + i) * sizeof(u32));
696 }
697 
698 static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
699 {
700 	u32 i;
701 
702 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
703 		return;
704 
705 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
706 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
707 	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);
708 
709 	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
710 		bcmgenet_rdma_writel(priv, 0x0, i);
711 
712 	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
713 		bcmgenet_hfb_reg_writel(priv, 0x0,
714 					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));
715 
716 	for (i = 0; i < priv->hw_params->hfb_filter_cnt; i++)
717 		bcmgenet_hfb_clear_filter(priv, i);
718 }
719 
720 static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
721 {
722 	int i;
723 
724 	INIT_LIST_HEAD(&priv->rxnfc_list);
725 	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
726 		return;
727 
728 	for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
729 		INIT_LIST_HEAD(&priv->rxnfc_rules[i].list);
730 		priv->rxnfc_rules[i].state = BCMGENET_RXNFC_STATE_UNUSED;
731 	}
732 
733 	bcmgenet_hfb_clear(priv);
734 }
735 
736 static int bcmgenet_begin(struct net_device *dev)
737 {
738 	struct bcmgenet_priv *priv = netdev_priv(dev);
739 
740 	/* Turn on the clock */
741 	return clk_prepare_enable(priv->clk);
742 }
743 
744 static void bcmgenet_complete(struct net_device *dev)
745 {
746 	struct bcmgenet_priv *priv = netdev_priv(dev);
747 
748 	/* Turn off the clock */
749 	clk_disable_unprepare(priv->clk);
750 }
751 
752 static int bcmgenet_get_link_ksettings(struct net_device *dev,
753 				       struct ethtool_link_ksettings *cmd)
754 {
755 	if (!netif_running(dev))
756 		return -EINVAL;
757 
758 	if (!dev->phydev)
759 		return -ENODEV;
760 
761 	phy_ethtool_ksettings_get(dev->phydev, cmd);
762 
763 	return 0;
764 }
765 
766 static int bcmgenet_set_link_ksettings(struct net_device *dev,
767 				       const struct ethtool_link_ksettings *cmd)
768 {
769 	if (!netif_running(dev))
770 		return -EINVAL;
771 
772 	if (!dev->phydev)
773 		return -ENODEV;
774 
775 	return phy_ethtool_ksettings_set(dev->phydev, cmd);
776 }
777 
778 static int bcmgenet_set_features(struct net_device *dev,
779 				 netdev_features_t features)
780 {
781 	struct bcmgenet_priv *priv = netdev_priv(dev);
782 	u32 reg;
783 	int ret;
784 
785 	ret = clk_prepare_enable(priv->clk);
786 	if (ret)
787 		return ret;
788 
789 	/* Make sure we reflect the value of CRC_CMD_FWD */
790 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
791 	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
792 
793 	clk_disable_unprepare(priv->clk);
794 
795 	return ret;
796 }
797 
798 static u32 bcmgenet_get_msglevel(struct net_device *dev)
799 {
800 	struct bcmgenet_priv *priv = netdev_priv(dev);
801 
802 	return priv->msg_enable;
803 }
804 
805 static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
806 {
807 	struct bcmgenet_priv *priv = netdev_priv(dev);
808 
809 	priv->msg_enable = level;
810 }
811 
812 static int bcmgenet_get_coalesce(struct net_device *dev,
813 				 struct ethtool_coalesce *ec,
814 				 struct kernel_ethtool_coalesce *kernel_coal,
815 				 struct netlink_ext_ack *extack)
816 {
817 	struct bcmgenet_priv *priv = netdev_priv(dev);
818 	struct bcmgenet_rx_ring *ring;
819 	unsigned int i;
820 
821 	ec->tx_max_coalesced_frames =
822 		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
823 					 DMA_MBUF_DONE_THRESH);
824 	ec->rx_max_coalesced_frames =
825 		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
826 					 DMA_MBUF_DONE_THRESH);
827 	ec->rx_coalesce_usecs =
828 		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;
829 
830 	for (i = 0; i < priv->hw_params->rx_queues; i++) {
831 		ring = &priv->rx_rings[i];
832 		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
833 	}
834 	ring = &priv->rx_rings[DESC_INDEX];
835 	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
836 
837 	return 0;
838 }
839 
840 static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
841 				     u32 usecs, u32 pkts)
842 {
843 	struct bcmgenet_priv *priv = ring->priv;
844 	unsigned int i = ring->index;
845 	u32 reg;
846 
847 	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);
848 
849 	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
850 	reg &= ~DMA_TIMEOUT_MASK;
851 	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
852 	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
853 }
854 
855 static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
856 					  struct ethtool_coalesce *ec)
857 {
858 	struct dim_cq_moder moder;
859 	u32 usecs, pkts;
860 
861 	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
862 	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
863 	usecs = ring->rx_coalesce_usecs;
864 	pkts = ring->rx_max_coalesced_frames;
865 
866 	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
867 		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
868 		usecs = moder.usec;
869 		pkts = moder.pkts;
870 	}
871 
872 	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
873 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
874 }
875 
876 static int bcmgenet_set_coalesce(struct net_device *dev,
877 				 struct ethtool_coalesce *ec,
878 				 struct kernel_ethtool_coalesce *kernel_coal,
879 				 struct netlink_ext_ack *extack)
880 {
881 	struct bcmgenet_priv *priv = netdev_priv(dev);
882 	unsigned int i;
883 
884 	/* Base system clock is 125Mhz, DMA timeout is this reference clock
885 	 * divided by 1024, which yields roughly 8.192us, our maximum value
886 	 * has to fit in the DMA_TIMEOUT_MASK (16 bits)
887 	 */
888 	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
889 	    ec->tx_max_coalesced_frames == 0 ||
890 	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
891 	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
892 		return -EINVAL;
893 
894 	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
895 		return -EINVAL;
896 
897 	/* GENET TDMA hardware does not support a configurable timeout, but will
898 	 * always generate an interrupt either after MBDONE packets have been
899 	 * transmitted, or when the ring is empty.
900 	 */
901 
902 	/* Program all TX queues with the same values, as there is no
903 	 * ethtool knob to do coalescing on a per-queue basis
904 	 */
905 	for (i = 0; i < priv->hw_params->tx_queues; i++)
906 		bcmgenet_tdma_ring_writel(priv, i,
907 					  ec->tx_max_coalesced_frames,
908 					  DMA_MBUF_DONE_THRESH);
909 	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
910 				  ec->tx_max_coalesced_frames,
911 				  DMA_MBUF_DONE_THRESH);
912 
913 	for (i = 0; i < priv->hw_params->rx_queues; i++)
914 		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
915 	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);
916 
917 	return 0;
918 }
919 
920 static void bcmgenet_get_pauseparam(struct net_device *dev,
921 				    struct ethtool_pauseparam *epause)
922 {
923 	struct bcmgenet_priv *priv;
924 	u32 umac_cmd;
925 
926 	priv = netdev_priv(dev);
927 
928 	epause->autoneg = priv->autoneg_pause;
929 
930 	if (netif_carrier_ok(dev)) {
931 		/* report active state when link is up */
932 		umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
933 		epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
934 		epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
935 	} else {
936 		/* otherwise report stored settings */
937 		epause->tx_pause = priv->tx_pause;
938 		epause->rx_pause = priv->rx_pause;
939 	}
940 }
941 
942 static int bcmgenet_set_pauseparam(struct net_device *dev,
943 				   struct ethtool_pauseparam *epause)
944 {
945 	struct bcmgenet_priv *priv = netdev_priv(dev);
946 
947 	if (!dev->phydev)
948 		return -ENODEV;
949 
950 	if (!phy_validate_pause(dev->phydev, epause))
951 		return -EINVAL;
952 
953 	priv->autoneg_pause = !!epause->autoneg;
954 	priv->tx_pause = !!epause->tx_pause;
955 	priv->rx_pause = !!epause->rx_pause;
956 
957 	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
958 
959 	return 0;
960 }
961 
962 /* standard ethtool support functions. */
963 enum bcmgenet_stat_type {
964 	BCMGENET_STAT_NETDEV = -1,
965 	BCMGENET_STAT_MIB_RX,
966 	BCMGENET_STAT_MIB_TX,
967 	BCMGENET_STAT_RUNT,
968 	BCMGENET_STAT_MISC,
969 	BCMGENET_STAT_SOFT,
970 };
971 
972 struct bcmgenet_stats {
973 	char stat_string[ETH_GSTRING_LEN];
974 	int stat_sizeof;
975 	int stat_offset;
976 	enum bcmgenet_stat_type type;
977 	/* reg offset from UMAC base for misc counters */
978 	u16 reg_offset;
979 };
980 
981 #define STAT_NETDEV(m) { \
982 	.stat_string = __stringify(m), \
983 	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
984 	.stat_offset = offsetof(struct net_device_stats, m), \
985 	.type = BCMGENET_STAT_NETDEV, \
986 }
987 
988 #define STAT_GENET_MIB(str, m, _type) { \
989 	.stat_string = str, \
990 	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
991 	.stat_offset = offsetof(struct bcmgenet_priv, m), \
992 	.type = _type, \
993 }
994 
995 #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
996 #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
997 #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
998 #define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)
999 
1000 #define STAT_GENET_MISC(str, m, offset) { \
1001 	.stat_string = str, \
1002 	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
1003 	.stat_offset = offsetof(struct bcmgenet_priv, m), \
1004 	.type = BCMGENET_STAT_MISC, \
1005 	.reg_offset = offset, \
1006 }
1007 
1008 #define STAT_GENET_Q(num) \
1009 	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
1010 			tx_rings[num].packets), \
1011 	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
1012 			tx_rings[num].bytes), \
1013 	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
1014 			rx_rings[num].bytes),	 \
1015 	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
1016 			rx_rings[num].packets), \
1017 	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
1018 			rx_rings[num].errors), \
1019 	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
1020 			rx_rings[num].dropped)
1021 
1022 /* There is a 0xC gap between the end of RX and beginning of TX stats and then
1023  * between the end of TX stats and the beginning of the RX RUNT
1024  */
1025 #define BCMGENET_STAT_OFFSET	0xc
1026 
1027 /* Hardware counters must be kept in sync because the order/offset
1028  * is important here (order in structure declaration = order in hardware)
1029  */
1030 static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
1031 	/* general stats */
1032 	STAT_NETDEV(rx_packets),
1033 	STAT_NETDEV(tx_packets),
1034 	STAT_NETDEV(rx_bytes),
1035 	STAT_NETDEV(tx_bytes),
1036 	STAT_NETDEV(rx_errors),
1037 	STAT_NETDEV(tx_errors),
1038 	STAT_NETDEV(rx_dropped),
1039 	STAT_NETDEV(tx_dropped),
1040 	STAT_NETDEV(multicast),
1041 	/* UniMAC RSV counters */
1042 	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
1043 	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
1044 	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
1045 	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
1046 	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
1047 	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
1048 	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
1049 	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
1050 	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
1051 	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
1052 	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
1053 	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
1054 	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
1055 	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
1056 	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
1057 	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
1058 	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
1059 	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
1060 	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
1061 	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
1062 	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
1063 	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
1064 	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
1065 	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
1066 	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
1067 	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
1068 	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
1069 	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
1070 	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
1071 	/* UniMAC TSV counters */
1072 	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
1073 	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
1074 	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
1075 	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
1076 	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
1077 	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
1078 	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
1079 	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
1080 	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
1081 	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
1082 	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
1083 	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
1084 	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
1085 	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
1086 	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
1087 	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
1088 	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
1089 	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
1090 	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
1091 	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
1092 	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
1093 	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
1094 	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
1095 	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
1096 	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
1097 	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
1098 	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
1099 	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
1100 	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
1101 	/* UniMAC RUNT counters */
1102 	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
1103 	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
1104 	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
1105 	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
1106 	/* Misc UniMAC counters */
1107 	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
1108 			UMAC_RBUF_OVFL_CNT_V1),
1109 	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
1110 			UMAC_RBUF_ERR_CNT_V1),
1111 	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
1112 	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
1113 	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
1114 	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
1115 	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
1116 	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
1117 			    mib.tx_realloc_tsb_failed),
1118 	/* Per TX queues */
1119 	STAT_GENET_Q(0),
1120 	STAT_GENET_Q(1),
1121 	STAT_GENET_Q(2),
1122 	STAT_GENET_Q(3),
1123 	STAT_GENET_Q(16),
1124 };
1125 
1126 #define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)
1127 
1128 static void bcmgenet_get_drvinfo(struct net_device *dev,
1129 				 struct ethtool_drvinfo *info)
1130 {
1131 	strscpy(info->driver, "bcmgenet", sizeof(info->driver));
1132 }
1133 
1134 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
1135 {
1136 	switch (string_set) {
1137 	case ETH_SS_STATS:
1138 		return BCMGENET_STATS_LEN;
1139 	default:
1140 		return -EOPNOTSUPP;
1141 	}
1142 }
1143 
1144 static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
1145 				 u8 *data)
1146 {
1147 	int i;
1148 
1149 	switch (stringset) {
1150 	case ETH_SS_STATS:
1151 		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1152 			memcpy(data + i * ETH_GSTRING_LEN,
1153 			       bcmgenet_gstrings_stats[i].stat_string,
1154 			       ETH_GSTRING_LEN);
1155 		}
1156 		break;
1157 	}
1158 }
1159 
1160 static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
1161 {
1162 	u16 new_offset;
1163 	u32 val;
1164 
1165 	switch (offset) {
1166 	case UMAC_RBUF_OVFL_CNT_V1:
1167 		if (GENET_IS_V2(priv))
1168 			new_offset = RBUF_OVFL_CNT_V2;
1169 		else
1170 			new_offset = RBUF_OVFL_CNT_V3PLUS;
1171 
1172 		val = bcmgenet_rbuf_readl(priv,	new_offset);
1173 		/* clear if overflowed */
1174 		if (val == ~0)
1175 			bcmgenet_rbuf_writel(priv, 0, new_offset);
1176 		break;
1177 	case UMAC_RBUF_ERR_CNT_V1:
1178 		if (GENET_IS_V2(priv))
1179 			new_offset = RBUF_ERR_CNT_V2;
1180 		else
1181 			new_offset = RBUF_ERR_CNT_V3PLUS;
1182 
1183 		val = bcmgenet_rbuf_readl(priv,	new_offset);
1184 		/* clear if overflowed */
1185 		if (val == ~0)
1186 			bcmgenet_rbuf_writel(priv, 0, new_offset);
1187 		break;
1188 	default:
1189 		val = bcmgenet_umac_readl(priv, offset);
1190 		/* clear if overflowed */
1191 		if (val == ~0)
1192 			bcmgenet_umac_writel(priv, 0, offset);
1193 		break;
1194 	}
1195 
1196 	return val;
1197 }
1198 
1199 static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
1200 {
1201 	int i, j = 0;
1202 
1203 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1204 		const struct bcmgenet_stats *s;
1205 		u8 offset = 0;
1206 		u32 val = 0;
1207 		char *p;
1208 
1209 		s = &bcmgenet_gstrings_stats[i];
1210 		switch (s->type) {
1211 		case BCMGENET_STAT_NETDEV:
1212 		case BCMGENET_STAT_SOFT:
1213 			continue;
1214 		case BCMGENET_STAT_RUNT:
1215 			offset += BCMGENET_STAT_OFFSET;
1216 			fallthrough;
1217 		case BCMGENET_STAT_MIB_TX:
1218 			offset += BCMGENET_STAT_OFFSET;
1219 			fallthrough;
1220 		case BCMGENET_STAT_MIB_RX:
1221 			val = bcmgenet_umac_readl(priv,
1222 						  UMAC_MIB_START + j + offset);
1223 			offset = 0;	/* Reset Offset */
1224 			break;
1225 		case BCMGENET_STAT_MISC:
1226 			if (GENET_IS_V1(priv)) {
1227 				val = bcmgenet_umac_readl(priv, s->reg_offset);
1228 				/* clear if overflowed */
1229 				if (val == ~0)
1230 					bcmgenet_umac_writel(priv, 0,
1231 							     s->reg_offset);
1232 			} else {
1233 				val = bcmgenet_update_stat_misc(priv,
1234 								s->reg_offset);
1235 			}
1236 			break;
1237 		}
1238 
1239 		j += s->stat_sizeof;
1240 		p = (char *)priv + s->stat_offset;
1241 		*(u32 *)p = val;
1242 	}
1243 }
1244 
1245 static void bcmgenet_get_ethtool_stats(struct net_device *dev,
1246 				       struct ethtool_stats *stats,
1247 				       u64 *data)
1248 {
1249 	struct bcmgenet_priv *priv = netdev_priv(dev);
1250 	int i;
1251 
1252 	if (netif_running(dev))
1253 		bcmgenet_update_mib_counters(priv);
1254 
1255 	dev->netdev_ops->ndo_get_stats(dev);
1256 
1257 	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
1258 		const struct bcmgenet_stats *s;
1259 		char *p;
1260 
1261 		s = &bcmgenet_gstrings_stats[i];
1262 		if (s->type == BCMGENET_STAT_NETDEV)
1263 			p = (char *)&dev->stats;
1264 		else
1265 			p = (char *)priv;
1266 		p += s->stat_offset;
1267 		if (sizeof(unsigned long) != sizeof(u32) &&
1268 		    s->stat_sizeof == sizeof(unsigned long))
1269 			data[i] = *(unsigned long *)p;
1270 		else
1271 			data[i] = *(u32 *)p;
1272 	}
1273 }
1274 
1275 void bcmgenet_eee_enable_set(struct net_device *dev, bool enable,
1276 			     bool tx_lpi_enabled)
1277 {
1278 	struct bcmgenet_priv *priv = netdev_priv(dev);
1279 	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
1280 	u32 reg;
1281 
1282 	if (enable && !priv->clk_eee_enabled) {
1283 		clk_prepare_enable(priv->clk_eee);
1284 		priv->clk_eee_enabled = true;
1285 	}
1286 
1287 	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
1288 	if (enable)
1289 		reg |= EEE_EN;
1290 	else
1291 		reg &= ~EEE_EN;
1292 	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);
1293 
1294 	/* Enable EEE and switch to a 27Mhz clock automatically */
1295 	reg = bcmgenet_readl(priv->base + off);
1296 	if (tx_lpi_enabled)
1297 		reg |= TBUF_EEE_EN | TBUF_PM_EN;
1298 	else
1299 		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
1300 	bcmgenet_writel(reg, priv->base + off);
1301 
1302 	/* Do the same for thing for RBUF */
1303 	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
1304 	if (enable)
1305 		reg |= RBUF_EEE_EN | RBUF_PM_EN;
1306 	else
1307 		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
1308 	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);
1309 
1310 	if (!enable && priv->clk_eee_enabled) {
1311 		clk_disable_unprepare(priv->clk_eee);
1312 		priv->clk_eee_enabled = false;
1313 	}
1314 
1315 	priv->eee.eee_enabled = enable;
1316 	priv->eee.eee_active = enable;
1317 	priv->eee.tx_lpi_enabled = tx_lpi_enabled;
1318 }
1319 
1320 static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
1321 {
1322 	struct bcmgenet_priv *priv = netdev_priv(dev);
1323 	struct ethtool_eee *p = &priv->eee;
1324 
1325 	if (GENET_IS_V1(priv))
1326 		return -EOPNOTSUPP;
1327 
1328 	if (!dev->phydev)
1329 		return -ENODEV;
1330 
1331 	e->eee_enabled = p->eee_enabled;
1332 	e->eee_active = p->eee_active;
1333 	e->tx_lpi_enabled = p->tx_lpi_enabled;
1334 	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);
1335 
1336 	return phy_ethtool_get_eee(dev->phydev, e);
1337 }
1338 
1339 static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
1340 {
1341 	struct bcmgenet_priv *priv = netdev_priv(dev);
1342 	struct ethtool_eee *p = &priv->eee;
1343 
1344 	if (GENET_IS_V1(priv))
1345 		return -EOPNOTSUPP;
1346 
1347 	if (!dev->phydev)
1348 		return -ENODEV;
1349 
1350 	p->eee_enabled = e->eee_enabled;
1351 
1352 	if (!p->eee_enabled) {
1353 		bcmgenet_eee_enable_set(dev, false, false);
1354 	} else {
1355 		p->eee_active = phy_init_eee(dev->phydev, false) >= 0;
1356 		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
1357 		bcmgenet_eee_enable_set(dev, p->eee_active, e->tx_lpi_enabled);
1358 	}
1359 
1360 	return phy_ethtool_set_eee(dev->phydev, e);
1361 }
1362 
1363 static int bcmgenet_validate_flow(struct net_device *dev,
1364 				  struct ethtool_rxnfc *cmd)
1365 {
1366 	struct ethtool_usrip4_spec *l4_mask;
1367 	struct ethhdr *eth_mask;
1368 
1369 	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES &&
1370 	    cmd->fs.location != RX_CLS_LOC_ANY) {
1371 		netdev_err(dev, "rxnfc: Invalid location (%d)\n",
1372 			   cmd->fs.location);
1373 		return -EINVAL;
1374 	}
1375 
1376 	switch (cmd->fs.flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
1377 	case IP_USER_FLOW:
1378 		l4_mask = &cmd->fs.m_u.usr_ip4_spec;
1379 		/* don't allow mask which isn't valid */
1380 		if (VALIDATE_MASK(l4_mask->ip4src) ||
1381 		    VALIDATE_MASK(l4_mask->ip4dst) ||
1382 		    VALIDATE_MASK(l4_mask->l4_4_bytes) ||
1383 		    VALIDATE_MASK(l4_mask->proto) ||
1384 		    VALIDATE_MASK(l4_mask->ip_ver) ||
1385 		    VALIDATE_MASK(l4_mask->tos)) {
1386 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1387 			return -EINVAL;
1388 		}
1389 		break;
1390 	case ETHER_FLOW:
1391 		eth_mask = &cmd->fs.m_u.ether_spec;
1392 		/* don't allow mask which isn't valid */
1393 		if (VALIDATE_MASK(eth_mask->h_dest) ||
1394 		    VALIDATE_MASK(eth_mask->h_source) ||
1395 		    VALIDATE_MASK(eth_mask->h_proto)) {
1396 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1397 			return -EINVAL;
1398 		}
1399 		break;
1400 	default:
1401 		netdev_err(dev, "rxnfc: Unsupported flow type (0x%x)\n",
1402 			   cmd->fs.flow_type);
1403 		return -EINVAL;
1404 	}
1405 
1406 	if ((cmd->fs.flow_type & FLOW_EXT)) {
1407 		/* don't allow mask which isn't valid */
1408 		if (VALIDATE_MASK(cmd->fs.m_ext.vlan_etype) ||
1409 		    VALIDATE_MASK(cmd->fs.m_ext.vlan_tci)) {
1410 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1411 			return -EINVAL;
1412 		}
1413 		if (cmd->fs.m_ext.data[0] || cmd->fs.m_ext.data[1]) {
1414 			netdev_err(dev, "rxnfc: user-def not supported\n");
1415 			return -EINVAL;
1416 		}
1417 	}
1418 
1419 	if ((cmd->fs.flow_type & FLOW_MAC_EXT)) {
1420 		/* don't allow mask which isn't valid */
1421 		if (VALIDATE_MASK(cmd->fs.m_ext.h_dest)) {
1422 			netdev_err(dev, "rxnfc: Unsupported mask\n");
1423 			return -EINVAL;
1424 		}
1425 	}
1426 
1427 	return 0;
1428 }
1429 
1430 static int bcmgenet_insert_flow(struct net_device *dev,
1431 				struct ethtool_rxnfc *cmd)
1432 {
1433 	struct bcmgenet_priv *priv = netdev_priv(dev);
1434 	struct bcmgenet_rxnfc_rule *loc_rule;
1435 	int err, i;
1436 
1437 	if (priv->hw_params->hfb_filter_size < 128) {
1438 		netdev_err(dev, "rxnfc: Not supported by this device\n");
1439 		return -EINVAL;
1440 	}
1441 
1442 	if (cmd->fs.ring_cookie > priv->hw_params->rx_queues &&
1443 	    cmd->fs.ring_cookie != RX_CLS_FLOW_WAKE) {
1444 		netdev_err(dev, "rxnfc: Unsupported action (%llu)\n",
1445 			   cmd->fs.ring_cookie);
1446 		return -EINVAL;
1447 	}
1448 
1449 	err = bcmgenet_validate_flow(dev, cmd);
1450 	if (err)
1451 		return err;
1452 
1453 	if (cmd->fs.location == RX_CLS_LOC_ANY) {
1454 		list_for_each_entry(loc_rule, &priv->rxnfc_list, list) {
1455 			cmd->fs.location = loc_rule->fs.location;
1456 			err = memcmp(&loc_rule->fs, &cmd->fs,
1457 				     sizeof(struct ethtool_rx_flow_spec));
1458 			if (!err)
1459 				/* rule exists so return current location */
1460 				return 0;
1461 		}
1462 		for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) {
1463 			loc_rule = &priv->rxnfc_rules[i];
1464 			if (loc_rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
1465 				cmd->fs.location = i;
1466 				break;
1467 			}
1468 		}
1469 		if (i == MAX_NUM_OF_FS_RULES) {
1470 			cmd->fs.location = RX_CLS_LOC_ANY;
1471 			return -ENOSPC;
1472 		}
1473 	} else {
1474 		loc_rule = &priv->rxnfc_rules[cmd->fs.location];
1475 	}
1476 	if (loc_rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1477 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
1478 	if (loc_rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1479 		list_del(&loc_rule->list);
1480 		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
1481 	}
1482 	loc_rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1483 	memcpy(&loc_rule->fs, &cmd->fs,
1484 	       sizeof(struct ethtool_rx_flow_spec));
1485 
1486 	bcmgenet_hfb_create_rxnfc_filter(priv, loc_rule);
1487 
1488 	list_add_tail(&loc_rule->list, &priv->rxnfc_list);
1489 
1490 	return 0;
1491 }
1492 
1493 static int bcmgenet_delete_flow(struct net_device *dev,
1494 				struct ethtool_rxnfc *cmd)
1495 {
1496 	struct bcmgenet_priv *priv = netdev_priv(dev);
1497 	struct bcmgenet_rxnfc_rule *rule;
1498 	int err = 0;
1499 
1500 	if (cmd->fs.location >= MAX_NUM_OF_FS_RULES)
1501 		return -EINVAL;
1502 
1503 	rule = &priv->rxnfc_rules[cmd->fs.location];
1504 	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED) {
1505 		err =  -ENOENT;
1506 		goto out;
1507 	}
1508 
1509 	if (rule->state == BCMGENET_RXNFC_STATE_ENABLED)
1510 		bcmgenet_hfb_disable_filter(priv, cmd->fs.location);
1511 	if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) {
1512 		list_del(&rule->list);
1513 		bcmgenet_hfb_clear_filter(priv, cmd->fs.location);
1514 	}
1515 	rule->state = BCMGENET_RXNFC_STATE_UNUSED;
1516 	memset(&rule->fs, 0, sizeof(struct ethtool_rx_flow_spec));
1517 
1518 out:
1519 	return err;
1520 }
1521 
1522 static int bcmgenet_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
1523 {
1524 	struct bcmgenet_priv *priv = netdev_priv(dev);
1525 	int err = 0;
1526 
1527 	switch (cmd->cmd) {
1528 	case ETHTOOL_SRXCLSRLINS:
1529 		err = bcmgenet_insert_flow(dev, cmd);
1530 		break;
1531 	case ETHTOOL_SRXCLSRLDEL:
1532 		err = bcmgenet_delete_flow(dev, cmd);
1533 		break;
1534 	default:
1535 		netdev_warn(priv->dev, "Unsupported ethtool command. (%d)\n",
1536 			    cmd->cmd);
1537 		return -EINVAL;
1538 	}
1539 
1540 	return err;
1541 }
1542 
1543 static int bcmgenet_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd,
1544 			     int loc)
1545 {
1546 	struct bcmgenet_priv *priv = netdev_priv(dev);
1547 	struct bcmgenet_rxnfc_rule *rule;
1548 	int err = 0;
1549 
1550 	if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES)
1551 		return -EINVAL;
1552 
1553 	rule = &priv->rxnfc_rules[loc];
1554 	if (rule->state == BCMGENET_RXNFC_STATE_UNUSED)
1555 		err = -ENOENT;
1556 	else
1557 		memcpy(&cmd->fs, &rule->fs,
1558 		       sizeof(struct ethtool_rx_flow_spec));
1559 
1560 	return err;
1561 }
1562 
1563 static int bcmgenet_get_num_flows(struct bcmgenet_priv *priv)
1564 {
1565 	struct list_head *pos;
1566 	int res = 0;
1567 
1568 	list_for_each(pos, &priv->rxnfc_list)
1569 		res++;
1570 
1571 	return res;
1572 }
1573 
1574 static int bcmgenet_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
1575 			      u32 *rule_locs)
1576 {
1577 	struct bcmgenet_priv *priv = netdev_priv(dev);
1578 	struct bcmgenet_rxnfc_rule *rule;
1579 	int err = 0;
1580 	int i = 0;
1581 
1582 	switch (cmd->cmd) {
1583 	case ETHTOOL_GRXRINGS:
1584 		cmd->data = priv->hw_params->rx_queues ?: 1;
1585 		break;
1586 	case ETHTOOL_GRXCLSRLCNT:
1587 		cmd->rule_cnt = bcmgenet_get_num_flows(priv);
1588 		cmd->data = MAX_NUM_OF_FS_RULES | RX_CLS_LOC_SPECIAL;
1589 		break;
1590 	case ETHTOOL_GRXCLSRULE:
1591 		err = bcmgenet_get_flow(dev, cmd, cmd->fs.location);
1592 		break;
1593 	case ETHTOOL_GRXCLSRLALL:
1594 		list_for_each_entry(rule, &priv->rxnfc_list, list)
1595 			if (i < cmd->rule_cnt)
1596 				rule_locs[i++] = rule->fs.location;
1597 		cmd->rule_cnt = i;
1598 		cmd->data = MAX_NUM_OF_FS_RULES;
1599 		break;
1600 	default:
1601 		err = -EOPNOTSUPP;
1602 		break;
1603 	}
1604 
1605 	return err;
1606 }
1607 
1608 /* standard ethtool support functions. */
1609 static const struct ethtool_ops bcmgenet_ethtool_ops = {
1610 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1611 				     ETHTOOL_COALESCE_MAX_FRAMES |
1612 				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
1613 	.begin			= bcmgenet_begin,
1614 	.complete		= bcmgenet_complete,
1615 	.get_strings		= bcmgenet_get_strings,
1616 	.get_sset_count		= bcmgenet_get_sset_count,
1617 	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
1618 	.get_drvinfo		= bcmgenet_get_drvinfo,
1619 	.get_link		= ethtool_op_get_link,
1620 	.get_msglevel		= bcmgenet_get_msglevel,
1621 	.set_msglevel		= bcmgenet_set_msglevel,
1622 	.get_wol		= bcmgenet_get_wol,
1623 	.set_wol		= bcmgenet_set_wol,
1624 	.get_eee		= bcmgenet_get_eee,
1625 	.set_eee		= bcmgenet_set_eee,
1626 	.nway_reset		= phy_ethtool_nway_reset,
1627 	.get_coalesce		= bcmgenet_get_coalesce,
1628 	.set_coalesce		= bcmgenet_set_coalesce,
1629 	.get_link_ksettings	= bcmgenet_get_link_ksettings,
1630 	.set_link_ksettings	= bcmgenet_set_link_ksettings,
1631 	.get_ts_info		= ethtool_op_get_ts_info,
1632 	.get_rxnfc		= bcmgenet_get_rxnfc,
1633 	.set_rxnfc		= bcmgenet_set_rxnfc,
1634 	.get_pauseparam		= bcmgenet_get_pauseparam,
1635 	.set_pauseparam		= bcmgenet_set_pauseparam,
1636 };
1637 
1638 /* Power down the unimac, based on mode. */
1639 static int bcmgenet_power_down(struct bcmgenet_priv *priv,
1640 				enum bcmgenet_power_mode mode)
1641 {
1642 	int ret = 0;
1643 	u32 reg;
1644 
1645 	switch (mode) {
1646 	case GENET_POWER_CABLE_SENSE:
1647 		phy_detach(priv->dev->phydev);
1648 		break;
1649 
1650 	case GENET_POWER_WOL_MAGIC:
1651 		ret = bcmgenet_wol_power_down_cfg(priv, mode);
1652 		break;
1653 
1654 	case GENET_POWER_PASSIVE:
1655 		/* Power down LED */
1656 		if (priv->hw_params->flags & GENET_HAS_EXT) {
1657 			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1658 			if (GENET_IS_V5(priv) && !priv->ephy_16nm)
1659 				reg |= EXT_PWR_DOWN_PHY_EN |
1660 				       EXT_PWR_DOWN_PHY_RD |
1661 				       EXT_PWR_DOWN_PHY_SD |
1662 				       EXT_PWR_DOWN_PHY_RX |
1663 				       EXT_PWR_DOWN_PHY_TX |
1664 				       EXT_IDDQ_GLBL_PWR;
1665 			else
1666 				reg |= EXT_PWR_DOWN_PHY;
1667 
1668 			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
1669 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1670 
1671 			bcmgenet_phy_power_set(priv->dev, false);
1672 		}
1673 		break;
1674 	default:
1675 		break;
1676 	}
1677 
1678 	return ret;
1679 }
1680 
1681 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
1682 			      enum bcmgenet_power_mode mode)
1683 {
1684 	u32 reg;
1685 
1686 	if (!(priv->hw_params->flags & GENET_HAS_EXT))
1687 		return;
1688 
1689 	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
1690 
1691 	switch (mode) {
1692 	case GENET_POWER_PASSIVE:
1693 		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
1694 			 EXT_ENERGY_DET_MASK);
1695 		if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
1696 			reg &= ~(EXT_PWR_DOWN_PHY_EN |
1697 				 EXT_PWR_DOWN_PHY_RD |
1698 				 EXT_PWR_DOWN_PHY_SD |
1699 				 EXT_PWR_DOWN_PHY_RX |
1700 				 EXT_PWR_DOWN_PHY_TX |
1701 				 EXT_IDDQ_GLBL_PWR);
1702 			reg |=   EXT_PHY_RESET;
1703 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1704 			mdelay(1);
1705 
1706 			reg &=  ~EXT_PHY_RESET;
1707 		} else {
1708 			reg &= ~EXT_PWR_DOWN_PHY;
1709 			reg |= EXT_PWR_DN_EN_LD;
1710 		}
1711 		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1712 		bcmgenet_phy_power_set(priv->dev, true);
1713 		break;
1714 
1715 	case GENET_POWER_CABLE_SENSE:
1716 		/* enable APD */
1717 		if (!GENET_IS_V5(priv)) {
1718 			reg |= EXT_PWR_DN_EN_LD;
1719 			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
1720 		}
1721 		break;
1722 	case GENET_POWER_WOL_MAGIC:
1723 		bcmgenet_wol_power_up_cfg(priv, mode);
1724 		return;
1725 	default:
1726 		break;
1727 	}
1728 }
1729 
1730 static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
1731 					 struct bcmgenet_tx_ring *ring)
1732 {
1733 	struct enet_cb *tx_cb_ptr;
1734 
1735 	tx_cb_ptr = ring->cbs;
1736 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1737 
1738 	/* Advancing local write pointer */
1739 	if (ring->write_ptr == ring->end_ptr)
1740 		ring->write_ptr = ring->cb_ptr;
1741 	else
1742 		ring->write_ptr++;
1743 
1744 	return tx_cb_ptr;
1745 }
1746 
1747 static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
1748 					 struct bcmgenet_tx_ring *ring)
1749 {
1750 	struct enet_cb *tx_cb_ptr;
1751 
1752 	tx_cb_ptr = ring->cbs;
1753 	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1754 
1755 	/* Rewinding local write pointer */
1756 	if (ring->write_ptr == ring->cb_ptr)
1757 		ring->write_ptr = ring->end_ptr;
1758 	else
1759 		ring->write_ptr--;
1760 
1761 	return tx_cb_ptr;
1762 }
1763 
1764 static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
1765 {
1766 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1767 				 INTRL2_CPU_MASK_SET);
1768 }
1769 
1770 static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
1771 {
1772 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
1773 				 INTRL2_CPU_MASK_CLEAR);
1774 }
1775 
1776 static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
1777 {
1778 	bcmgenet_intrl2_1_writel(ring->priv,
1779 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1780 				 INTRL2_CPU_MASK_SET);
1781 }
1782 
1783 static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
1784 {
1785 	bcmgenet_intrl2_1_writel(ring->priv,
1786 				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
1787 				 INTRL2_CPU_MASK_CLEAR);
1788 }
1789 
1790 static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
1791 {
1792 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1793 				 INTRL2_CPU_MASK_SET);
1794 }
1795 
1796 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
1797 {
1798 	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
1799 				 INTRL2_CPU_MASK_CLEAR);
1800 }
1801 
1802 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
1803 {
1804 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1805 				 INTRL2_CPU_MASK_CLEAR);
1806 }
1807 
1808 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
1809 {
1810 	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
1811 				 INTRL2_CPU_MASK_SET);
1812 }
1813 
1814 /* Simple helper to free a transmit control block's resources
1815  * Returns an skb when the last transmit control block associated with the
1816  * skb is freed.  The skb should be freed by the caller if necessary.
1817  */
1818 static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
1819 					   struct enet_cb *cb)
1820 {
1821 	struct sk_buff *skb;
1822 
1823 	skb = cb->skb;
1824 
1825 	if (skb) {
1826 		cb->skb = NULL;
1827 		if (cb == GENET_CB(skb)->first_cb)
1828 			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1829 					 dma_unmap_len(cb, dma_len),
1830 					 DMA_TO_DEVICE);
1831 		else
1832 			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
1833 				       dma_unmap_len(cb, dma_len),
1834 				       DMA_TO_DEVICE);
1835 		dma_unmap_addr_set(cb, dma_addr, 0);
1836 
1837 		if (cb == GENET_CB(skb)->last_cb)
1838 			return skb;
1839 
1840 	} else if (dma_unmap_addr(cb, dma_addr)) {
1841 		dma_unmap_page(dev,
1842 			       dma_unmap_addr(cb, dma_addr),
1843 			       dma_unmap_len(cb, dma_len),
1844 			       DMA_TO_DEVICE);
1845 		dma_unmap_addr_set(cb, dma_addr, 0);
1846 	}
1847 
1848 	return NULL;
1849 }
1850 
1851 /* Simple helper to free a receive control block's resources */
1852 static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
1853 					   struct enet_cb *cb)
1854 {
1855 	struct sk_buff *skb;
1856 
1857 	skb = cb->skb;
1858 	cb->skb = NULL;
1859 
1860 	if (dma_unmap_addr(cb, dma_addr)) {
1861 		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
1862 				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
1863 		dma_unmap_addr_set(cb, dma_addr, 0);
1864 	}
1865 
1866 	return skb;
1867 }
1868 
1869 /* Unlocked version of the reclaim routine */
1870 static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
1871 					  struct bcmgenet_tx_ring *ring)
1872 {
1873 	struct bcmgenet_priv *priv = netdev_priv(dev);
1874 	unsigned int txbds_processed = 0;
1875 	unsigned int bytes_compl = 0;
1876 	unsigned int pkts_compl = 0;
1877 	unsigned int txbds_ready;
1878 	unsigned int c_index;
1879 	struct sk_buff *skb;
1880 
1881 	/* Clear status before servicing to reduce spurious interrupts */
1882 	if (ring->index == DESC_INDEX)
1883 		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
1884 					 INTRL2_CPU_CLEAR);
1885 	else
1886 		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
1887 					 INTRL2_CPU_CLEAR);
1888 
1889 	/* Compute how many buffers are transmitted since last xmit call */
1890 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
1891 		& DMA_C_INDEX_MASK;
1892 	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;
1893 
1894 	netif_dbg(priv, tx_done, dev,
1895 		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
1896 		  __func__, ring->index, ring->c_index, c_index, txbds_ready);
1897 
1898 	/* Reclaim transmitted buffers */
1899 	while (txbds_processed < txbds_ready) {
1900 		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
1901 					  &priv->tx_cbs[ring->clean_ptr]);
1902 		if (skb) {
1903 			pkts_compl++;
1904 			bytes_compl += GENET_CB(skb)->bytes_sent;
1905 			dev_consume_skb_any(skb);
1906 		}
1907 
1908 		txbds_processed++;
1909 		if (likely(ring->clean_ptr < ring->end_ptr))
1910 			ring->clean_ptr++;
1911 		else
1912 			ring->clean_ptr = ring->cb_ptr;
1913 	}
1914 
1915 	ring->free_bds += txbds_processed;
1916 	ring->c_index = c_index;
1917 
1918 	ring->packets += pkts_compl;
1919 	ring->bytes += bytes_compl;
1920 
1921 	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
1922 				  pkts_compl, bytes_compl);
1923 
1924 	return txbds_processed;
1925 }
1926 
1927 static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
1928 				struct bcmgenet_tx_ring *ring)
1929 {
1930 	unsigned int released;
1931 
1932 	spin_lock_bh(&ring->lock);
1933 	released = __bcmgenet_tx_reclaim(dev, ring);
1934 	spin_unlock_bh(&ring->lock);
1935 
1936 	return released;
1937 }
1938 
1939 static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1940 {
1941 	struct bcmgenet_tx_ring *ring =
1942 		container_of(napi, struct bcmgenet_tx_ring, napi);
1943 	unsigned int work_done = 0;
1944 	struct netdev_queue *txq;
1945 
1946 	spin_lock(&ring->lock);
1947 	work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1948 	if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1949 		txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1950 		netif_tx_wake_queue(txq);
1951 	}
1952 	spin_unlock(&ring->lock);
1953 
1954 	if (work_done == 0) {
1955 		napi_complete(napi);
1956 		ring->int_enable(ring);
1957 
1958 		return 0;
1959 	}
1960 
1961 	return budget;
1962 }
1963 
1964 static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1965 {
1966 	struct bcmgenet_priv *priv = netdev_priv(dev);
1967 	int i;
1968 
1969 	if (netif_is_multiqueue(dev)) {
1970 		for (i = 0; i < priv->hw_params->tx_queues; i++)
1971 			bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1972 	}
1973 
1974 	bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1975 }
1976 
1977 /* Reallocate the SKB to put enough headroom in front of it and insert
1978  * the transmit checksum offsets in the descriptors
1979  */
1980 static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
1981 					struct sk_buff *skb)
1982 {
1983 	struct bcmgenet_priv *priv = netdev_priv(dev);
1984 	struct status_64 *status = NULL;
1985 	struct sk_buff *new_skb;
1986 	u16 offset;
1987 	u8 ip_proto;
1988 	__be16 ip_ver;
1989 	u32 tx_csum_info;
1990 
1991 	if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1992 		/* If 64 byte status block enabled, must make sure skb has
1993 		 * enough headroom for us to insert 64B status block.
1994 		 */
1995 		new_skb = skb_realloc_headroom(skb, sizeof(*status));
1996 		if (!new_skb) {
1997 			dev_kfree_skb_any(skb);
1998 			priv->mib.tx_realloc_tsb_failed++;
1999 			dev->stats.tx_dropped++;
2000 			return NULL;
2001 		}
2002 		dev_consume_skb_any(skb);
2003 		skb = new_skb;
2004 		priv->mib.tx_realloc_tsb++;
2005 	}
2006 
2007 	skb_push(skb, sizeof(*status));
2008 	status = (struct status_64 *)skb->data;
2009 
2010 	if (skb->ip_summed  == CHECKSUM_PARTIAL) {
2011 		ip_ver = skb->protocol;
2012 		switch (ip_ver) {
2013 		case htons(ETH_P_IP):
2014 			ip_proto = ip_hdr(skb)->protocol;
2015 			break;
2016 		case htons(ETH_P_IPV6):
2017 			ip_proto = ipv6_hdr(skb)->nexthdr;
2018 			break;
2019 		default:
2020 			/* don't use UDP flag */
2021 			ip_proto = 0;
2022 			break;
2023 		}
2024 
2025 		offset = skb_checksum_start_offset(skb) - sizeof(*status);
2026 		tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
2027 				(offset + skb->csum_offset) |
2028 				STATUS_TX_CSUM_LV;
2029 
2030 		/* Set the special UDP flag for UDP */
2031 		if (ip_proto == IPPROTO_UDP)
2032 			tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
2033 
2034 		status->tx_csum_info = tx_csum_info;
2035 	}
2036 
2037 	return skb;
2038 }
2039 
2040 static void bcmgenet_hide_tsb(struct sk_buff *skb)
2041 {
2042 	__skb_pull(skb, sizeof(struct status_64));
2043 }
2044 
2045 static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
2046 {
2047 	struct bcmgenet_priv *priv = netdev_priv(dev);
2048 	struct device *kdev = &priv->pdev->dev;
2049 	struct bcmgenet_tx_ring *ring = NULL;
2050 	struct enet_cb *tx_cb_ptr;
2051 	struct netdev_queue *txq;
2052 	int nr_frags, index;
2053 	dma_addr_t mapping;
2054 	unsigned int size;
2055 	skb_frag_t *frag;
2056 	u32 len_stat;
2057 	int ret;
2058 	int i;
2059 
2060 	index = skb_get_queue_mapping(skb);
2061 	/* Mapping strategy:
2062 	 * queue_mapping = 0, unclassified, packet xmited through ring16
2063 	 * queue_mapping = 1, goes to ring 0. (highest priority queue
2064 	 * queue_mapping = 2, goes to ring 1.
2065 	 * queue_mapping = 3, goes to ring 2.
2066 	 * queue_mapping = 4, goes to ring 3.
2067 	 */
2068 	if (index == 0)
2069 		index = DESC_INDEX;
2070 	else
2071 		index -= 1;
2072 
2073 	ring = &priv->tx_rings[index];
2074 	txq = netdev_get_tx_queue(dev, ring->queue);
2075 
2076 	nr_frags = skb_shinfo(skb)->nr_frags;
2077 
2078 	spin_lock(&ring->lock);
2079 	if (ring->free_bds <= (nr_frags + 1)) {
2080 		if (!netif_tx_queue_stopped(txq))
2081 			netif_tx_stop_queue(txq);
2082 		ret = NETDEV_TX_BUSY;
2083 		goto out;
2084 	}
2085 
2086 	/* Retain how many bytes will be sent on the wire, without TSB inserted
2087 	 * by transmit checksum offload
2088 	 */
2089 	GENET_CB(skb)->bytes_sent = skb->len;
2090 
2091 	/* add the Transmit Status Block */
2092 	skb = bcmgenet_add_tsb(dev, skb);
2093 	if (!skb) {
2094 		ret = NETDEV_TX_OK;
2095 		goto out;
2096 	}
2097 
2098 	for (i = 0; i <= nr_frags; i++) {
2099 		tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
2100 
2101 		BUG_ON(!tx_cb_ptr);
2102 
2103 		if (!i) {
2104 			/* Transmit single SKB or head of fragment list */
2105 			GENET_CB(skb)->first_cb = tx_cb_ptr;
2106 			size = skb_headlen(skb);
2107 			mapping = dma_map_single(kdev, skb->data, size,
2108 						 DMA_TO_DEVICE);
2109 		} else {
2110 			/* xmit fragment */
2111 			frag = &skb_shinfo(skb)->frags[i - 1];
2112 			size = skb_frag_size(frag);
2113 			mapping = skb_frag_dma_map(kdev, frag, 0, size,
2114 						   DMA_TO_DEVICE);
2115 		}
2116 
2117 		ret = dma_mapping_error(kdev, mapping);
2118 		if (ret) {
2119 			priv->mib.tx_dma_failed++;
2120 			netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
2121 			ret = NETDEV_TX_OK;
2122 			goto out_unmap_frags;
2123 		}
2124 		dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
2125 		dma_unmap_len_set(tx_cb_ptr, dma_len, size);
2126 
2127 		tx_cb_ptr->skb = skb;
2128 
2129 		len_stat = (size << DMA_BUFLENGTH_SHIFT) |
2130 			   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
2131 
2132 		/* Note: if we ever change from DMA_TX_APPEND_CRC below we
2133 		 * will need to restore software padding of "runt" packets
2134 		 */
2135 		len_stat |= DMA_TX_APPEND_CRC;
2136 
2137 		if (!i) {
2138 			len_stat |= DMA_SOP;
2139 			if (skb->ip_summed == CHECKSUM_PARTIAL)
2140 				len_stat |= DMA_TX_DO_CSUM;
2141 		}
2142 		if (i == nr_frags)
2143 			len_stat |= DMA_EOP;
2144 
2145 		dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
2146 	}
2147 
2148 	GENET_CB(skb)->last_cb = tx_cb_ptr;
2149 
2150 	bcmgenet_hide_tsb(skb);
2151 	skb_tx_timestamp(skb);
2152 
2153 	/* Decrement total BD count and advance our write pointer */
2154 	ring->free_bds -= nr_frags + 1;
2155 	ring->prod_index += nr_frags + 1;
2156 	ring->prod_index &= DMA_P_INDEX_MASK;
2157 
2158 	netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
2159 
2160 	if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
2161 		netif_tx_stop_queue(txq);
2162 
2163 	if (!netdev_xmit_more() || netif_xmit_stopped(txq))
2164 		/* Packets are ready, update producer index */
2165 		bcmgenet_tdma_ring_writel(priv, ring->index,
2166 					  ring->prod_index, TDMA_PROD_INDEX);
2167 out:
2168 	spin_unlock(&ring->lock);
2169 
2170 	return ret;
2171 
2172 out_unmap_frags:
2173 	/* Back up for failed control block mapping */
2174 	bcmgenet_put_txcb(priv, ring);
2175 
2176 	/* Unmap successfully mapped control blocks */
2177 	while (i-- > 0) {
2178 		tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
2179 		bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
2180 	}
2181 
2182 	dev_kfree_skb(skb);
2183 	goto out;
2184 }
2185 
2186 static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
2187 					  struct enet_cb *cb)
2188 {
2189 	struct device *kdev = &priv->pdev->dev;
2190 	struct sk_buff *skb;
2191 	struct sk_buff *rx_skb;
2192 	dma_addr_t mapping;
2193 
2194 	/* Allocate a new Rx skb */
2195 	skb = __netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT,
2196 				 GFP_ATOMIC | __GFP_NOWARN);
2197 	if (!skb) {
2198 		priv->mib.alloc_rx_buff_failed++;
2199 		netif_err(priv, rx_err, priv->dev,
2200 			  "%s: Rx skb allocation failed\n", __func__);
2201 		return NULL;
2202 	}
2203 
2204 	/* DMA-map the new Rx skb */
2205 	mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
2206 				 DMA_FROM_DEVICE);
2207 	if (dma_mapping_error(kdev, mapping)) {
2208 		priv->mib.rx_dma_failed++;
2209 		dev_kfree_skb_any(skb);
2210 		netif_err(priv, rx_err, priv->dev,
2211 			  "%s: Rx skb DMA mapping failed\n", __func__);
2212 		return NULL;
2213 	}
2214 
2215 	/* Grab the current Rx skb from the ring and DMA-unmap it */
2216 	rx_skb = bcmgenet_free_rx_cb(kdev, cb);
2217 
2218 	/* Put the new Rx skb on the ring */
2219 	cb->skb = skb;
2220 	dma_unmap_addr_set(cb, dma_addr, mapping);
2221 	dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
2222 	dmadesc_set_addr(priv, cb->bd_addr, mapping);
2223 
2224 	/* Return the current Rx skb to caller */
2225 	return rx_skb;
2226 }
2227 
2228 /* bcmgenet_desc_rx - descriptor based rx process.
2229  * this could be called from bottom half, or from NAPI polling method.
2230  */
2231 static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
2232 				     unsigned int budget)
2233 {
2234 	struct bcmgenet_priv *priv = ring->priv;
2235 	struct net_device *dev = priv->dev;
2236 	struct enet_cb *cb;
2237 	struct sk_buff *skb;
2238 	u32 dma_length_status;
2239 	unsigned long dma_flag;
2240 	int len;
2241 	unsigned int rxpktprocessed = 0, rxpkttoprocess;
2242 	unsigned int bytes_processed = 0;
2243 	unsigned int p_index, mask;
2244 	unsigned int discards;
2245 
2246 	/* Clear status before servicing to reduce spurious interrupts */
2247 	if (ring->index == DESC_INDEX) {
2248 		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
2249 					 INTRL2_CPU_CLEAR);
2250 	} else {
2251 		mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
2252 		bcmgenet_intrl2_1_writel(priv,
2253 					 mask,
2254 					 INTRL2_CPU_CLEAR);
2255 	}
2256 
2257 	p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
2258 
2259 	discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
2260 		   DMA_P_INDEX_DISCARD_CNT_MASK;
2261 	if (discards > ring->old_discards) {
2262 		discards = discards - ring->old_discards;
2263 		ring->errors += discards;
2264 		ring->old_discards += discards;
2265 
2266 		/* Clear HW register when we reach 75% of maximum 0xFFFF */
2267 		if (ring->old_discards >= 0xC000) {
2268 			ring->old_discards = 0;
2269 			bcmgenet_rdma_ring_writel(priv, ring->index, 0,
2270 						  RDMA_PROD_INDEX);
2271 		}
2272 	}
2273 
2274 	p_index &= DMA_P_INDEX_MASK;
2275 	rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
2276 
2277 	netif_dbg(priv, rx_status, dev,
2278 		  "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
2279 
2280 	while ((rxpktprocessed < rxpkttoprocess) &&
2281 	       (rxpktprocessed < budget)) {
2282 		struct status_64 *status;
2283 		__be16 rx_csum;
2284 
2285 		cb = &priv->rx_cbs[ring->read_ptr];
2286 		skb = bcmgenet_rx_refill(priv, cb);
2287 
2288 		if (unlikely(!skb)) {
2289 			ring->dropped++;
2290 			goto next;
2291 		}
2292 
2293 		status = (struct status_64 *)skb->data;
2294 		dma_length_status = status->length_status;
2295 		if (dev->features & NETIF_F_RXCSUM) {
2296 			rx_csum = (__force __be16)(status->rx_csum & 0xffff);
2297 			if (rx_csum) {
2298 				skb->csum = (__force __wsum)ntohs(rx_csum);
2299 				skb->ip_summed = CHECKSUM_COMPLETE;
2300 			}
2301 		}
2302 
2303 		/* DMA flags and length are still valid no matter how
2304 		 * we got the Receive Status Vector (64B RSB or register)
2305 		 */
2306 		dma_flag = dma_length_status & 0xffff;
2307 		len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
2308 
2309 		netif_dbg(priv, rx_status, dev,
2310 			  "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
2311 			  __func__, p_index, ring->c_index,
2312 			  ring->read_ptr, dma_length_status);
2313 
2314 		if (unlikely(len > RX_BUF_LENGTH)) {
2315 			netif_err(priv, rx_status, dev, "oversized packet\n");
2316 			dev->stats.rx_length_errors++;
2317 			dev->stats.rx_errors++;
2318 			dev_kfree_skb_any(skb);
2319 			goto next;
2320 		}
2321 
2322 		if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
2323 			netif_err(priv, rx_status, dev,
2324 				  "dropping fragmented packet!\n");
2325 			ring->errors++;
2326 			dev_kfree_skb_any(skb);
2327 			goto next;
2328 		}
2329 
2330 		/* report errors */
2331 		if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
2332 						DMA_RX_OV |
2333 						DMA_RX_NO |
2334 						DMA_RX_LG |
2335 						DMA_RX_RXER))) {
2336 			netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
2337 				  (unsigned int)dma_flag);
2338 			if (dma_flag & DMA_RX_CRC_ERROR)
2339 				dev->stats.rx_crc_errors++;
2340 			if (dma_flag & DMA_RX_OV)
2341 				dev->stats.rx_over_errors++;
2342 			if (dma_flag & DMA_RX_NO)
2343 				dev->stats.rx_frame_errors++;
2344 			if (dma_flag & DMA_RX_LG)
2345 				dev->stats.rx_length_errors++;
2346 			dev->stats.rx_errors++;
2347 			dev_kfree_skb_any(skb);
2348 			goto next;
2349 		} /* error packet */
2350 
2351 		skb_put(skb, len);
2352 
2353 		/* remove RSB and hardware 2bytes added for IP alignment */
2354 		skb_pull(skb, 66);
2355 		len -= 66;
2356 
2357 		if (priv->crc_fwd_en) {
2358 			skb_trim(skb, len - ETH_FCS_LEN);
2359 			len -= ETH_FCS_LEN;
2360 		}
2361 
2362 		bytes_processed += len;
2363 
2364 		/*Finish setting up the received SKB and send it to the kernel*/
2365 		skb->protocol = eth_type_trans(skb, priv->dev);
2366 		ring->packets++;
2367 		ring->bytes += len;
2368 		if (dma_flag & DMA_RX_MULT)
2369 			dev->stats.multicast++;
2370 
2371 		/* Notify kernel */
2372 		napi_gro_receive(&ring->napi, skb);
2373 		netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
2374 
2375 next:
2376 		rxpktprocessed++;
2377 		if (likely(ring->read_ptr < ring->end_ptr))
2378 			ring->read_ptr++;
2379 		else
2380 			ring->read_ptr = ring->cb_ptr;
2381 
2382 		ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
2383 		bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
2384 	}
2385 
2386 	ring->dim.bytes = bytes_processed;
2387 	ring->dim.packets = rxpktprocessed;
2388 
2389 	return rxpktprocessed;
2390 }
2391 
2392 /* Rx NAPI polling method */
2393 static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
2394 {
2395 	struct bcmgenet_rx_ring *ring = container_of(napi,
2396 			struct bcmgenet_rx_ring, napi);
2397 	struct dim_sample dim_sample = {};
2398 	unsigned int work_done;
2399 
2400 	work_done = bcmgenet_desc_rx(ring, budget);
2401 
2402 	if (work_done < budget) {
2403 		napi_complete_done(napi, work_done);
2404 		ring->int_enable(ring);
2405 	}
2406 
2407 	if (ring->dim.use_dim) {
2408 		dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
2409 				  ring->dim.bytes, &dim_sample);
2410 		net_dim(&ring->dim.dim, dim_sample);
2411 	}
2412 
2413 	return work_done;
2414 }
2415 
2416 static void bcmgenet_dim_work(struct work_struct *work)
2417 {
2418 	struct dim *dim = container_of(work, struct dim, work);
2419 	struct bcmgenet_net_dim *ndim =
2420 			container_of(dim, struct bcmgenet_net_dim, dim);
2421 	struct bcmgenet_rx_ring *ring =
2422 			container_of(ndim, struct bcmgenet_rx_ring, dim);
2423 	struct dim_cq_moder cur_profile =
2424 			net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
2425 
2426 	bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
2427 	dim->state = DIM_START_MEASURE;
2428 }
2429 
2430 /* Assign skb to RX DMA descriptor. */
2431 static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
2432 				     struct bcmgenet_rx_ring *ring)
2433 {
2434 	struct enet_cb *cb;
2435 	struct sk_buff *skb;
2436 	int i;
2437 
2438 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
2439 
2440 	/* loop here for each buffer needing assign */
2441 	for (i = 0; i < ring->size; i++) {
2442 		cb = ring->cbs + i;
2443 		skb = bcmgenet_rx_refill(priv, cb);
2444 		if (skb)
2445 			dev_consume_skb_any(skb);
2446 		if (!cb->skb)
2447 			return -ENOMEM;
2448 	}
2449 
2450 	return 0;
2451 }
2452 
2453 static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
2454 {
2455 	struct sk_buff *skb;
2456 	struct enet_cb *cb;
2457 	int i;
2458 
2459 	for (i = 0; i < priv->num_rx_bds; i++) {
2460 		cb = &priv->rx_cbs[i];
2461 
2462 		skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
2463 		if (skb)
2464 			dev_consume_skb_any(skb);
2465 	}
2466 }
2467 
2468 static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
2469 {
2470 	u32 reg;
2471 
2472 	spin_lock_bh(&priv->reg_lock);
2473 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2474 	if (reg & CMD_SW_RESET) {
2475 		spin_unlock_bh(&priv->reg_lock);
2476 		return;
2477 	}
2478 	if (enable)
2479 		reg |= mask;
2480 	else
2481 		reg &= ~mask;
2482 	bcmgenet_umac_writel(priv, reg, UMAC_CMD);
2483 	spin_unlock_bh(&priv->reg_lock);
2484 
2485 	/* UniMAC stops on a packet boundary, wait for a full-size packet
2486 	 * to be processed
2487 	 */
2488 	if (enable == 0)
2489 		usleep_range(1000, 2000);
2490 }
2491 
2492 static void reset_umac(struct bcmgenet_priv *priv)
2493 {
2494 	/* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
2495 	bcmgenet_rbuf_ctrl_set(priv, 0);
2496 	udelay(10);
2497 
2498 	/* issue soft reset and disable MAC while updating its registers */
2499 	spin_lock_bh(&priv->reg_lock);
2500 	bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
2501 	udelay(2);
2502 	spin_unlock_bh(&priv->reg_lock);
2503 }
2504 
2505 static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
2506 {
2507 	/* Mask all interrupts.*/
2508 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2509 	bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2510 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
2511 	bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
2512 }
2513 
2514 static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
2515 {
2516 	u32 int0_enable = 0;
2517 
2518 	/* Monitor cable plug/unplugged event for internal PHY, external PHY
2519 	 * and MoCA PHY
2520 	 */
2521 	if (priv->internal_phy) {
2522 		int0_enable |= UMAC_IRQ_LINK_EVENT;
2523 		if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
2524 			int0_enable |= UMAC_IRQ_PHY_DET_R;
2525 	} else if (priv->ext_phy) {
2526 		int0_enable |= UMAC_IRQ_LINK_EVENT;
2527 	} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2528 		if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
2529 			int0_enable |= UMAC_IRQ_LINK_EVENT;
2530 	}
2531 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2532 }
2533 
2534 static void init_umac(struct bcmgenet_priv *priv)
2535 {
2536 	struct device *kdev = &priv->pdev->dev;
2537 	u32 reg;
2538 	u32 int0_enable = 0;
2539 
2540 	dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
2541 
2542 	reset_umac(priv);
2543 
2544 	/* clear tx/rx counter */
2545 	bcmgenet_umac_writel(priv,
2546 			     MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
2547 			     UMAC_MIB_CTRL);
2548 	bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
2549 
2550 	bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
2551 
2552 	/* init tx registers, enable TSB */
2553 	reg = bcmgenet_tbuf_ctrl_get(priv);
2554 	reg |= TBUF_64B_EN;
2555 	bcmgenet_tbuf_ctrl_set(priv, reg);
2556 
2557 	/* init rx registers, enable ip header optimization and RSB */
2558 	reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
2559 	reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
2560 	bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
2561 
2562 	/* enable rx checksumming */
2563 	reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
2564 	reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
2565 	/* If UniMAC forwards CRC, we need to skip over it to get
2566 	 * a valid CHK bit to be set in the per-packet status word
2567 	 */
2568 	if (priv->crc_fwd_en)
2569 		reg |= RBUF_SKIP_FCS;
2570 	else
2571 		reg &= ~RBUF_SKIP_FCS;
2572 	bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
2573 
2574 	if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
2575 		bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
2576 
2577 	bcmgenet_intr_disable(priv);
2578 
2579 	/* Configure backpressure vectors for MoCA */
2580 	if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
2581 		reg = bcmgenet_bp_mc_get(priv);
2582 		reg |= BIT(priv->hw_params->bp_in_en_shift);
2583 
2584 		/* bp_mask: back pressure mask */
2585 		if (netif_is_multiqueue(priv->dev))
2586 			reg |= priv->hw_params->bp_in_mask;
2587 		else
2588 			reg &= ~priv->hw_params->bp_in_mask;
2589 		bcmgenet_bp_mc_set(priv, reg);
2590 	}
2591 
2592 	/* Enable MDIO interrupts on GENET v3+ */
2593 	if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
2594 		int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
2595 
2596 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
2597 
2598 	dev_dbg(kdev, "done init umac\n");
2599 }
2600 
2601 static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
2602 			      void (*cb)(struct work_struct *work))
2603 {
2604 	struct bcmgenet_net_dim *dim = &ring->dim;
2605 
2606 	INIT_WORK(&dim->dim.work, cb);
2607 	dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
2608 	dim->event_ctr = 0;
2609 	dim->packets = 0;
2610 	dim->bytes = 0;
2611 }
2612 
2613 static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2614 {
2615 	struct bcmgenet_net_dim *dim = &ring->dim;
2616 	struct dim_cq_moder moder;
2617 	u32 usecs, pkts;
2618 
2619 	usecs = ring->rx_coalesce_usecs;
2620 	pkts = ring->rx_max_coalesced_frames;
2621 
2622 	/* If DIM was enabled, re-apply default parameters */
2623 	if (dim->use_dim) {
2624 		moder = net_dim_get_def_rx_moderation(dim->dim.mode);
2625 		usecs = moder.usec;
2626 		pkts = moder.pkts;
2627 	}
2628 
2629 	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2630 }
2631 
2632 /* Initialize a Tx ring along with corresponding hardware registers */
2633 static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2634 				  unsigned int index, unsigned int size,
2635 				  unsigned int start_ptr, unsigned int end_ptr)
2636 {
2637 	struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2638 	u32 words_per_bd = WORDS_PER_BD(priv);
2639 	u32 flow_period_val = 0;
2640 
2641 	spin_lock_init(&ring->lock);
2642 	ring->priv = priv;
2643 	ring->index = index;
2644 	if (index == DESC_INDEX) {
2645 		ring->queue = 0;
2646 		ring->int_enable = bcmgenet_tx_ring16_int_enable;
2647 		ring->int_disable = bcmgenet_tx_ring16_int_disable;
2648 	} else {
2649 		ring->queue = index + 1;
2650 		ring->int_enable = bcmgenet_tx_ring_int_enable;
2651 		ring->int_disable = bcmgenet_tx_ring_int_disable;
2652 	}
2653 	ring->cbs = priv->tx_cbs + start_ptr;
2654 	ring->size = size;
2655 	ring->clean_ptr = start_ptr;
2656 	ring->c_index = 0;
2657 	ring->free_bds = size;
2658 	ring->write_ptr = start_ptr;
2659 	ring->cb_ptr = start_ptr;
2660 	ring->end_ptr = end_ptr - 1;
2661 	ring->prod_index = 0;
2662 
2663 	/* Set flow period for ring != 16 */
2664 	if (index != DESC_INDEX)
2665 		flow_period_val = ENET_MAX_MTU_SIZE << 16;
2666 
2667 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2668 	bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2669 	bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2670 	/* Disable rate control for now */
2671 	bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
2672 				  TDMA_FLOW_PERIOD);
2673 	bcmgenet_tdma_ring_writel(priv, index,
2674 				  ((size << DMA_RING_SIZE_SHIFT) |
2675 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2676 
2677 	/* Set start and end address, read and write pointers */
2678 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2679 				  DMA_START_ADDR);
2680 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2681 				  TDMA_READ_PTR);
2682 	bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
2683 				  TDMA_WRITE_PTR);
2684 	bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2685 				  DMA_END_ADDR);
2686 
2687 	/* Initialize Tx NAPI */
2688 	netif_napi_add_tx(priv->dev, &ring->napi, bcmgenet_tx_poll);
2689 }
2690 
2691 /* Initialize a RDMA ring */
2692 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
2693 				 unsigned int index, unsigned int size,
2694 				 unsigned int start_ptr, unsigned int end_ptr)
2695 {
2696 	struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
2697 	u32 words_per_bd = WORDS_PER_BD(priv);
2698 	int ret;
2699 
2700 	ring->priv = priv;
2701 	ring->index = index;
2702 	if (index == DESC_INDEX) {
2703 		ring->int_enable = bcmgenet_rx_ring16_int_enable;
2704 		ring->int_disable = bcmgenet_rx_ring16_int_disable;
2705 	} else {
2706 		ring->int_enable = bcmgenet_rx_ring_int_enable;
2707 		ring->int_disable = bcmgenet_rx_ring_int_disable;
2708 	}
2709 	ring->cbs = priv->rx_cbs + start_ptr;
2710 	ring->size = size;
2711 	ring->c_index = 0;
2712 	ring->read_ptr = start_ptr;
2713 	ring->cb_ptr = start_ptr;
2714 	ring->end_ptr = end_ptr - 1;
2715 
2716 	ret = bcmgenet_alloc_rx_buffers(priv, ring);
2717 	if (ret)
2718 		return ret;
2719 
2720 	bcmgenet_init_dim(ring, bcmgenet_dim_work);
2721 	bcmgenet_init_rx_coalesce(ring);
2722 
2723 	/* Initialize Rx NAPI */
2724 	netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
2725 
2726 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2727 	bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2728 	bcmgenet_rdma_ring_writel(priv, index,
2729 				  ((size << DMA_RING_SIZE_SHIFT) |
2730 				   RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
2731 	bcmgenet_rdma_ring_writel(priv, index,
2732 				  (DMA_FC_THRESH_LO <<
2733 				   DMA_XOFF_THRESHOLD_SHIFT) |
2734 				   DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
2735 
2736 	/* Set start and end address, read and write pointers */
2737 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2738 				  DMA_START_ADDR);
2739 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2740 				  RDMA_READ_PTR);
2741 	bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2742 				  RDMA_WRITE_PTR);
2743 	bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
2744 				  DMA_END_ADDR);
2745 
2746 	return ret;
2747 }
2748 
2749 static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2750 {
2751 	unsigned int i;
2752 	struct bcmgenet_tx_ring *ring;
2753 
2754 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2755 		ring = &priv->tx_rings[i];
2756 		napi_enable(&ring->napi);
2757 		ring->int_enable(ring);
2758 	}
2759 
2760 	ring = &priv->tx_rings[DESC_INDEX];
2761 	napi_enable(&ring->napi);
2762 	ring->int_enable(ring);
2763 }
2764 
2765 static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2766 {
2767 	unsigned int i;
2768 	struct bcmgenet_tx_ring *ring;
2769 
2770 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2771 		ring = &priv->tx_rings[i];
2772 		napi_disable(&ring->napi);
2773 	}
2774 
2775 	ring = &priv->tx_rings[DESC_INDEX];
2776 	napi_disable(&ring->napi);
2777 }
2778 
2779 static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2780 {
2781 	unsigned int i;
2782 	struct bcmgenet_tx_ring *ring;
2783 
2784 	for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2785 		ring = &priv->tx_rings[i];
2786 		netif_napi_del(&ring->napi);
2787 	}
2788 
2789 	ring = &priv->tx_rings[DESC_INDEX];
2790 	netif_napi_del(&ring->napi);
2791 }
2792 
2793 /* Initialize Tx queues
2794  *
2795  * Queues 0-3 are priority-based, each one has 32 descriptors,
2796  * with queue 0 being the highest priority queue.
2797  *
2798  * Queue 16 is the default Tx queue with
2799  * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
2800  *
2801  * The transmit control block pool is then partitioned as follows:
2802  * - Tx queue 0 uses tx_cbs[0..31]
2803  * - Tx queue 1 uses tx_cbs[32..63]
2804  * - Tx queue 2 uses tx_cbs[64..95]
2805  * - Tx queue 3 uses tx_cbs[96..127]
2806  * - Tx queue 16 uses tx_cbs[128..255]
2807  */
2808 static void bcmgenet_init_tx_queues(struct net_device *dev)
2809 {
2810 	struct bcmgenet_priv *priv = netdev_priv(dev);
2811 	u32 i, dma_enable;
2812 	u32 dma_ctrl, ring_cfg;
2813 	u32 dma_priority[3] = {0, 0, 0};
2814 
2815 	dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2816 	dma_enable = dma_ctrl & DMA_EN;
2817 	dma_ctrl &= ~DMA_EN;
2818 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2819 
2820 	dma_ctrl = 0;
2821 	ring_cfg = 0;
2822 
2823 	/* Enable strict priority arbiter mode */
2824 	bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2825 
2826 	/* Initialize Tx priority queues */
2827 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
2828 		bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2829 				      i * priv->hw_params->tx_bds_per_q,
2830 				      (i + 1) * priv->hw_params->tx_bds_per_q);
2831 		ring_cfg |= (1 << i);
2832 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2833 		dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2834 			((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
2835 	}
2836 
2837 	/* Initialize Tx default queue 16 */
2838 	bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
2839 			      priv->hw_params->tx_queues *
2840 			      priv->hw_params->tx_bds_per_q,
2841 			      TOTAL_DESC);
2842 	ring_cfg |= (1 << DESC_INDEX);
2843 	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2844 	dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2845 		((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2846 		 DMA_PRIO_REG_SHIFT(DESC_INDEX));
2847 
2848 	/* Set Tx queue priorities */
2849 	bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2850 	bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2851 	bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2852 
2853 	/* Enable Tx queues */
2854 	bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
2855 
2856 	/* Enable Tx DMA */
2857 	if (dma_enable)
2858 		dma_ctrl |= DMA_EN;
2859 	bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2860 }
2861 
2862 static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2863 {
2864 	unsigned int i;
2865 	struct bcmgenet_rx_ring *ring;
2866 
2867 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2868 		ring = &priv->rx_rings[i];
2869 		napi_enable(&ring->napi);
2870 		ring->int_enable(ring);
2871 	}
2872 
2873 	ring = &priv->rx_rings[DESC_INDEX];
2874 	napi_enable(&ring->napi);
2875 	ring->int_enable(ring);
2876 }
2877 
2878 static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2879 {
2880 	unsigned int i;
2881 	struct bcmgenet_rx_ring *ring;
2882 
2883 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2884 		ring = &priv->rx_rings[i];
2885 		napi_disable(&ring->napi);
2886 		cancel_work_sync(&ring->dim.dim.work);
2887 	}
2888 
2889 	ring = &priv->rx_rings[DESC_INDEX];
2890 	napi_disable(&ring->napi);
2891 	cancel_work_sync(&ring->dim.dim.work);
2892 }
2893 
2894 static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2895 {
2896 	unsigned int i;
2897 	struct bcmgenet_rx_ring *ring;
2898 
2899 	for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2900 		ring = &priv->rx_rings[i];
2901 		netif_napi_del(&ring->napi);
2902 	}
2903 
2904 	ring = &priv->rx_rings[DESC_INDEX];
2905 	netif_napi_del(&ring->napi);
2906 }
2907 
2908 /* Initialize Rx queues
2909  *
2910  * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2911  * used to direct traffic to these queues.
2912  *
2913  * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2914  */
2915 static int bcmgenet_init_rx_queues(struct net_device *dev)
2916 {
2917 	struct bcmgenet_priv *priv = netdev_priv(dev);
2918 	u32 i;
2919 	u32 dma_enable;
2920 	u32 dma_ctrl;
2921 	u32 ring_cfg;
2922 	int ret;
2923 
2924 	dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2925 	dma_enable = dma_ctrl & DMA_EN;
2926 	dma_ctrl &= ~DMA_EN;
2927 	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2928 
2929 	dma_ctrl = 0;
2930 	ring_cfg = 0;
2931 
2932 	/* Initialize Rx priority queues */
2933 	for (i = 0; i < priv->hw_params->rx_queues; i++) {
2934 		ret = bcmgenet_init_rx_ring(priv, i,
2935 					    priv->hw_params->rx_bds_per_q,
2936 					    i * priv->hw_params->rx_bds_per_q,
2937 					    (i + 1) *
2938 					    priv->hw_params->rx_bds_per_q);
2939 		if (ret)
2940 			return ret;
2941 
2942 		ring_cfg |= (1 << i);
2943 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2944 	}
2945 
2946 	/* Initialize Rx default queue 16 */
2947 	ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2948 				    priv->hw_params->rx_queues *
2949 				    priv->hw_params->rx_bds_per_q,
2950 				    TOTAL_DESC);
2951 	if (ret)
2952 		return ret;
2953 
2954 	ring_cfg |= (1 << DESC_INDEX);
2955 	dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2956 
2957 	/* Enable rings */
2958 	bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2959 
2960 	/* Configure ring as descriptor ring and re-enable DMA if enabled */
2961 	if (dma_enable)
2962 		dma_ctrl |= DMA_EN;
2963 	bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2964 
2965 	return 0;
2966 }
2967 
2968 static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2969 {
2970 	int ret = 0;
2971 	int timeout = 0;
2972 	u32 reg;
2973 	u32 dma_ctrl;
2974 	int i;
2975 
2976 	/* Disable TDMA to stop add more frames in TX DMA */
2977 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2978 	reg &= ~DMA_EN;
2979 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2980 
2981 	/* Check TDMA status register to confirm TDMA is disabled */
2982 	while (timeout++ < DMA_TIMEOUT_VAL) {
2983 		reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2984 		if (reg & DMA_DISABLED)
2985 			break;
2986 
2987 		udelay(1);
2988 	}
2989 
2990 	if (timeout == DMA_TIMEOUT_VAL) {
2991 		netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2992 		ret = -ETIMEDOUT;
2993 	}
2994 
2995 	/* Wait 10ms for packet drain in both tx and rx dma */
2996 	usleep_range(10000, 20000);
2997 
2998 	/* Disable RDMA */
2999 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3000 	reg &= ~DMA_EN;
3001 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3002 
3003 	timeout = 0;
3004 	/* Check RDMA status register to confirm RDMA is disabled */
3005 	while (timeout++ < DMA_TIMEOUT_VAL) {
3006 		reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
3007 		if (reg & DMA_DISABLED)
3008 			break;
3009 
3010 		udelay(1);
3011 	}
3012 
3013 	if (timeout == DMA_TIMEOUT_VAL) {
3014 		netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
3015 		ret = -ETIMEDOUT;
3016 	}
3017 
3018 	dma_ctrl = 0;
3019 	for (i = 0; i < priv->hw_params->rx_queues; i++)
3020 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3021 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3022 	reg &= ~dma_ctrl;
3023 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3024 
3025 	dma_ctrl = 0;
3026 	for (i = 0; i < priv->hw_params->tx_queues; i++)
3027 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3028 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3029 	reg &= ~dma_ctrl;
3030 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3031 
3032 	return ret;
3033 }
3034 
3035 static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
3036 {
3037 	struct netdev_queue *txq;
3038 	int i;
3039 
3040 	bcmgenet_fini_rx_napi(priv);
3041 	bcmgenet_fini_tx_napi(priv);
3042 
3043 	for (i = 0; i < priv->num_tx_bds; i++)
3044 		dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
3045 						  priv->tx_cbs + i));
3046 
3047 	for (i = 0; i < priv->hw_params->tx_queues; i++) {
3048 		txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
3049 		netdev_tx_reset_queue(txq);
3050 	}
3051 
3052 	txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
3053 	netdev_tx_reset_queue(txq);
3054 
3055 	bcmgenet_free_rx_buffers(priv);
3056 	kfree(priv->rx_cbs);
3057 	kfree(priv->tx_cbs);
3058 }
3059 
3060 /* init_edma: Initialize DMA control register */
3061 static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
3062 {
3063 	int ret;
3064 	unsigned int i;
3065 	struct enet_cb *cb;
3066 
3067 	netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
3068 
3069 	/* Initialize common Rx ring structures */
3070 	priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
3071 	priv->num_rx_bds = TOTAL_DESC;
3072 	priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
3073 			       GFP_KERNEL);
3074 	if (!priv->rx_cbs)
3075 		return -ENOMEM;
3076 
3077 	for (i = 0; i < priv->num_rx_bds; i++) {
3078 		cb = priv->rx_cbs + i;
3079 		cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
3080 	}
3081 
3082 	/* Initialize common TX ring structures */
3083 	priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
3084 	priv->num_tx_bds = TOTAL_DESC;
3085 	priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
3086 			       GFP_KERNEL);
3087 	if (!priv->tx_cbs) {
3088 		kfree(priv->rx_cbs);
3089 		return -ENOMEM;
3090 	}
3091 
3092 	for (i = 0; i < priv->num_tx_bds; i++) {
3093 		cb = priv->tx_cbs + i;
3094 		cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
3095 	}
3096 
3097 	/* Init rDma */
3098 	bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
3099 			     DMA_SCB_BURST_SIZE);
3100 
3101 	/* Initialize Rx queues */
3102 	ret = bcmgenet_init_rx_queues(priv->dev);
3103 	if (ret) {
3104 		netdev_err(priv->dev, "failed to initialize Rx queues\n");
3105 		bcmgenet_free_rx_buffers(priv);
3106 		kfree(priv->rx_cbs);
3107 		kfree(priv->tx_cbs);
3108 		return ret;
3109 	}
3110 
3111 	/* Init tDma */
3112 	bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
3113 			     DMA_SCB_BURST_SIZE);
3114 
3115 	/* Initialize Tx queues */
3116 	bcmgenet_init_tx_queues(priv->dev);
3117 
3118 	return 0;
3119 }
3120 
3121 /* Interrupt bottom half */
3122 static void bcmgenet_irq_task(struct work_struct *work)
3123 {
3124 	unsigned int status;
3125 	struct bcmgenet_priv *priv = container_of(
3126 			work, struct bcmgenet_priv, bcmgenet_irq_work);
3127 
3128 	netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
3129 
3130 	spin_lock_irq(&priv->lock);
3131 	status = priv->irq0_stat;
3132 	priv->irq0_stat = 0;
3133 	spin_unlock_irq(&priv->lock);
3134 
3135 	if (status & UMAC_IRQ_PHY_DET_R &&
3136 	    priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
3137 		phy_init_hw(priv->dev->phydev);
3138 		genphy_config_aneg(priv->dev->phydev);
3139 	}
3140 
3141 	/* Link UP/DOWN event */
3142 	if (status & UMAC_IRQ_LINK_EVENT)
3143 		phy_mac_interrupt(priv->dev->phydev);
3144 
3145 }
3146 
3147 /* bcmgenet_isr1: handle Rx and Tx priority queues */
3148 static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
3149 {
3150 	struct bcmgenet_priv *priv = dev_id;
3151 	struct bcmgenet_rx_ring *rx_ring;
3152 	struct bcmgenet_tx_ring *tx_ring;
3153 	unsigned int index, status;
3154 
3155 	/* Read irq status */
3156 	status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
3157 		~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3158 
3159 	/* clear interrupts */
3160 	bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
3161 
3162 	netif_dbg(priv, intr, priv->dev,
3163 		  "%s: IRQ=0x%x\n", __func__, status);
3164 
3165 	/* Check Rx priority queue interrupts */
3166 	for (index = 0; index < priv->hw_params->rx_queues; index++) {
3167 		if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
3168 			continue;
3169 
3170 		rx_ring = &priv->rx_rings[index];
3171 		rx_ring->dim.event_ctr++;
3172 
3173 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
3174 			rx_ring->int_disable(rx_ring);
3175 			__napi_schedule_irqoff(&rx_ring->napi);
3176 		}
3177 	}
3178 
3179 	/* Check Tx priority queue interrupts */
3180 	for (index = 0; index < priv->hw_params->tx_queues; index++) {
3181 		if (!(status & BIT(index)))
3182 			continue;
3183 
3184 		tx_ring = &priv->tx_rings[index];
3185 
3186 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
3187 			tx_ring->int_disable(tx_ring);
3188 			__napi_schedule_irqoff(&tx_ring->napi);
3189 		}
3190 	}
3191 
3192 	return IRQ_HANDLED;
3193 }
3194 
3195 /* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
3196 static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
3197 {
3198 	struct bcmgenet_priv *priv = dev_id;
3199 	struct bcmgenet_rx_ring *rx_ring;
3200 	struct bcmgenet_tx_ring *tx_ring;
3201 	unsigned int status;
3202 	unsigned long flags;
3203 
3204 	/* Read irq status */
3205 	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
3206 		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3207 
3208 	/* clear interrupts */
3209 	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);
3210 
3211 	netif_dbg(priv, intr, priv->dev,
3212 		  "IRQ=0x%x\n", status);
3213 
3214 	if (status & UMAC_IRQ_RXDMA_DONE) {
3215 		rx_ring = &priv->rx_rings[DESC_INDEX];
3216 		rx_ring->dim.event_ctr++;
3217 
3218 		if (likely(napi_schedule_prep(&rx_ring->napi))) {
3219 			rx_ring->int_disable(rx_ring);
3220 			__napi_schedule_irqoff(&rx_ring->napi);
3221 		}
3222 	}
3223 
3224 	if (status & UMAC_IRQ_TXDMA_DONE) {
3225 		tx_ring = &priv->tx_rings[DESC_INDEX];
3226 
3227 		if (likely(napi_schedule_prep(&tx_ring->napi))) {
3228 			tx_ring->int_disable(tx_ring);
3229 			__napi_schedule_irqoff(&tx_ring->napi);
3230 		}
3231 	}
3232 
3233 	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
3234 		status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
3235 		wake_up(&priv->wq);
3236 	}
3237 
3238 	/* all other interested interrupts handled in bottom half */
3239 	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
3240 	if (status) {
3241 		/* Save irq status for bottom-half processing. */
3242 		spin_lock_irqsave(&priv->lock, flags);
3243 		priv->irq0_stat |= status;
3244 		spin_unlock_irqrestore(&priv->lock, flags);
3245 
3246 		schedule_work(&priv->bcmgenet_irq_work);
3247 	}
3248 
3249 	return IRQ_HANDLED;
3250 }
3251 
3252 static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
3253 {
3254 	/* Acknowledge the interrupt */
3255 	return IRQ_HANDLED;
3256 }
3257 
3258 #ifdef CONFIG_NET_POLL_CONTROLLER
3259 static void bcmgenet_poll_controller(struct net_device *dev)
3260 {
3261 	struct bcmgenet_priv *priv = netdev_priv(dev);
3262 
3263 	/* Invoke the main RX/TX interrupt handler */
3264 	disable_irq(priv->irq0);
3265 	bcmgenet_isr0(priv->irq0, priv);
3266 	enable_irq(priv->irq0);
3267 
3268 	/* And the interrupt handler for RX/TX priority queues */
3269 	disable_irq(priv->irq1);
3270 	bcmgenet_isr1(priv->irq1, priv);
3271 	enable_irq(priv->irq1);
3272 }
3273 #endif
3274 
3275 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
3276 {
3277 	u32 reg;
3278 
3279 	reg = bcmgenet_rbuf_ctrl_get(priv);
3280 	reg |= BIT(1);
3281 	bcmgenet_rbuf_ctrl_set(priv, reg);
3282 	udelay(10);
3283 
3284 	reg &= ~BIT(1);
3285 	bcmgenet_rbuf_ctrl_set(priv, reg);
3286 	udelay(10);
3287 }
3288 
3289 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
3290 				 const unsigned char *addr)
3291 {
3292 	bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
3293 	bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
3294 }
3295 
3296 static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
3297 				 unsigned char *addr)
3298 {
3299 	u32 addr_tmp;
3300 
3301 	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC0);
3302 	put_unaligned_be32(addr_tmp, &addr[0]);
3303 	addr_tmp = bcmgenet_umac_readl(priv, UMAC_MAC1);
3304 	put_unaligned_be16(addr_tmp, &addr[4]);
3305 }
3306 
3307 /* Returns a reusable dma control register value */
3308 static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv, bool flush_rx)
3309 {
3310 	unsigned int i;
3311 	u32 reg;
3312 	u32 dma_ctrl;
3313 
3314 	/* disable DMA */
3315 	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3316 	for (i = 0; i < priv->hw_params->tx_queues; i++)
3317 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3318 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3319 	reg &= ~dma_ctrl;
3320 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3321 
3322 	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
3323 	for (i = 0; i < priv->hw_params->rx_queues; i++)
3324 		dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
3325 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3326 	reg &= ~dma_ctrl;
3327 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3328 
3329 	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
3330 	udelay(10);
3331 	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
3332 
3333 	if (flush_rx) {
3334 		reg = bcmgenet_rbuf_ctrl_get(priv);
3335 		bcmgenet_rbuf_ctrl_set(priv, reg | BIT(0));
3336 		udelay(10);
3337 		bcmgenet_rbuf_ctrl_set(priv, reg);
3338 		udelay(10);
3339 	}
3340 
3341 	return dma_ctrl;
3342 }
3343 
3344 static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
3345 {
3346 	u32 reg;
3347 
3348 	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
3349 	reg |= dma_ctrl;
3350 	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
3351 
3352 	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
3353 	reg |= dma_ctrl;
3354 	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
3355 }
3356 
3357 static void bcmgenet_netif_start(struct net_device *dev)
3358 {
3359 	struct bcmgenet_priv *priv = netdev_priv(dev);
3360 
3361 	/* Start the network engine */
3362 	netif_addr_lock_bh(dev);
3363 	bcmgenet_set_rx_mode(dev);
3364 	netif_addr_unlock_bh(dev);
3365 	bcmgenet_enable_rx_napi(priv);
3366 
3367 	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
3368 
3369 	bcmgenet_enable_tx_napi(priv);
3370 
3371 	/* Monitor link interrupts now */
3372 	bcmgenet_link_intr_enable(priv);
3373 
3374 	phy_start(dev->phydev);
3375 }
3376 
3377 static int bcmgenet_open(struct net_device *dev)
3378 {
3379 	struct bcmgenet_priv *priv = netdev_priv(dev);
3380 	unsigned long dma_ctrl;
3381 	int ret;
3382 
3383 	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
3384 
3385 	/* Turn on the clock */
3386 	clk_prepare_enable(priv->clk);
3387 
3388 	/* If this is an internal GPHY, power it back on now, before UniMAC is
3389 	 * brought out of reset as absolutely no UniMAC activity is allowed
3390 	 */
3391 	if (priv->internal_phy)
3392 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3393 
3394 	/* take MAC out of reset */
3395 	bcmgenet_umac_reset(priv);
3396 
3397 	init_umac(priv);
3398 
3399 	/* Apply features again in case we changed them while interface was
3400 	 * down
3401 	 */
3402 	bcmgenet_set_features(dev, dev->features);
3403 
3404 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
3405 
3406 	/* Disable RX/TX DMA and flush TX and RX queues */
3407 	dma_ctrl = bcmgenet_dma_disable(priv, true);
3408 
3409 	/* Reinitialize TDMA and RDMA and SW housekeeping */
3410 	ret = bcmgenet_init_dma(priv);
3411 	if (ret) {
3412 		netdev_err(dev, "failed to initialize DMA\n");
3413 		goto err_clk_disable;
3414 	}
3415 
3416 	/* Always enable ring 16 - descriptor ring */
3417 	bcmgenet_enable_dma(priv, dma_ctrl);
3418 
3419 	/* HFB init */
3420 	bcmgenet_hfb_init(priv);
3421 
3422 	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
3423 			  dev->name, priv);
3424 	if (ret < 0) {
3425 		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
3426 		goto err_fini_dma;
3427 	}
3428 
3429 	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
3430 			  dev->name, priv);
3431 	if (ret < 0) {
3432 		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
3433 		goto err_irq0;
3434 	}
3435 
3436 	ret = bcmgenet_mii_probe(dev);
3437 	if (ret) {
3438 		netdev_err(dev, "failed to connect to PHY\n");
3439 		goto err_irq1;
3440 	}
3441 
3442 	bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
3443 
3444 	bcmgenet_netif_start(dev);
3445 
3446 	netif_tx_start_all_queues(dev);
3447 
3448 	return 0;
3449 
3450 err_irq1:
3451 	free_irq(priv->irq1, priv);
3452 err_irq0:
3453 	free_irq(priv->irq0, priv);
3454 err_fini_dma:
3455 	bcmgenet_dma_teardown(priv);
3456 	bcmgenet_fini_dma(priv);
3457 err_clk_disable:
3458 	if (priv->internal_phy)
3459 		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3460 	clk_disable_unprepare(priv->clk);
3461 	return ret;
3462 }
3463 
3464 static void bcmgenet_netif_stop(struct net_device *dev, bool stop_phy)
3465 {
3466 	struct bcmgenet_priv *priv = netdev_priv(dev);
3467 
3468 	bcmgenet_disable_tx_napi(priv);
3469 	netif_tx_disable(dev);
3470 
3471 	/* Disable MAC receive */
3472 	umac_enable_set(priv, CMD_RX_EN, false);
3473 
3474 	bcmgenet_dma_teardown(priv);
3475 
3476 	/* Disable MAC transmit. TX DMA disabled must be done before this */
3477 	umac_enable_set(priv, CMD_TX_EN, false);
3478 
3479 	if (stop_phy)
3480 		phy_stop(dev->phydev);
3481 	bcmgenet_disable_rx_napi(priv);
3482 	bcmgenet_intr_disable(priv);
3483 
3484 	/* Wait for pending work items to complete. Since interrupts are
3485 	 * disabled no new work will be scheduled.
3486 	 */
3487 	cancel_work_sync(&priv->bcmgenet_irq_work);
3488 
3489 	/* tx reclaim */
3490 	bcmgenet_tx_reclaim_all(dev);
3491 	bcmgenet_fini_dma(priv);
3492 }
3493 
3494 static int bcmgenet_close(struct net_device *dev)
3495 {
3496 	struct bcmgenet_priv *priv = netdev_priv(dev);
3497 	int ret = 0;
3498 
3499 	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
3500 
3501 	bcmgenet_netif_stop(dev, false);
3502 
3503 	/* Really kill the PHY state machine and disconnect from it */
3504 	phy_disconnect(dev->phydev);
3505 
3506 	free_irq(priv->irq0, priv);
3507 	free_irq(priv->irq1, priv);
3508 
3509 	if (priv->internal_phy)
3510 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
3511 
3512 	clk_disable_unprepare(priv->clk);
3513 
3514 	return ret;
3515 }
3516 
3517 static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
3518 {
3519 	struct bcmgenet_priv *priv = ring->priv;
3520 	u32 p_index, c_index, intsts, intmsk;
3521 	struct netdev_queue *txq;
3522 	unsigned int free_bds;
3523 	bool txq_stopped;
3524 
3525 	if (!netif_msg_tx_err(priv))
3526 		return;
3527 
3528 	txq = netdev_get_tx_queue(priv->dev, ring->queue);
3529 
3530 	spin_lock(&ring->lock);
3531 	if (ring->index == DESC_INDEX) {
3532 		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
3533 		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
3534 	} else {
3535 		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
3536 		intmsk = 1 << ring->index;
3537 	}
3538 	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
3539 	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
3540 	txq_stopped = netif_tx_queue_stopped(txq);
3541 	free_bds = ring->free_bds;
3542 	spin_unlock(&ring->lock);
3543 
3544 	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
3545 		  "TX queue status: %s, interrupts: %s\n"
3546 		  "(sw)free_bds: %d (sw)size: %d\n"
3547 		  "(sw)p_index: %d (hw)p_index: %d\n"
3548 		  "(sw)c_index: %d (hw)c_index: %d\n"
3549 		  "(sw)clean_p: %d (sw)write_p: %d\n"
3550 		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
3551 		  ring->index, ring->queue,
3552 		  txq_stopped ? "stopped" : "active",
3553 		  intsts & intmsk ? "enabled" : "disabled",
3554 		  free_bds, ring->size,
3555 		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
3556 		  ring->c_index, c_index & DMA_C_INDEX_MASK,
3557 		  ring->clean_ptr, ring->write_ptr,
3558 		  ring->cb_ptr, ring->end_ptr);
3559 }
3560 
3561 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3562 {
3563 	struct bcmgenet_priv *priv = netdev_priv(dev);
3564 	u32 int0_enable = 0;
3565 	u32 int1_enable = 0;
3566 	unsigned int q;
3567 
3568 	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3569 
3570 	for (q = 0; q < priv->hw_params->tx_queues; q++)
3571 		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3572 	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);
3573 
3574 	bcmgenet_tx_reclaim_all(dev);
3575 
3576 	for (q = 0; q < priv->hw_params->tx_queues; q++)
3577 		int1_enable |= (1 << q);
3578 
3579 	int0_enable = UMAC_IRQ_TXDMA_DONE;
3580 
3581 	/* Re-enable TX interrupts if disabled */
3582 	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
3583 	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3584 
3585 	netif_trans_update(dev);
3586 
3587 	dev->stats.tx_errors++;
3588 
3589 	netif_tx_wake_all_queues(dev);
3590 }
3591 
3592 #define MAX_MDF_FILTER	17
3593 
3594 static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
3595 					 const unsigned char *addr,
3596 					 int *i)
3597 {
3598 	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
3599 			     UMAC_MDF_ADDR + (*i * 4));
3600 	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
3601 			     addr[4] << 8 | addr[5],
3602 			     UMAC_MDF_ADDR + ((*i + 1) * 4));
3603 	*i += 2;
3604 }
3605 
3606 static void bcmgenet_set_rx_mode(struct net_device *dev)
3607 {
3608 	struct bcmgenet_priv *priv = netdev_priv(dev);
3609 	struct netdev_hw_addr *ha;
3610 	int i, nfilter;
3611 	u32 reg;
3612 
3613 	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
3614 
3615 	/* Number of filters needed */
3616 	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
3617 
3618 	/*
3619 	 * Turn on promicuous mode for three scenarios
3620 	 * 1. IFF_PROMISC flag is set
3621 	 * 2. IFF_ALLMULTI flag is set
3622 	 * 3. The number of filters needed exceeds the number filters
3623 	 *    supported by the hardware.
3624 	*/
3625 	spin_lock(&priv->reg_lock);
3626 	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
3627 	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
3628 	    (nfilter > MAX_MDF_FILTER)) {
3629 		reg |= CMD_PROMISC;
3630 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3631 		spin_unlock(&priv->reg_lock);
3632 		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
3633 		return;
3634 	} else {
3635 		reg &= ~CMD_PROMISC;
3636 		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
3637 		spin_unlock(&priv->reg_lock);
3638 	}
3639 
3640 	/* update MDF filter */
3641 	i = 0;
3642 	/* Broadcast */
3643 	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
3644 	/* my own address.*/
3645 	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);
3646 
3647 	/* Unicast */
3648 	netdev_for_each_uc_addr(ha, dev)
3649 		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3650 
3651 	/* Multicast */
3652 	netdev_for_each_mc_addr(ha, dev)
3653 		bcmgenet_set_mdf_addr(priv, ha->addr, &i);
3654 
3655 	/* Enable filters */
3656 	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
3657 	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
3658 }
3659 
3660 /* Set the hardware MAC address. */
3661 static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
3662 {
3663 	struct sockaddr *addr = p;
3664 
3665 	/* Setting the MAC address at the hardware level is not possible
3666 	 * without disabling the UniMAC RX/TX enable bits.
3667 	 */
3668 	if (netif_running(dev))
3669 		return -EBUSY;
3670 
3671 	eth_hw_addr_set(dev, addr->sa_data);
3672 
3673 	return 0;
3674 }
3675 
3676 static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
3677 {
3678 	struct bcmgenet_priv *priv = netdev_priv(dev);
3679 	unsigned long tx_bytes = 0, tx_packets = 0;
3680 	unsigned long rx_bytes = 0, rx_packets = 0;
3681 	unsigned long rx_errors = 0, rx_dropped = 0;
3682 	struct bcmgenet_tx_ring *tx_ring;
3683 	struct bcmgenet_rx_ring *rx_ring;
3684 	unsigned int q;
3685 
3686 	for (q = 0; q < priv->hw_params->tx_queues; q++) {
3687 		tx_ring = &priv->tx_rings[q];
3688 		tx_bytes += tx_ring->bytes;
3689 		tx_packets += tx_ring->packets;
3690 	}
3691 	tx_ring = &priv->tx_rings[DESC_INDEX];
3692 	tx_bytes += tx_ring->bytes;
3693 	tx_packets += tx_ring->packets;
3694 
3695 	for (q = 0; q < priv->hw_params->rx_queues; q++) {
3696 		rx_ring = &priv->rx_rings[q];
3697 
3698 		rx_bytes += rx_ring->bytes;
3699 		rx_packets += rx_ring->packets;
3700 		rx_errors += rx_ring->errors;
3701 		rx_dropped += rx_ring->dropped;
3702 	}
3703 	rx_ring = &priv->rx_rings[DESC_INDEX];
3704 	rx_bytes += rx_ring->bytes;
3705 	rx_packets += rx_ring->packets;
3706 	rx_errors += rx_ring->errors;
3707 	rx_dropped += rx_ring->dropped;
3708 
3709 	dev->stats.tx_bytes = tx_bytes;
3710 	dev->stats.tx_packets = tx_packets;
3711 	dev->stats.rx_bytes = rx_bytes;
3712 	dev->stats.rx_packets = rx_packets;
3713 	dev->stats.rx_errors = rx_errors;
3714 	dev->stats.rx_missed_errors = rx_errors;
3715 	dev->stats.rx_dropped = rx_dropped;
3716 	return &dev->stats;
3717 }
3718 
3719 static int bcmgenet_change_carrier(struct net_device *dev, bool new_carrier)
3720 {
3721 	struct bcmgenet_priv *priv = netdev_priv(dev);
3722 
3723 	if (!dev->phydev || !phy_is_pseudo_fixed_link(dev->phydev) ||
3724 	    priv->phy_interface != PHY_INTERFACE_MODE_MOCA)
3725 		return -EOPNOTSUPP;
3726 
3727 	if (new_carrier)
3728 		netif_carrier_on(dev);
3729 	else
3730 		netif_carrier_off(dev);
3731 
3732 	return 0;
3733 }
3734 
3735 static const struct net_device_ops bcmgenet_netdev_ops = {
3736 	.ndo_open		= bcmgenet_open,
3737 	.ndo_stop		= bcmgenet_close,
3738 	.ndo_start_xmit		= bcmgenet_xmit,
3739 	.ndo_tx_timeout		= bcmgenet_timeout,
3740 	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
3741 	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
3742 	.ndo_eth_ioctl		= phy_do_ioctl_running,
3743 	.ndo_set_features	= bcmgenet_set_features,
3744 #ifdef CONFIG_NET_POLL_CONTROLLER
3745 	.ndo_poll_controller	= bcmgenet_poll_controller,
3746 #endif
3747 	.ndo_get_stats		= bcmgenet_get_stats,
3748 	.ndo_change_carrier	= bcmgenet_change_carrier,
3749 };
3750 
3751 /* Array of GENET hardware parameters/characteristics */
3752 static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
3753 	[GENET_V1] = {
3754 		.tx_queues = 0,
3755 		.tx_bds_per_q = 0,
3756 		.rx_queues = 0,
3757 		.rx_bds_per_q = 0,
3758 		.bp_in_en_shift = 16,
3759 		.bp_in_mask = 0xffff,
3760 		.hfb_filter_cnt = 16,
3761 		.qtag_mask = 0x1F,
3762 		.hfb_offset = 0x1000,
3763 		.rdma_offset = 0x2000,
3764 		.tdma_offset = 0x3000,
3765 		.words_per_bd = 2,
3766 	},
3767 	[GENET_V2] = {
3768 		.tx_queues = 4,
3769 		.tx_bds_per_q = 32,
3770 		.rx_queues = 0,
3771 		.rx_bds_per_q = 0,
3772 		.bp_in_en_shift = 16,
3773 		.bp_in_mask = 0xffff,
3774 		.hfb_filter_cnt = 16,
3775 		.qtag_mask = 0x1F,
3776 		.tbuf_offset = 0x0600,
3777 		.hfb_offset = 0x1000,
3778 		.hfb_reg_offset = 0x2000,
3779 		.rdma_offset = 0x3000,
3780 		.tdma_offset = 0x4000,
3781 		.words_per_bd = 2,
3782 		.flags = GENET_HAS_EXT,
3783 	},
3784 	[GENET_V3] = {
3785 		.tx_queues = 4,
3786 		.tx_bds_per_q = 32,
3787 		.rx_queues = 0,
3788 		.rx_bds_per_q = 0,
3789 		.bp_in_en_shift = 17,
3790 		.bp_in_mask = 0x1ffff,
3791 		.hfb_filter_cnt = 48,
3792 		.hfb_filter_size = 128,
3793 		.qtag_mask = 0x3F,
3794 		.tbuf_offset = 0x0600,
3795 		.hfb_offset = 0x8000,
3796 		.hfb_reg_offset = 0xfc00,
3797 		.rdma_offset = 0x10000,
3798 		.tdma_offset = 0x11000,
3799 		.words_per_bd = 2,
3800 		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
3801 			 GENET_HAS_MOCA_LINK_DET,
3802 	},
3803 	[GENET_V4] = {
3804 		.tx_queues = 4,
3805 		.tx_bds_per_q = 32,
3806 		.rx_queues = 0,
3807 		.rx_bds_per_q = 0,
3808 		.bp_in_en_shift = 17,
3809 		.bp_in_mask = 0x1ffff,
3810 		.hfb_filter_cnt = 48,
3811 		.hfb_filter_size = 128,
3812 		.qtag_mask = 0x3F,
3813 		.tbuf_offset = 0x0600,
3814 		.hfb_offset = 0x8000,
3815 		.hfb_reg_offset = 0xfc00,
3816 		.rdma_offset = 0x2000,
3817 		.tdma_offset = 0x4000,
3818 		.words_per_bd = 3,
3819 		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3820 			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3821 	},
3822 	[GENET_V5] = {
3823 		.tx_queues = 4,
3824 		.tx_bds_per_q = 32,
3825 		.rx_queues = 0,
3826 		.rx_bds_per_q = 0,
3827 		.bp_in_en_shift = 17,
3828 		.bp_in_mask = 0x1ffff,
3829 		.hfb_filter_cnt = 48,
3830 		.hfb_filter_size = 128,
3831 		.qtag_mask = 0x3F,
3832 		.tbuf_offset = 0x0600,
3833 		.hfb_offset = 0x8000,
3834 		.hfb_reg_offset = 0xfc00,
3835 		.rdma_offset = 0x2000,
3836 		.tdma_offset = 0x4000,
3837 		.words_per_bd = 3,
3838 		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
3839 			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
3840 	},
3841 };
3842 
3843 /* Infer hardware parameters from the detected GENET version */
3844 static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
3845 {
3846 	struct bcmgenet_hw_params *params;
3847 	u32 reg;
3848 	u8 major;
3849 	u16 gphy_rev;
3850 
3851 	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
3852 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3853 		genet_dma_ring_regs = genet_dma_ring_regs_v4;
3854 	} else if (GENET_IS_V3(priv)) {
3855 		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
3856 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3857 	} else if (GENET_IS_V2(priv)) {
3858 		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
3859 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3860 	} else if (GENET_IS_V1(priv)) {
3861 		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
3862 		genet_dma_ring_regs = genet_dma_ring_regs_v123;
3863 	}
3864 
3865 	/* enum genet_version starts at 1 */
3866 	priv->hw_params = &bcmgenet_hw_params[priv->version];
3867 	params = priv->hw_params;
3868 
3869 	/* Read GENET HW version */
3870 	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
3871 	major = (reg >> 24 & 0x0f);
3872 	if (major == 6)
3873 		major = 5;
3874 	else if (major == 5)
3875 		major = 4;
3876 	else if (major == 0)
3877 		major = 1;
3878 	if (major != priv->version) {
3879 		dev_err(&priv->pdev->dev,
3880 			"GENET version mismatch, got: %d, configured for: %d\n",
3881 			major, priv->version);
3882 	}
3883 
3884 	/* Print the GENET core version */
3885 	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
3886 		 major, (reg >> 16) & 0x0f, reg & 0xffff);
3887 
3888 	/* Store the integrated PHY revision for the MDIO probing function
3889 	 * to pass this information to the PHY driver. The PHY driver expects
3890 	 * to find the PHY major revision in bits 15:8 while the GENET register
3891 	 * stores that information in bits 7:0, account for that.
3892 	 *
3893 	 * On newer chips, starting with PHY revision G0, a new scheme is
3894 	 * deployed similar to the Starfighter 2 switch with GPHY major
3895 	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
3896 	 * is reserved as well as special value 0x01ff, we have a small
3897 	 * heuristic to check for the new GPHY revision and re-arrange things
3898 	 * so the GPHY driver is happy.
3899 	 */
3900 	gphy_rev = reg & 0xffff;
3901 
3902 	if (GENET_IS_V5(priv)) {
3903 		/* The EPHY revision should come from the MDIO registers of
3904 		 * the PHY not from GENET.
3905 		 */
3906 		if (gphy_rev != 0) {
3907 			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
3908 				gphy_rev);
3909 		}
3910 	/* This is reserved so should require special treatment */
3911 	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
3912 		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
3913 		return;
3914 	/* This is the good old scheme, just GPHY major, no minor nor patch */
3915 	} else if ((gphy_rev & 0xf0) != 0) {
3916 		priv->gphy_rev = gphy_rev << 8;
3917 	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
3918 	} else if ((gphy_rev & 0xff00) != 0) {
3919 		priv->gphy_rev = gphy_rev;
3920 	}
3921 
3922 #ifdef CONFIG_PHYS_ADDR_T_64BIT
3923 	if (!(params->flags & GENET_HAS_40BITS))
3924 		pr_warn("GENET does not support 40-bits PA\n");
3925 #endif
3926 
3927 	pr_debug("Configuration for version: %d\n"
3928 		"TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
3929 		"BP << en: %2d, BP msk: 0x%05x\n"
3930 		"HFB count: %2d, QTAQ msk: 0x%05x\n"
3931 		"TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
3932 		"RDMA: 0x%05x, TDMA: 0x%05x\n"
3933 		"Words/BD: %d\n",
3934 		priv->version,
3935 		params->tx_queues, params->tx_bds_per_q,
3936 		params->rx_queues, params->rx_bds_per_q,
3937 		params->bp_in_en_shift, params->bp_in_mask,
3938 		params->hfb_filter_cnt, params->qtag_mask,
3939 		params->tbuf_offset, params->hfb_offset,
3940 		params->hfb_reg_offset,
3941 		params->rdma_offset, params->tdma_offset,
3942 		params->words_per_bd);
3943 }
3944 
3945 struct bcmgenet_plat_data {
3946 	enum bcmgenet_version version;
3947 	u32 dma_max_burst_length;
3948 	bool ephy_16nm;
3949 };
3950 
3951 static const struct bcmgenet_plat_data v1_plat_data = {
3952 	.version = GENET_V1,
3953 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3954 };
3955 
3956 static const struct bcmgenet_plat_data v2_plat_data = {
3957 	.version = GENET_V2,
3958 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3959 };
3960 
3961 static const struct bcmgenet_plat_data v3_plat_data = {
3962 	.version = GENET_V3,
3963 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3964 };
3965 
3966 static const struct bcmgenet_plat_data v4_plat_data = {
3967 	.version = GENET_V4,
3968 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3969 };
3970 
3971 static const struct bcmgenet_plat_data v5_plat_data = {
3972 	.version = GENET_V5,
3973 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3974 };
3975 
3976 static const struct bcmgenet_plat_data bcm2711_plat_data = {
3977 	.version = GENET_V5,
3978 	.dma_max_burst_length = 0x08,
3979 };
3980 
3981 static const struct bcmgenet_plat_data bcm7712_plat_data = {
3982 	.version = GENET_V5,
3983 	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
3984 	.ephy_16nm = true,
3985 };
3986 
3987 static const struct of_device_id bcmgenet_match[] = {
3988 	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
3989 	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
3990 	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
3991 	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
3992 	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
3993 	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
3994 	{ .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
3995 	{ },
3996 };
3997 MODULE_DEVICE_TABLE(of, bcmgenet_match);
3998 
3999 static int bcmgenet_probe(struct platform_device *pdev)
4000 {
4001 	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
4002 	const struct bcmgenet_plat_data *pdata;
4003 	struct bcmgenet_priv *priv;
4004 	struct net_device *dev;
4005 	unsigned int i;
4006 	int err = -EIO;
4007 
4008 	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
4009 	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
4010 				 GENET_MAX_MQ_CNT + 1);
4011 	if (!dev) {
4012 		dev_err(&pdev->dev, "can't allocate net device\n");
4013 		return -ENOMEM;
4014 	}
4015 
4016 	priv = netdev_priv(dev);
4017 	priv->irq0 = platform_get_irq(pdev, 0);
4018 	if (priv->irq0 < 0) {
4019 		err = priv->irq0;
4020 		goto err;
4021 	}
4022 	priv->irq1 = platform_get_irq(pdev, 1);
4023 	if (priv->irq1 < 0) {
4024 		err = priv->irq1;
4025 		goto err;
4026 	}
4027 	priv->wol_irq = platform_get_irq_optional(pdev, 2);
4028 	if (priv->wol_irq == -EPROBE_DEFER) {
4029 		err = priv->wol_irq;
4030 		goto err;
4031 	}
4032 
4033 	priv->base = devm_platform_ioremap_resource(pdev, 0);
4034 	if (IS_ERR(priv->base)) {
4035 		err = PTR_ERR(priv->base);
4036 		goto err;
4037 	}
4038 
4039 	spin_lock_init(&priv->reg_lock);
4040 	spin_lock_init(&priv->lock);
4041 
4042 	/* Set default pause parameters */
4043 	priv->autoneg_pause = 1;
4044 	priv->tx_pause = 1;
4045 	priv->rx_pause = 1;
4046 
4047 	SET_NETDEV_DEV(dev, &pdev->dev);
4048 	dev_set_drvdata(&pdev->dev, dev);
4049 	dev->watchdog_timeo = 2 * HZ;
4050 	dev->ethtool_ops = &bcmgenet_ethtool_ops;
4051 	dev->netdev_ops = &bcmgenet_netdev_ops;
4052 
4053 	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
4054 
4055 	/* Set default features */
4056 	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
4057 			 NETIF_F_RXCSUM;
4058 	dev->hw_features |= dev->features;
4059 	dev->vlan_features |= dev->features;
4060 
4061 	/* Request the WOL interrupt and advertise suspend if available */
4062 	priv->wol_irq_disabled = true;
4063 	if (priv->wol_irq > 0) {
4064 		err = devm_request_irq(&pdev->dev, priv->wol_irq,
4065 				       bcmgenet_wol_isr, 0, dev->name, priv);
4066 		if (!err)
4067 			device_set_wakeup_capable(&pdev->dev, 1);
4068 	}
4069 
4070 	/* Set the needed headroom to account for any possible
4071 	 * features enabling/disabling at runtime
4072 	 */
4073 	dev->needed_headroom += 64;
4074 
4075 	priv->dev = dev;
4076 	priv->pdev = pdev;
4077 
4078 	pdata = device_get_match_data(&pdev->dev);
4079 	if (pdata) {
4080 		priv->version = pdata->version;
4081 		priv->dma_max_burst_length = pdata->dma_max_burst_length;
4082 		priv->ephy_16nm = pdata->ephy_16nm;
4083 	} else {
4084 		priv->version = pd->genet_version;
4085 		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
4086 	}
4087 
4088 	priv->clk = devm_clk_get_optional(&priv->pdev->dev, "enet");
4089 	if (IS_ERR(priv->clk)) {
4090 		dev_dbg(&priv->pdev->dev, "failed to get enet clock\n");
4091 		err = PTR_ERR(priv->clk);
4092 		goto err;
4093 	}
4094 
4095 	err = clk_prepare_enable(priv->clk);
4096 	if (err)
4097 		goto err;
4098 
4099 	bcmgenet_set_hw_params(priv);
4100 
4101 	err = -EIO;
4102 	if (priv->hw_params->flags & GENET_HAS_40BITS)
4103 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
4104 	if (err)
4105 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4106 	if (err)
4107 		goto err_clk_disable;
4108 
4109 	/* Mii wait queue */
4110 	init_waitqueue_head(&priv->wq);
4111 	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
4112 	priv->rx_buf_len = RX_BUF_LENGTH;
4113 	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
4114 
4115 	priv->clk_wol = devm_clk_get_optional(&priv->pdev->dev, "enet-wol");
4116 	if (IS_ERR(priv->clk_wol)) {
4117 		dev_dbg(&priv->pdev->dev, "failed to get enet-wol clock\n");
4118 		err = PTR_ERR(priv->clk_wol);
4119 		goto err_clk_disable;
4120 	}
4121 
4122 	priv->clk_eee = devm_clk_get_optional(&priv->pdev->dev, "enet-eee");
4123 	if (IS_ERR(priv->clk_eee)) {
4124 		dev_dbg(&priv->pdev->dev, "failed to get enet-eee clock\n");
4125 		err = PTR_ERR(priv->clk_eee);
4126 		goto err_clk_disable;
4127 	}
4128 
4129 	/* If this is an internal GPHY, power it on now, before UniMAC is
4130 	 * brought out of reset as absolutely no UniMAC activity is allowed
4131 	 */
4132 	if (device_get_phy_mode(&pdev->dev) == PHY_INTERFACE_MODE_INTERNAL)
4133 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4134 
4135 	if (pd && !IS_ERR_OR_NULL(pd->mac_address))
4136 		eth_hw_addr_set(dev, pd->mac_address);
4137 	else
4138 		if (device_get_ethdev_address(&pdev->dev, dev))
4139 			if (has_acpi_companion(&pdev->dev)) {
4140 				u8 addr[ETH_ALEN];
4141 
4142 				bcmgenet_get_hw_addr(priv, addr);
4143 				eth_hw_addr_set(dev, addr);
4144 			}
4145 
4146 	if (!is_valid_ether_addr(dev->dev_addr)) {
4147 		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
4148 		eth_hw_addr_random(dev);
4149 	}
4150 
4151 	reset_umac(priv);
4152 
4153 	err = bcmgenet_mii_init(dev);
4154 	if (err)
4155 		goto err_clk_disable;
4156 
4157 	/* setup number of real queues  + 1 (GENET_V1 has 0 hardware queues
4158 	 * just the ring 16 descriptor based TX
4159 	 */
4160 	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
4161 	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
4162 
4163 	/* Set default coalescing parameters */
4164 	for (i = 0; i < priv->hw_params->rx_queues; i++)
4165 		priv->rx_rings[i].rx_max_coalesced_frames = 1;
4166 	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;
4167 
4168 	/* libphy will determine the link state */
4169 	netif_carrier_off(dev);
4170 
4171 	/* Turn off the main clock, WOL clock is handled separately */
4172 	clk_disable_unprepare(priv->clk);
4173 
4174 	err = register_netdev(dev);
4175 	if (err) {
4176 		bcmgenet_mii_exit(dev);
4177 		goto err;
4178 	}
4179 
4180 	return err;
4181 
4182 err_clk_disable:
4183 	clk_disable_unprepare(priv->clk);
4184 err:
4185 	free_netdev(dev);
4186 	return err;
4187 }
4188 
4189 static int bcmgenet_remove(struct platform_device *pdev)
4190 {
4191 	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
4192 
4193 	dev_set_drvdata(&pdev->dev, NULL);
4194 	unregister_netdev(priv->dev);
4195 	bcmgenet_mii_exit(priv->dev);
4196 	free_netdev(priv->dev);
4197 
4198 	return 0;
4199 }
4200 
4201 static void bcmgenet_shutdown(struct platform_device *pdev)
4202 {
4203 	bcmgenet_remove(pdev);
4204 }
4205 
4206 #ifdef CONFIG_PM_SLEEP
4207 static int bcmgenet_resume_noirq(struct device *d)
4208 {
4209 	struct net_device *dev = dev_get_drvdata(d);
4210 	struct bcmgenet_priv *priv = netdev_priv(dev);
4211 	int ret;
4212 	u32 reg;
4213 
4214 	if (!netif_running(dev))
4215 		return 0;
4216 
4217 	/* Turn on the clock */
4218 	ret = clk_prepare_enable(priv->clk);
4219 	if (ret)
4220 		return ret;
4221 
4222 	if (device_may_wakeup(d) && priv->wolopts) {
4223 		/* Account for Wake-on-LAN events and clear those events
4224 		 * (Some devices need more time between enabling the clocks
4225 		 *  and the interrupt register reflecting the wake event so
4226 		 *  read the register twice)
4227 		 */
4228 		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4229 		reg = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT);
4230 		if (reg & UMAC_IRQ_WAKE_EVENT)
4231 			pm_wakeup_event(&priv->pdev->dev, 0);
4232 	}
4233 
4234 	bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_WAKE_EVENT, INTRL2_CPU_CLEAR);
4235 
4236 	return 0;
4237 }
4238 
4239 static int bcmgenet_resume(struct device *d)
4240 {
4241 	struct net_device *dev = dev_get_drvdata(d);
4242 	struct bcmgenet_priv *priv = netdev_priv(dev);
4243 	struct bcmgenet_rxnfc_rule *rule;
4244 	unsigned long dma_ctrl;
4245 	int ret;
4246 
4247 	if (!netif_running(dev))
4248 		return 0;
4249 
4250 	/* From WOL-enabled suspend, switch to regular clock */
4251 	if (device_may_wakeup(d) && priv->wolopts)
4252 		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
4253 
4254 	/* If this is an internal GPHY, power it back on now, before UniMAC is
4255 	 * brought out of reset as absolutely no UniMAC activity is allowed
4256 	 */
4257 	if (priv->internal_phy)
4258 		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
4259 
4260 	bcmgenet_umac_reset(priv);
4261 
4262 	init_umac(priv);
4263 
4264 	phy_init_hw(dev->phydev);
4265 
4266 	/* Speed settings must be restored */
4267 	genphy_config_aneg(dev->phydev);
4268 	bcmgenet_mii_config(priv->dev, false);
4269 
4270 	/* Restore enabled features */
4271 	bcmgenet_set_features(dev, dev->features);
4272 
4273 	bcmgenet_set_hw_addr(priv, dev->dev_addr);
4274 
4275 	/* Restore hardware filters */
4276 	bcmgenet_hfb_clear(priv);
4277 	list_for_each_entry(rule, &priv->rxnfc_list, list)
4278 		if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
4279 			bcmgenet_hfb_create_rxnfc_filter(priv, rule);
4280 
4281 	/* Disable RX/TX DMA and flush TX queues */
4282 	dma_ctrl = bcmgenet_dma_disable(priv, false);
4283 
4284 	/* Reinitialize TDMA and RDMA and SW housekeeping */
4285 	ret = bcmgenet_init_dma(priv);
4286 	if (ret) {
4287 		netdev_err(dev, "failed to initialize DMA\n");
4288 		goto out_clk_disable;
4289 	}
4290 
4291 	/* Always enable ring 16 - descriptor ring */
4292 	bcmgenet_enable_dma(priv, dma_ctrl);
4293 
4294 	if (!device_may_wakeup(d))
4295 		phy_resume(dev->phydev);
4296 
4297 	bcmgenet_netif_start(dev);
4298 
4299 	netif_device_attach(dev);
4300 
4301 	return 0;
4302 
4303 out_clk_disable:
4304 	if (priv->internal_phy)
4305 		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4306 	clk_disable_unprepare(priv->clk);
4307 	return ret;
4308 }
4309 
4310 static int bcmgenet_suspend(struct device *d)
4311 {
4312 	struct net_device *dev = dev_get_drvdata(d);
4313 	struct bcmgenet_priv *priv = netdev_priv(dev);
4314 
4315 	if (!netif_running(dev))
4316 		return 0;
4317 
4318 	netif_device_detach(dev);
4319 
4320 	bcmgenet_netif_stop(dev, true);
4321 
4322 	if (!device_may_wakeup(d))
4323 		phy_suspend(dev->phydev);
4324 
4325 	/* Disable filtering */
4326 	bcmgenet_hfb_reg_writel(priv, 0, HFB_CTRL);
4327 
4328 	return 0;
4329 }
4330 
4331 static int bcmgenet_suspend_noirq(struct device *d)
4332 {
4333 	struct net_device *dev = dev_get_drvdata(d);
4334 	struct bcmgenet_priv *priv = netdev_priv(dev);
4335 	int ret = 0;
4336 
4337 	if (!netif_running(dev))
4338 		return 0;
4339 
4340 	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
4341 	if (device_may_wakeup(d) && priv->wolopts)
4342 		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
4343 	else if (priv->internal_phy)
4344 		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
4345 
4346 	/* Let the framework handle resumption and leave the clocks on */
4347 	if (ret)
4348 		return ret;
4349 
4350 	/* Turn off the clocks */
4351 	clk_disable_unprepare(priv->clk);
4352 
4353 	return 0;
4354 }
4355 #else
4356 #define bcmgenet_suspend	NULL
4357 #define bcmgenet_suspend_noirq	NULL
4358 #define bcmgenet_resume		NULL
4359 #define bcmgenet_resume_noirq	NULL
4360 #endif /* CONFIG_PM_SLEEP */
4361 
4362 static const struct dev_pm_ops bcmgenet_pm_ops = {
4363 	.suspend	= bcmgenet_suspend,
4364 	.suspend_noirq	= bcmgenet_suspend_noirq,
4365 	.resume		= bcmgenet_resume,
4366 	.resume_noirq	= bcmgenet_resume_noirq,
4367 };
4368 
4369 static const struct acpi_device_id genet_acpi_match[] = {
4370 	{ "BCM6E4E", (kernel_ulong_t)&bcm2711_plat_data },
4371 	{ },
4372 };
4373 MODULE_DEVICE_TABLE(acpi, genet_acpi_match);
4374 
4375 static struct platform_driver bcmgenet_driver = {
4376 	.probe	= bcmgenet_probe,
4377 	.remove	= bcmgenet_remove,
4378 	.shutdown = bcmgenet_shutdown,
4379 	.driver	= {
4380 		.name	= "bcmgenet",
4381 		.of_match_table = bcmgenet_match,
4382 		.pm	= &bcmgenet_pm_ops,
4383 		.acpi_match_table = genet_acpi_match,
4384 	},
4385 };
4386 module_platform_driver(bcmgenet_driver);
4387 
4388 MODULE_AUTHOR("Broadcom Corporation");
4389 MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
4390 MODULE_ALIAS("platform:bcmgenet");
4391 MODULE_LICENSE("GPL");
4392 MODULE_SOFTDEP("pre: mdio-bcm-unimac");
4393