1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
4  * DWC Ether MAC version 4.00 has been used for developing this code.
5  *
6  * This only implements the MAC core functions for this chip.
7  *
8  * Copyright (C) 2015  STMicroelectronics Ltd
9  *
10  * Author: Alexandre Torgue <alexandre.torgue@st.com>
11  */
12 
13 #include <linux/crc32.h>
14 #include <linux/slab.h>
15 #include <linux/ethtool.h>
16 #include <linux/io.h>
17 #include "stmmac.h"
18 #include "stmmac_pcs.h"
19 #include "dwmac4.h"
20 #include "dwmac5.h"
21 
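/*
 * Core init: apply the GMAC_CORE_INIT defaults, program a fixed port speed
 * when the platform requests one through hw->ps, and enable the default set
 * of MAC interrupts (plus PCS and, when the hardware supports it, FPE).
 */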
22 static void dwmac4_core_init(struct mac_device_info *hw,
23 			     struct net_device *dev)
24 {
25 	struct stmmac_priv *priv = netdev_priv(dev);
26 	void __iomem *ioaddr = hw->pcsr;
27 	u32 value = readl(ioaddr + GMAC_CONFIG);
28 
29 	value |= GMAC_CORE_INIT;
30 
31 	if (hw->ps) {
32 		value |= GMAC_CONFIG_TE;
33 
34 		value &= ~hw->link.speed_mask;
35 		switch (hw->ps) {
36 		case SPEED_1000:
37 			value |= hw->link.speed1000;
38 			break;
39 		case SPEED_100:
40 			value |= hw->link.speed100;
41 			break;
42 		case SPEED_10:
43 			value |= hw->link.speed10;
44 			break;
45 		}
46 	}
47 
48 	writel(value, ioaddr + GMAC_CONFIG);
49 
50 	/* Enable GMAC interrupts */
51 	value = GMAC_INT_DEFAULT_ENABLE;
52 
53 	if (hw->pcs)
54 		value |= GMAC_PCS_IRQ_DEFAULT;
55 
56 	/* Enable the FPE interrupt when the hardware advertises frame preemption */
57 	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
58 		value |= GMAC_INT_FPE_EN;
59 
60 	writel(value, ioaddr + GMAC_INT_EN);
61 
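	/* If the timestamp interrupt (TSIE) is part of the default enable
	 * mask, set up the wait queue used by the timestamp capture code.
	 * The check folds to a constant at build time.
	 */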
62 	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
63 		init_waitqueue_head(&priv->tstamp_busy_wait);
64 }
65 
66 static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
67 				   u8 mode, u32 queue)
68 {
69 	void __iomem *ioaddr = hw->pcsr;
70 	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
71 
72 	value &= GMAC_RX_QUEUE_CLEAR(queue);
73 	if (mode == MTL_QUEUE_AVB)
74 		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
75 	else if (mode == MTL_QUEUE_DCB)
76 		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
77 
78 	writel(value, ioaddr + GMAC_RXQ_CTRL0);
79 }
80 
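/*
 * Each RX queue owns a byte-wide PSRQ priority field: queues 0-3 live in
 * GMAC_RXQ_CTRL2 and queues 4-7 in GMAC_RXQ_CTRL3.
 */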
81 static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
82 				     u32 prio, u32 queue)
83 {
84 	void __iomem *ioaddr = hw->pcsr;
85 	u32 base_register;
86 	u32 value;
87 
88 	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
89 	if (queue >= 4)
90 		queue -= 4;
91 
92 	value = readl(ioaddr + base_register);
93 
94 	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
95 	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
96 						GMAC_RXQCTRL_PSRQX_MASK(queue);
97 	writel(value, ioaddr + base_register);
98 }
99 
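/*
 * TX queue priorities follow the same split: queues 0-3 are programmed in
 * GMAC_TXQ_PRTY_MAP0 and queues 4-7 in GMAC_TXQ_PRTY_MAP1.
 */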
100 static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
101 				     u32 prio, u32 queue)
102 {
103 	void __iomem *ioaddr = hw->pcsr;
104 	u32 base_register;
105 	u32 value;
106 
107 	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
108 	if (queue >= 4)
109 		queue -= 4;
110 
111 	value = readl(ioaddr + base_register);
112 
113 	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
114 	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
115 						GMAC_TXQCTRL_PSTQX_MASK(queue);
116 
117 	writel(value, ioaddr + base_register);
118 }
119 
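/*
 * Route a packet class (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to the given RX queue. @packet is one of the
 * PACKET_* routing types, which are 1-based, hence the "packet - 1"
 * indexing into route_possibilities[].
 */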
120 static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
121 				    u8 packet, u32 queue)
122 {
123 	void __iomem *ioaddr = hw->pcsr;
124 	u32 value;
125 
126 	static const struct stmmac_rx_routing route_possibilities[] = {
127 		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
128 		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
129 		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
130 		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
131 		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
132 	};
133 
134 	value = readl(ioaddr + GMAC_RXQ_CTRL1);
135 
136 	/* routing configuration */
137 	value &= ~route_possibilities[packet - 1].reg_mask;
138 	value |= (queue << route_possibilities[packet - 1].reg_shift) &
139 		 route_possibilities[packet - 1].reg_mask;
140 
141 	/* some packets require extra ops */
142 	if (packet == PACKET_AVCPQ) {
143 		value &= ~GMAC_RXQCTRL_TACPQE;
144 		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
145 	} else if (packet == PACKET_MCBCQ) {
146 		value &= ~GMAC_RXQCTRL_MCBCQEN;
147 		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
148 	}
149 
150 	writel(value, ioaddr + GMAC_RXQ_CTRL1);
151 }
152 
153 static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
154 					  u32 rx_alg)
155 {
156 	void __iomem *ioaddr = hw->pcsr;
157 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
158 
159 	value &= ~MTL_OPERATION_RAA;
160 	switch (rx_alg) {
161 	case MTL_RX_ALGORITHM_SP:
162 		value |= MTL_OPERATION_RAA_SP;
163 		break;
164 	case MTL_RX_ALGORITHM_WSP:
165 		value |= MTL_OPERATION_RAA_WSP;
166 		break;
167 	default:
168 		break;
169 	}
170 
171 	writel(value, ioaddr + MTL_OPERATION_MODE);
172 }
173 
174 static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
175 					  u32 tx_alg)
176 {
177 	void __iomem *ioaddr = hw->pcsr;
178 	u32 value = readl(ioaddr + MTL_OPERATION_MODE);
179 
180 	value &= ~MTL_OPERATION_SCHALG_MASK;
181 	switch (tx_alg) {
182 	case MTL_TX_ALGORITHM_WRR:
183 		value |= MTL_OPERATION_SCHALG_WRR;
184 		break;
185 	case MTL_TX_ALGORITHM_WFQ:
186 		value |= MTL_OPERATION_SCHALG_WFQ;
187 		break;
188 	case MTL_TX_ALGORITHM_DWRR:
189 		value |= MTL_OPERATION_SCHALG_DWRR;
190 		break;
191 	case MTL_TX_ALGORITHM_SP:
192 		value |= MTL_OPERATION_SCHALG_SP;
193 		break;
194 	default:
195 		break;
196 	}
197 
198 	writel(value, ioaddr + MTL_OPERATION_MODE);
199 }
200 
201 static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
202 					   struct mac_device_info *hw,
203 					   u32 weight, u32 queue)
204 {
205 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
206 	void __iomem *ioaddr = hw->pcsr;
207 	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
208 							     queue));
209 
210 	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
211 	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
212 	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
213 }
214 
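/*
 * Map an RX queue onto a DMA channel: queues 0-3 are configured through
 * MTL_RXQ_DMA_MAP0 and queues 4-7 through MTL_RXQ_DMA_MAP1, each queue
 * owning its own QxMDMACH channel field.
 */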
215 static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
216 {
217 	void __iomem *ioaddr = hw->pcsr;
218 	u32 value;
219 
220 	if (queue < 4) {
221 		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
222 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
223 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
224 		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
225 	} else {
226 		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
227 		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
228 		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
229 		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
230 	}
231 }
232 
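/*
 * Program the credit-based shaper (IEEE 802.1Qav) for an AVB queue: enable
 * the AV algorithm with credit control, then write the send slope, idle
 * slope (shared with the TX queue weight field), high credit and low credit
 * values.
 */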
233 static void dwmac4_config_cbs(struct stmmac_priv *priv,
234 			      struct mac_device_info *hw,
235 			      u32 send_slope, u32 idle_slope,
236 			      u32 high_credit, u32 low_credit, u32 queue)
237 {
238 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
239 	void __iomem *ioaddr = hw->pcsr;
240 	u32 value;
241 
242 	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
243 	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
244 	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
245 	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
246 	pr_debug("\tlow_credit: 0x%08x\n", low_credit);
247 
248 	/* enable AV algorithm */
249 	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
250 	value |= MTL_ETS_CTRL_AVALG;
251 	value |= MTL_ETS_CTRL_CC;
252 	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
253 
254 	/* configure send slope */
255 	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
256 							    queue));
257 	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
258 	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
259 	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
260 							    queue));
261 
262 	/* configure idle slope (same register as tx weight) */
263 	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);
264 
265 	/* configure high credit */
266 	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
267 	value &= ~MTL_HIGH_CRED_HC_MASK;
268 	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
269 	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
270 
271 	/* configure low credit */
272 	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
273 	value &= ~MTL_HIGH_CRED_LC_MASK;
274 	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
275 	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
276 }
277 
278 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
279 {
280 	void __iomem *ioaddr = hw->pcsr;
281 	int i;
282 
283 	for (i = 0; i < GMAC_REG_NUM; i++)
284 		reg_space[i] = readl(ioaddr + i * 4);
285 }
286 
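/*
 * Enable or disable RX checksum offload (IPC) according to hw->rx_csum and
 * report whether the hardware accepted the setting.
 */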
287 static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
288 {
289 	void __iomem *ioaddr = hw->pcsr;
290 	u32 value = readl(ioaddr + GMAC_CONFIG);
291 
292 	if (hw->rx_csum)
293 		value |= GMAC_CONFIG_IPC;
294 	else
295 		value &= ~GMAC_CONFIG_IPC;
296 
297 	writel(value, ioaddr + GMAC_CONFIG);
298 
299 	value = readl(ioaddr + GMAC_CONFIG);
300 
301 	return !!(value & GMAC_CONFIG_IPC);
302 }
303 
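/*
 * Program the PMT register for Wake-on-LAN: magic packet and/or global
 * unicast wake-up frames. The receiver is re-enabled before entering
 * power-down; writing 0 disables wake-up.
 */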
304 static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
305 {
306 	void __iomem *ioaddr = hw->pcsr;
307 	unsigned int pmt = 0;
308 	u32 config;
309 
310 	if (mode & WAKE_MAGIC) {
311 		pr_debug("GMAC: WOL Magic frame\n");
312 		pmt |= power_down | magic_pkt_en;
313 	}
314 	if (mode & WAKE_UCAST) {
315 		pr_debug("GMAC: WOL on global unicast\n");
316 		pmt |= power_down | global_unicast | wake_up_frame_en;
317 	}
318 
319 	if (pmt) {
320 		/* The receiver must be enabled for WOL before powering down */
321 		config = readl(ioaddr + GMAC_CONFIG);
322 		config |= GMAC_CONFIG_RE;
323 		writel(config, ioaddr + GMAC_CONFIG);
324 	}
325 	writel(pmt, ioaddr + GMAC_PMT);
326 }
327 
328 static void dwmac4_set_umac_addr(struct mac_device_info *hw,
329 				 const unsigned char *addr, unsigned int reg_n)
330 {
331 	void __iomem *ioaddr = hw->pcsr;
332 
333 	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
334 				   GMAC_ADDR_LOW(reg_n));
335 }
336 
337 static void dwmac4_get_umac_addr(struct mac_device_info *hw,
338 				 unsigned char *addr, unsigned int reg_n)
339 {
340 	void __iomem *ioaddr = hw->pcsr;
341 
342 	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
343 				   GMAC_ADDR_LOW(reg_n));
344 }
345 
346 static void dwmac4_set_eee_mode(struct mac_device_info *hw,
347 				bool en_tx_lpi_clockgating)
348 {
349 	void __iomem *ioaddr = hw->pcsr;
350 	u32 value;
351 
352 	/* Enable the link status receive on the RGMII, SGMII or SMII
353 	 * receive path and instruct the transmitter to enter the LPI
354 	 * state.
355 	 */
356 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
357 	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
358 
359 	if (en_tx_lpi_clockgating)
360 		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
361 
362 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
363 }
364 
365 static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
366 {
367 	void __iomem *ioaddr = hw->pcsr;
368 	u32 value;
369 
370 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
371 	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
372 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
373 }
374 
375 static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
376 {
377 	void __iomem *ioaddr = hw->pcsr;
378 	u32 value;
379 
380 	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
381 
382 	if (link)
383 		value |= GMAC4_LPI_CTRL_STATUS_PLS;
384 	else
385 		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
386 
387 	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
388 }
389 
390 static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
391 {
392 	void __iomem *ioaddr = hw->pcsr;
393 	int value = et & STMMAC_ET_MAX;
394 	int regval;
395 
396 	/* Program LPI entry timer value into register */
397 	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
398 
399 	/* Enable/disable LPI entry timer */
400 	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
401 	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
402 
403 	if (et)
404 		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
405 	else
406 		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
407 
408 	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
409 }
410 
411 static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
412 {
413 	void __iomem *ioaddr = hw->pcsr;
414 	int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
415 
416 	/* Program the timers in the LPI timer control register:
417 	 * LS: minimum time (ms) for which the link
418 	 *  status from PHY should be ok before transmitting
419 	 *  the LPI pattern.
420 	 * TW: minimum time (us) for which the core waits
421 	 *  after it has stopped transmitting the LPI pattern.
422 	 */
423 	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
424 }
425 
426 static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
427 {
428 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
429 	u32 val;
430 
431 	val = readl(ioaddr + GMAC_VLAN_TAG);
432 	val &= ~GMAC_VLAN_TAG_VID;
433 	val |= GMAC_VLAN_TAG_ETV | vid;
434 
435 	writel(val, ioaddr + GMAC_VLAN_TAG);
436 }
437 
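/*
 * Write one entry of the extended VLAN filter: latch the filter data in
 * GMAC_VLAN_TAG_DATA, select the entry offset and set the OB (operation
 * busy) bit in GMAC_VLAN_TAG, then poll until the hardware clears OB.
 */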
438 static int dwmac4_write_vlan_filter(struct net_device *dev,
439 				    struct mac_device_info *hw,
440 				    u8 index, u32 data)
441 {
442 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
443 	int i, timeout = 10;
444 	u32 val;
445 
446 	if (index >= hw->num_vlan)
447 		return -EINVAL;
448 
449 	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
450 
451 	val = readl(ioaddr + GMAC_VLAN_TAG);
452 	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
453 		GMAC_VLAN_TAG_CTRL_CT |
454 		GMAC_VLAN_TAG_CTRL_OB);
455 	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
456 
457 	writel(val, ioaddr + GMAC_VLAN_TAG);
458 
459 	for (i = 0; i < timeout; i++) {
460 		val = readl(ioaddr + GMAC_VLAN_TAG);
461 		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
462 			return 0;
463 		udelay(1);
464 	}
465 
466 	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
467 
468 	return -EBUSY;
469 }
470 
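/*
 * Add a VLAN ID to the RX filter. With a single-entry filter the VID is
 * written directly into GMAC_VLAN_TAG; with the extended filter a free slot
 * (VEN clear) is picked, or the call succeeds silently if the entry already
 * exists.
 */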
471 static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
472 				      struct mac_device_info *hw,
473 				      __be16 proto, u16 vid)
474 {
475 	int index = -1;
476 	u32 val = 0;
477 	int i, ret;
478 
479 	if (vid > 4095)
480 		return -EINVAL;
481 
482 	/* Single Rx VLAN Filter */
483 	if (hw->num_vlan == 1) {
484 		/* For single VLAN filter, VID 0 means VLAN promiscuous */
485 		if (vid == 0) {
486 			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
487 			return -EPERM;
488 		}
489 
490 		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
491 			netdev_err(dev, "Only single VLAN ID supported\n");
492 			return -EPERM;
493 		}
494 
495 		hw->vlan_filter[0] = vid;
496 		dwmac4_write_single_vlan(dev, vid);
497 
498 		return 0;
499 	}
500 
501 	/* Extended Rx VLAN Filter Enable */
502 	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
503 
504 	for (i = 0; i < hw->num_vlan; i++) {
505 		if (hw->vlan_filter[i] == val)
506 			return 0;
507 		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
508 			index = i;
509 	}
510 
511 	if (index == -1) {
512 		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %u)\n",
513 			   hw->num_vlan);
514 		return -EPERM;
515 	}
516 
517 	ret = dwmac4_write_vlan_filter(dev, hw, index, val);
518 
519 	if (!ret)
520 		hw->vlan_filter[index] = val;
521 
522 	return ret;
523 }
524 
525 static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
526 				      struct mac_device_info *hw,
527 				      __be16 proto, u16 vid)
528 {
529 	int i, ret = 0;
530 
531 	/* Single Rx VLAN Filter */
532 	if (hw->num_vlan == 1) {
533 		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
534 			hw->vlan_filter[0] = 0;
535 			dwmac4_write_single_vlan(dev, 0);
536 		}
537 		return 0;
538 	}
539 
540 	/* Extended Rx VLAN Filter Enable */
541 	for (i = 0; i < hw->num_vlan; i++) {
542 		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
543 			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
544 
545 			if (!ret)
546 				hw->vlan_filter[i] = 0;
547 			else
548 				return ret;
549 		}
550 	}
551 
552 	return ret;
553 }
554 
555 static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
556 					   struct mac_device_info *hw)
557 {
558 	void __iomem *ioaddr = hw->pcsr;
559 	u32 value;
560 	u32 hash;
561 	u32 val;
562 	int i;
563 
564 	/* Single Rx VLAN Filter */
565 	if (hw->num_vlan == 1) {
566 		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
567 		return;
568 	}
569 
570 	/* Extended Rx VLAN Filter Enable */
571 	for (i = 0; i < hw->num_vlan; i++) {
572 		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
573 			val = hw->vlan_filter[i];
574 			dwmac4_write_vlan_filter(dev, hw, i, val);
575 		}
576 	}
577 
578 	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
579 	if (hash & GMAC_VLAN_VLHT) {
580 		value = readl(ioaddr + GMAC_VLAN_TAG);
581 		value |= GMAC_VLAN_VTHM;
582 		writel(value, ioaddr + GMAC_VLAN_TAG);
583 	}
584 }
585 
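/*
 * Program the RX packet filter: promiscuous mode (optionally steering
 * VLAN-filter failures to a dedicated queue), pass-all-multicast, hash
 * filtering for multicast, perfect filtering for unicast (falling back to
 * promiscuous mode when the address list overflows) and VLAN filtering.
 */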
586 static void dwmac4_set_filter(struct mac_device_info *hw,
587 			      struct net_device *dev)
588 {
589 	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
590 	int numhashregs = (hw->multicast_filter_bins >> 5);
591 	int mcbitslog2 = hw->mcast_bits_log2;
592 	unsigned int value;
593 	u32 mc_filter[8];
594 	int i;
595 
596 	memset(mc_filter, 0, sizeof(mc_filter));
597 
598 	value = readl(ioaddr + GMAC_PACKET_FILTER);
599 	value &= ~GMAC_PACKET_FILTER_HMC;
600 	value &= ~GMAC_PACKET_FILTER_HPF;
601 	value &= ~GMAC_PACKET_FILTER_PCF;
602 	value &= ~GMAC_PACKET_FILTER_PM;
603 	value &= ~GMAC_PACKET_FILTER_PR;
604 	value &= ~GMAC_PACKET_FILTER_RA;
605 	if (dev->flags & IFF_PROMISC) {
606 		/* VLAN Tag Filter Fail Packets Queuing */
607 		if (hw->vlan_fail_q_en) {
608 			value = readl(ioaddr + GMAC_RXQ_CTRL4);
609 			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
610 			value |= GMAC_RXQCTRL_VFFQE |
611 				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
612 			writel(value, ioaddr + GMAC_RXQ_CTRL4);
613 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
614 		} else {
615 			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
616 		}
617 
618 	} else if ((dev->flags & IFF_ALLMULTI) ||
619 		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
620 		/* Pass all multi */
621 		value |= GMAC_PACKET_FILTER_PM;
622 		/* Set all the bits of the HASH tab */
623 		memset(mc_filter, 0xff, sizeof(mc_filter));
624 	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
625 		struct netdev_hw_addr *ha;
626 
627 		/* Hash filter for multicast */
628 		value |= GMAC_PACKET_FILTER_HMC;
629 
630 		netdev_for_each_mc_addr(ha, dev) {
631 			/* The upper n bits of the calculated CRC are used to
632 			 * index the contents of the hash table. The number of
633 			 * bits used depends on the hardware configuration
634 			 * selected at core configuration time.
635 			 */
636 			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
637 					ETH_ALEN)) >> (32 - mcbitslog2);
638 			/* The most significant bit determines the register to
639 			 * use (H/L) while the other 5 bits determine the bit
640 			 * within the register.
641 			 */
642 			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
643 		}
644 	}
645 
646 	for (i = 0; i < numhashregs; i++)
647 		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
648 
649 	value |= GMAC_PACKET_FILTER_HPF;
650 
651 	/* Handle multiple unicast addresses */
652 	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
653 		/* Switch to promiscuous mode if more unicast addresses
654 		 * are required than the hardware can filter perfectly
655 		 */
656 		value |= GMAC_PACKET_FILTER_PR;
657 	} else {
658 		struct netdev_hw_addr *ha;
659 		int reg = 1;
660 
661 		netdev_for_each_uc_addr(ha, dev) {
662 			dwmac4_set_umac_addr(hw, ha->addr, reg);
663 			reg++;
664 		}
665 
666 		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
667 			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
668 			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
669 			reg++;
670 		}
671 	}
672 
673 	/* VLAN filtering */
674 	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
675 		value &= ~GMAC_PACKET_FILTER_VTFE;
676 	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
677 		value |= GMAC_PACKET_FILTER_VTFE;
678 
679 	writel(value, ioaddr + GMAC_PACKET_FILTER);
680 }
681 
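/*
 * Configure IEEE 802.3x flow control: RX flow control is global, while TX
 * flow control (and the pause time, in full duplex) is programmed per TX
 * queue.
 */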
682 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
683 			     unsigned int fc, unsigned int pause_time,
684 			     u32 tx_cnt)
685 {
686 	void __iomem *ioaddr = hw->pcsr;
687 	unsigned int flow = 0;
688 	u32 queue = 0;
689 
690 	pr_debug("GMAC Flow-Control:\n");
691 	if (fc & FLOW_RX) {
692 		pr_debug("\tReceive Flow-Control ON\n");
693 		flow |= GMAC_RX_FLOW_CTRL_RFE;
694 	} else {
695 		pr_debug("\tReceive Flow-Control OFF\n");
696 	}
697 	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
698 
699 	if (fc & FLOW_TX) {
700 		pr_debug("\tTransmit Flow-Control ON\n");
701 
702 		if (duplex)
703 			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
704 
705 		for (queue = 0; queue < tx_cnt; queue++) {
706 			flow = GMAC_TX_FLOW_CTRL_TFE;
707 
708 			if (duplex)
709 				flow |=
710 				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
711 
712 			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
713 		}
714 	} else {
715 		for (queue = 0; queue < tx_cnt; queue++)
716 			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
717 	}
718 }
719 
720 static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
721 			    bool loopback)
722 {
723 	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
724 }
725 
726 static void dwmac4_rane(void __iomem *ioaddr, bool restart)
727 {
728 	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
729 }
730 
731 static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
732 {
733 	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
734 }
735 
736 /* RGMII or SMII interface */
737 static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
738 {
739 	u32 status;
740 
741 	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
742 	x->irq_rgmii_n++;
743 
744 	/* Check the link status */
745 	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
746 		int speed_value;
747 
748 		x->pcs_link = 1;
749 
750 		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
751 			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
752 		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
753 			x->pcs_speed = SPEED_1000;
754 		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
755 			x->pcs_speed = SPEED_100;
756 		else
757 			x->pcs_speed = SPEED_10;
758 
759 		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
760 
761 		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
762 			x->pcs_duplex ? "Full" : "Half");
763 	} else {
764 		x->pcs_link = 0;
765 		pr_info("Link is Down\n");
766 	}
767 }
768 
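/*
 * Check the per-queue MTL interrupt status; only the RX overflow event is
 * of interest here and it is acknowledged by writing the status back.
 */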
769 static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
770 				 struct mac_device_info *hw, u32 chan)
771 {
772 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
773 	void __iomem *ioaddr = hw->pcsr;
774 	u32 mtl_int_qx_status;
775 	int ret = 0;
776 
777 	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
778 
779 	/* Check MTL Interrupt */
780 	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
781 		/* read Queue x Interrupt status */
782 		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
783 							      chan));
784 
785 		if (status & MTL_RX_OVERFLOW_INT) {
786 			/* clear the interrupt */
787 			writel(status | MTL_RX_OVERFLOW_INT,
788 			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
789 			ret = CORE_IRQ_MTL_RX_OVERFLOW;
790 		}
791 	}
792 
793 	return ret;
794 }
795 
796 static int dwmac4_irq_status(struct mac_device_info *hw,
797 			     struct stmmac_extra_stats *x)
798 {
799 	void __iomem *ioaddr = hw->pcsr;
800 	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
801 	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
802 	int ret = 0;
803 
804 	/* Discard disabled bits */
805 	intr_status &= intr_enable;
806 
807 	/* MMC interrupts are only counted here and not otherwise handled. */
808 	if (unlikely(intr_status & mmc_tx_irq))
809 		x->mmc_tx_irq_n++;
810 	if (unlikely(intr_status & mmc_rx_irq))
811 		x->mmc_rx_irq_n++;
812 	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
813 		x->mmc_rx_csum_offload_irq_n++;
814 	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
815 	if (unlikely(intr_status & pmt_irq)) {
816 		readl(ioaddr + GMAC_PMT);
817 		x->irq_receive_pmt_irq_n++;
818 	}
819 
820 	/* MAC tx/rx EEE LPI entry/exit interrupts */
821 	if (intr_status & lpi_irq) {
822 		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
823 		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
824 
825 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
826 			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
827 			x->irq_tx_path_in_lpi_mode_n++;
828 		}
829 		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
830 			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
831 			x->irq_tx_path_exit_lpi_mode_n++;
832 		}
833 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
834 			x->irq_rx_path_in_lpi_mode_n++;
835 		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
836 			x->irq_rx_path_exit_lpi_mode_n++;
837 	}
838 
839 	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
840 	if (intr_status & PCS_RGSMIIIS_IRQ)
841 		dwmac4_phystatus(ioaddr, x);
842 
843 	return ret;
844 }
845 
846 static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
847 			 struct stmmac_extra_stats *x,
848 			 u32 rx_queues, u32 tx_queues)
849 {
850 	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
851 	u32 value;
852 	u32 queue;
853 
854 	for (queue = 0; queue < tx_queues; queue++) {
855 		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));
856 
857 		if (value & MTL_DEBUG_TXSTSFSTS)
858 			x->mtl_tx_status_fifo_full++;
859 		if (value & MTL_DEBUG_TXFSTS)
860 			x->mtl_tx_fifo_not_empty++;
861 		if (value & MTL_DEBUG_TWCSTS)
862 			x->mmtl_fifo_ctrl++;
863 		if (value & MTL_DEBUG_TRCSTS_MASK) {
864 			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
865 				     >> MTL_DEBUG_TRCSTS_SHIFT;
866 			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
867 				x->mtl_tx_fifo_read_ctrl_write++;
868 			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
869 				x->mtl_tx_fifo_read_ctrl_wait++;
870 			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
871 				x->mtl_tx_fifo_read_ctrl_read++;
872 			else
873 				x->mtl_tx_fifo_read_ctrl_idle++;
874 		}
875 		if (value & MTL_DEBUG_TXPAUSED)
876 			x->mac_tx_in_pause++;
877 	}
878 
879 	for (queue = 0; queue < rx_queues; queue++) {
880 		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));
881 
882 		if (value & MTL_DEBUG_RXFSTS_MASK) {
883 			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
884 				     >> MTL_DEBUG_RXFSTS_SHIFT;
885 
886 			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
887 				x->mtl_rx_fifo_fill_level_full++;
888 			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
889 				x->mtl_rx_fifo_fill_above_thresh++;
890 			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
891 				x->mtl_rx_fifo_fill_below_thresh++;
892 			else
893 				x->mtl_rx_fifo_fill_level_empty++;
894 		}
895 		if (value & MTL_DEBUG_RRCSTS_MASK) {
896 			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
897 				     MTL_DEBUG_RRCSTS_SHIFT;
898 
899 			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
900 				x->mtl_rx_fifo_read_ctrl_flush++;
901 			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
902 				x->mtl_rx_fifo_read_ctrl_read_data++;
903 			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
904 				x->mtl_rx_fifo_read_ctrl_status++;
905 			else
906 				x->mtl_rx_fifo_read_ctrl_idle++;
907 		}
908 		if (value & MTL_DEBUG_RWCSTS)
909 			x->mtl_rx_fifo_ctrl_active++;
910 	}
911 
912 	/* GMAC debug */
913 	value = readl(ioaddr + GMAC_DEBUG);
914 
915 	if (value & GMAC_DEBUG_TFCSTS_MASK) {
916 		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
917 			      >> GMAC_DEBUG_TFCSTS_SHIFT;
918 
919 		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
920 			x->mac_tx_frame_ctrl_xfer++;
921 		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
922 			x->mac_tx_frame_ctrl_pause++;
923 		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
924 			x->mac_tx_frame_ctrl_wait++;
925 		else
926 			x->mac_tx_frame_ctrl_idle++;
927 	}
928 	if (value & GMAC_DEBUG_TPESTS)
929 		x->mac_gmii_tx_proto_engine++;
930 	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
931 		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
932 					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
933 	if (value & GMAC_DEBUG_RPESTS)
934 		x->mac_gmii_rx_proto_engine++;
935 }
936 
937 static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
938 {
939 	u32 value = readl(ioaddr + GMAC_CONFIG);
940 
941 	if (enable)
942 		value |= GMAC_CONFIG_LM;
943 	else
944 		value &= ~GMAC_CONFIG_LM;
945 
946 	writel(value, ioaddr + GMAC_CONFIG);
947 }
948 
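/*
 * Select the VLAN tag filtering mode: hash filtering when a hash value is
 * provided, perfect VID matching otherwise, or no VLAN tag matching at all.
 * Double (S-VLAN + C-VLAN) tagging is handled via the EDVLP, ESVL and
 * DOVLTC bits.
 */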
949 static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
950 				    __le16 perfect_match, bool is_double)
951 {
952 	void __iomem *ioaddr = hw->pcsr;
953 	u32 value;
954 
955 	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
956 
957 	value = readl(ioaddr + GMAC_VLAN_TAG);
958 
959 	if (hash) {
960 		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
961 		if (is_double) {
962 			value |= GMAC_VLAN_EDVLP;
963 			value |= GMAC_VLAN_ESVL;
964 			value |= GMAC_VLAN_DOVLTC;
965 		}
966 
967 		writel(value, ioaddr + GMAC_VLAN_TAG);
968 	} else if (perfect_match) {
969 		u32 value = GMAC_VLAN_ETV;
970 
971 		if (is_double) {
972 			value |= GMAC_VLAN_EDVLP;
973 			value |= GMAC_VLAN_ESVL;
974 			value |= GMAC_VLAN_DOVLTC;
975 		}
976 
977 		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
978 	} else {
979 		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
980 		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
981 		value &= ~GMAC_VLAN_DOVLTC;
982 		value &= ~GMAC_VLAN_VID;
983 
984 		writel(value, ioaddr + GMAC_VLAN_TAG);
985 	}
986 }
987 
988 static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
989 {
990 	u32 value = readl(ioaddr + GMAC_CONFIG);
991 
992 	value &= ~GMAC_CONFIG_SARC;
993 	value |= val << GMAC_CONFIG_SARC_SHIFT;
994 
995 	writel(value, ioaddr + GMAC_CONFIG);
996 }
997 
998 static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
999 {
1000 	void __iomem *ioaddr = hw->pcsr;
1001 	u32 value;
1002 
1003 	value = readl(ioaddr + GMAC_VLAN_INCL);
1004 	value |= GMAC_VLAN_VLTI;
1005 	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
1006 	value &= ~GMAC_VLAN_VLC;
1007 	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
1008 	writel(value, ioaddr + GMAC_VLAN_INCL);
1009 }
1010 
1011 static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
1012 				   u32 addr)
1013 {
1014 	void __iomem *ioaddr = hw->pcsr;
1015 	u32 value;
1016 
1017 	writel(addr, ioaddr + GMAC_ARP_ADDR);
1018 
1019 	value = readl(ioaddr + GMAC_CONFIG);
1020 	if (en)
1021 		value |= GMAC_CONFIG_ARPEN;
1022 	else
1023 		value &= ~GMAC_CONFIG_ARPEN;
1024 	writel(value, ioaddr + GMAC_CONFIG);
1025 }
1026 
1027 static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1028 				   bool en, bool ipv6, bool sa, bool inv,
1029 				   u32 match)
1030 {
1031 	void __iomem *ioaddr = hw->pcsr;
1032 	u32 value;
1033 
1034 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1035 	value |= GMAC_PACKET_FILTER_IPFE;
1036 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1037 
1038 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1039 
1040 	/* For IPv6, the SA and DA filters cannot both be active */
1041 	if (ipv6) {
1042 		value |= GMAC_L3PEN0;
1043 		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
1044 		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
1045 		if (sa) {
1046 			value |= GMAC_L3SAM0;
1047 			if (inv)
1048 				value |= GMAC_L3SAIM0;
1049 		} else {
1050 			value |= GMAC_L3DAM0;
1051 			if (inv)
1052 				value |= GMAC_L3DAIM0;
1053 		}
1054 	} else {
1055 		value &= ~GMAC_L3PEN0;
1056 		if (sa) {
1057 			value |= GMAC_L3SAM0;
1058 			if (inv)
1059 				value |= GMAC_L3SAIM0;
1060 		} else {
1061 			value |= GMAC_L3DAM0;
1062 			if (inv)
1063 				value |= GMAC_L3DAIM0;
1064 		}
1065 	}
1066 
1067 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1068 
1069 	if (sa) {
1070 		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
1071 	} else {
1072 		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
1073 	}
1074 
1075 	if (!en)
1076 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1077 
1078 	return 0;
1079 }
1080 
1081 static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1082 				   bool en, bool udp, bool sa, bool inv,
1083 				   u32 match)
1084 {
1085 	void __iomem *ioaddr = hw->pcsr;
1086 	u32 value;
1087 
1088 	value = readl(ioaddr + GMAC_PACKET_FILTER);
1089 	value |= GMAC_PACKET_FILTER_IPFE;
1090 	writel(value, ioaddr + GMAC_PACKET_FILTER);
1091 
1092 	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
1093 	if (udp) {
1094 		value |= GMAC_L4PEN0;
1095 	} else {
1096 		value &= ~GMAC_L4PEN0;
1097 	}
1098 
1099 	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
1100 	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
1101 	if (sa) {
1102 		value |= GMAC_L4SPM0;
1103 		if (inv)
1104 			value |= GMAC_L4SPIM0;
1105 	} else {
1106 		value |= GMAC_L4DPM0;
1107 		if (inv)
1108 			value |= GMAC_L4DPIM0;
1109 	}
1110 
1111 	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
1112 
1113 	if (sa) {
1114 		value = match & GMAC_L4SP0;
1115 	} else {
1116 		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
1117 	}
1118 
1119 	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
1120 
1121 	if (!en)
1122 		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
1123 
1124 	return 0;
1125 }
1126 
1127 const struct stmmac_ops dwmac4_ops = {
1128 	.core_init = dwmac4_core_init,
1129 	.set_mac = stmmac_set_mac,
1130 	.rx_ipc = dwmac4_rx_ipc_enable,
1131 	.rx_queue_enable = dwmac4_rx_queue_enable,
1132 	.rx_queue_prio = dwmac4_rx_queue_priority,
1133 	.tx_queue_prio = dwmac4_tx_queue_priority,
1134 	.rx_queue_routing = dwmac4_rx_queue_routing,
1135 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1136 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1137 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1138 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1139 	.config_cbs = dwmac4_config_cbs,
1140 	.dump_regs = dwmac4_dump_regs,
1141 	.host_irq_status = dwmac4_irq_status,
1142 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1143 	.flow_ctrl = dwmac4_flow_ctrl,
1144 	.pmt = dwmac4_pmt,
1145 	.set_umac_addr = dwmac4_set_umac_addr,
1146 	.get_umac_addr = dwmac4_get_umac_addr,
1147 	.set_eee_mode = dwmac4_set_eee_mode,
1148 	.reset_eee_mode = dwmac4_reset_eee_mode,
1149 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1150 	.set_eee_timer = dwmac4_set_eee_timer,
1151 	.set_eee_pls = dwmac4_set_eee_pls,
1152 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1153 	.pcs_rane = dwmac4_rane,
1154 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1155 	.debug = dwmac4_debug,
1156 	.set_filter = dwmac4_set_filter,
1157 	.set_mac_loopback = dwmac4_set_mac_loopback,
1158 	.update_vlan_hash = dwmac4_update_vlan_hash,
1159 	.sarc_configure = dwmac4_sarc_configure,
1160 	.enable_vlan = dwmac4_enable_vlan,
1161 	.set_arp_offload = dwmac4_set_arp_offload,
1162 	.config_l3_filter = dwmac4_config_l3_filter,
1163 	.config_l4_filter = dwmac4_config_l4_filter,
1164 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1165 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1166 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1167 };
1168 
1169 const struct stmmac_ops dwmac410_ops = {
1170 	.core_init = dwmac4_core_init,
1171 	.set_mac = stmmac_dwmac4_set_mac,
1172 	.rx_ipc = dwmac4_rx_ipc_enable,
1173 	.rx_queue_enable = dwmac4_rx_queue_enable,
1174 	.rx_queue_prio = dwmac4_rx_queue_priority,
1175 	.tx_queue_prio = dwmac4_tx_queue_priority,
1176 	.rx_queue_routing = dwmac4_rx_queue_routing,
1177 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1178 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1179 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1180 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1181 	.config_cbs = dwmac4_config_cbs,
1182 	.dump_regs = dwmac4_dump_regs,
1183 	.host_irq_status = dwmac4_irq_status,
1184 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1185 	.flow_ctrl = dwmac4_flow_ctrl,
1186 	.pmt = dwmac4_pmt,
1187 	.set_umac_addr = dwmac4_set_umac_addr,
1188 	.get_umac_addr = dwmac4_get_umac_addr,
1189 	.set_eee_mode = dwmac4_set_eee_mode,
1190 	.reset_eee_mode = dwmac4_reset_eee_mode,
1191 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1192 	.set_eee_timer = dwmac4_set_eee_timer,
1193 	.set_eee_pls = dwmac4_set_eee_pls,
1194 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1195 	.pcs_rane = dwmac4_rane,
1196 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1197 	.debug = dwmac4_debug,
1198 	.set_filter = dwmac4_set_filter,
1199 	.flex_pps_config = dwmac5_flex_pps_config,
1200 	.set_mac_loopback = dwmac4_set_mac_loopback,
1201 	.update_vlan_hash = dwmac4_update_vlan_hash,
1202 	.sarc_configure = dwmac4_sarc_configure,
1203 	.enable_vlan = dwmac4_enable_vlan,
1204 	.set_arp_offload = dwmac4_set_arp_offload,
1205 	.config_l3_filter = dwmac4_config_l3_filter,
1206 	.config_l4_filter = dwmac4_config_l4_filter,
1207 	.est_configure = dwmac5_est_configure,
1208 	.est_irq_status = dwmac5_est_irq_status,
1209 	.fpe_configure = dwmac5_fpe_configure,
1210 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1211 	.fpe_irq_status = dwmac5_fpe_irq_status,
1212 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1213 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1214 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1215 };
1216 
1217 const struct stmmac_ops dwmac510_ops = {
1218 	.core_init = dwmac4_core_init,
1219 	.set_mac = stmmac_dwmac4_set_mac,
1220 	.rx_ipc = dwmac4_rx_ipc_enable,
1221 	.rx_queue_enable = dwmac4_rx_queue_enable,
1222 	.rx_queue_prio = dwmac4_rx_queue_priority,
1223 	.tx_queue_prio = dwmac4_tx_queue_priority,
1224 	.rx_queue_routing = dwmac4_rx_queue_routing,
1225 	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1226 	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1227 	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1228 	.map_mtl_to_dma = dwmac4_map_mtl_dma,
1229 	.config_cbs = dwmac4_config_cbs,
1230 	.dump_regs = dwmac4_dump_regs,
1231 	.host_irq_status = dwmac4_irq_status,
1232 	.host_mtl_irq_status = dwmac4_irq_mtl_status,
1233 	.flow_ctrl = dwmac4_flow_ctrl,
1234 	.pmt = dwmac4_pmt,
1235 	.set_umac_addr = dwmac4_set_umac_addr,
1236 	.get_umac_addr = dwmac4_get_umac_addr,
1237 	.set_eee_mode = dwmac4_set_eee_mode,
1238 	.reset_eee_mode = dwmac4_reset_eee_mode,
1239 	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
1240 	.set_eee_timer = dwmac4_set_eee_timer,
1241 	.set_eee_pls = dwmac4_set_eee_pls,
1242 	.pcs_ctrl_ane = dwmac4_ctrl_ane,
1243 	.pcs_rane = dwmac4_rane,
1244 	.pcs_get_adv_lp = dwmac4_get_adv_lp,
1245 	.debug = dwmac4_debug,
1246 	.set_filter = dwmac4_set_filter,
1247 	.safety_feat_config = dwmac5_safety_feat_config,
1248 	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
1249 	.safety_feat_dump = dwmac5_safety_feat_dump,
1250 	.rxp_config = dwmac5_rxp_config,
1251 	.flex_pps_config = dwmac5_flex_pps_config,
1252 	.set_mac_loopback = dwmac4_set_mac_loopback,
1253 	.update_vlan_hash = dwmac4_update_vlan_hash,
1254 	.sarc_configure = dwmac4_sarc_configure,
1255 	.enable_vlan = dwmac4_enable_vlan,
1256 	.set_arp_offload = dwmac4_set_arp_offload,
1257 	.config_l3_filter = dwmac4_config_l3_filter,
1258 	.config_l4_filter = dwmac4_config_l4_filter,
1259 	.est_configure = dwmac5_est_configure,
1260 	.est_irq_status = dwmac5_est_irq_status,
1261 	.fpe_configure = dwmac5_fpe_configure,
1262 	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
1263 	.fpe_irq_status = dwmac5_fpe_irq_status,
1264 	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
1265 	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
1266 	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
1267 };
1268 
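/*
 * The NRVF field of GMAC_HW_FEATURE3 encodes how many extended VLAN filter
 * entries the core provides; 0 means only the single GMAC_VLAN_TAG based
 * filter is available.
 */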
1269 static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
1270 {
1271 	u32 val, num_vlan;
1272 
1273 	val = readl(ioaddr + GMAC_HW_FEATURE3);
1274 	switch (val & GMAC_HW_FEAT_NRVF) {
1275 	case 0:
1276 		num_vlan = 1;
1277 		break;
1278 	case 1:
1279 		num_vlan = 4;
1280 		break;
1281 	case 2:
1282 		num_vlan = 8;
1283 		break;
1284 	case 3:
1285 		num_vlan = 16;
1286 		break;
1287 	case 4:
1288 		num_vlan = 24;
1289 		break;
1290 	case 5:
1291 		num_vlan = 32;
1292 		break;
1293 	default:
1294 		num_vlan = 1;
1295 	}
1296 
1297 	return num_vlan;
1298 }
1299 
1300 int dwmac4_setup(struct stmmac_priv *priv)
1301 {
1302 	struct mac_device_info *mac = priv->hw;
1303 
1304 	dev_info(priv->device, "\tDWMAC4/5\n");
1305 
1306 	priv->dev->priv_flags |= IFF_UNICAST_FLT;
1307 	mac->pcsr = priv->ioaddr;
1308 	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1309 	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1310 	mac->mcast_bits_log2 = 0;
1311 
1312 	if (mac->multicast_filter_bins)
1313 		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1314 
1315 	mac->link.duplex = GMAC_CONFIG_DM;
1316 	mac->link.speed10 = GMAC_CONFIG_PS;
1317 	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1318 	mac->link.speed1000 = 0;
1319 	mac->link.speed2500 = GMAC_CONFIG_FES;
1320 	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
1321 	mac->mii.addr = GMAC_MDIO_ADDR;
1322 	mac->mii.data = GMAC_MDIO_DATA;
1323 	mac->mii.addr_shift = 21;
1324 	mac->mii.addr_mask = GENMASK(25, 21);
1325 	mac->mii.reg_shift = 16;
1326 	mac->mii.reg_mask = GENMASK(20, 16);
1327 	mac->mii.clk_csr_shift = 8;
1328 	mac->mii.clk_csr_mask = GENMASK(11, 8);
1329 	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
1330 
1331 	return 0;
1332 }
1333