// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the MAC core functions for this chip.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

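/* Basic MAC configuration: apply the GMAC_CORE_INIT defaults, force the
 * transmitter and the port speed bits when a fixed speed (hw->ps) is
 * requested, and enable the default MAC, PCS and FPE interrupt sources.
 */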
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= ~hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	/* Enable FPE interrupt */
	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
		value |= GMAC_INT_FPE_EN;

	writel(value, ioaddr + GMAC_INT_EN);
}

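/* Enable RX queue 'queue' and assign it to either the AVB or the DCB
 * traffic class.
 */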
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

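/* Program the user priorities served by RX queue 'queue'. Queues 0-3 are
 * configured through GMAC_RXQ_CTRL2, queues 4-7 through GMAC_RXQ_CTRL3.
 */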
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
						GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

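/* Route a packet type to RX queue 'queue' through GMAC_RXQ_CTRL1. AV control
 * and multicast/broadcast routing additionally need their enable bits set.
 */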
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

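/* Map RX queue 'queue' onto DMA channel 'chan'. Queues 0 and 4 use the
 * dedicated Q0/Q4 field, the remaining queues use the generic per-queue
 * fields of MTL_RXQ_DMA_MAP0/MAP1.
 */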
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

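/* Enable the AV (credit-based shaper) algorithm on an AVB TX queue and
 * program its send slope, idle slope and high/low credit parameters.
 */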
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit path to enter the
	 * LPI state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = et & STMMAC_ET_MAX;
	int regval;

	/* Program LPI entry timer value into register */
	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

	/* Enable/disable LPI entry timer */
	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (et)
		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
	else
		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

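/* Write one entry of the extended VLAN filter through the indirect
 * GMAC_VLAN_TAG_DATA/GMAC_VLAN_TAG interface and poll until the hardware
 * clears the OB (operation busy) bit.
 */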
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}

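/* Add VLAN ID 'vid' to the hardware RX VLAN filter. A single-entry filter
 * is programmed directly through MAC_VLAN_Tag, otherwise a free slot of
 * the extended filter table is used.
 */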
static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	if (hw->promisc) {
		netdev_err(dev,
			   "Adding VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	if (hw->promisc) {
		netdev_err(dev,
			   "Deleting VLAN in promisc mode not supported\n");
		return -EPERM;
	}

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_vlan_promisc_enable(struct net_device *dev,
				       struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, 0);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i] & ~GMAC_VLAN_TAG_DATA_VEN;
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		if (value & GMAC_VLAN_VTHM) {
			value &= ~GMAC_VLAN_VTHM;
			writel(value, ioaddr + GMAC_VLAN_TAG);
		}
	}
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

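/* Program the MAC packet filter: promiscuous and all-multicast modes,
 * the multicast hash filter, perfect unicast filtering and the VLAN
 * filter enable bit.
 */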
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);

	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en) {
		if (!hw->promisc) {
			hw->promisc = 1;
			dwmac4_vlan_promisc_enable(dev, hw);
		}
	} else {
		if (hw->promisc) {
			hw->promisc = 0;
			dwmac4_restore_hw_vlan_rx_fltr(dev, hw);
		}
	}
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

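/* Handle the MAC-level interrupt sources (MMC, PMT, LPI and PCS/RGMII
 * link events) and account them in the extra statistics.
 */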
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if (intr_status & mmc_tx_irq)
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RXFSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

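/* Select VLAN filtering mode in MAC_VLAN_Tag: hash matching when a hash
 * value is given, perfect matching otherwise, with the double VLAN bits
 * (EDVLP/ESVL/DOVLTC) set when is_double is requested.
 */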
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6, the SA and DA filters cannot both be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

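/* Decode the NRVF (number of VLAN filters) field of MAC_HW_Feature3 into
 * the number of available VLAN filter entries.
 */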
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}

int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}