/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00 has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015 STMicroelectronics Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <net/dsa.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

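/* Set up the MAC core: program the basic GMAC_CONFIG bits (including
 * 2K/jumbo packet support based on the current MTU), optionally force
 * the speed when the PCS (hw->ps) is in use, and enable the default
 * GMAC and PCS interrupts.
 */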
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	int mtu = dev->mtu;

	value |= GMAC_CORE_INIT;

	if (mtu > 1500)
		value |= GMAC_CONFIG_2K;
	if (mtu > 2000)
		value |= GMAC_CONFIG_JE;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= ~hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	writel(value, ioaddr + GMAC_INT_EN);
}

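/* Enable RX queue 'queue' in GMAC_RXQ_CTRL0 and route it to either the
 * AVB or the DCB/generic path depending on 'mode'.
 */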
static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

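/* Program the priority field (PSRQx) that maps user priorities to RX
 * queue 'queue'. Queues 0-3 are configured through GMAC_RXQ_CTRL2 and
 * queues 4-7 through GMAC_RXQ_CTRL3.
 */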
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
	value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);
	writel(value, ioaddr + base_register);
}

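/* Program the priority field (PSTQx) for TX queue 'queue'. Queues 0-3
 * are configured through GMAC_TXQ_PRTY_MAP0 and queues 4-7 through
 * GMAC_TXQ_PRTY_MAP1.
 */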
static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
						GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

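/* Route a packet class (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to RX queue 'queue' via GMAC_RXQ_CTRL1. The AV
 * and MC/BC routes also need their dedicated enable bits to be set.
 */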
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

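/* Select the MTL RX arbitration algorithm (strict priority or weighted
 * strict priority) in MTL_OPERATION_MODE.
 */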
static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

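/* Select the MTL TX scheduling algorithm (WRR, WFQ, DWRR or strict
 * priority) in MTL_OPERATION_MODE.
 */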
static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

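/* Program the weight (ISCQW) used by the TX scheduler for 'queue'. The
 * same register field also holds the CBS idle slope.
 */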
static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
}

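/* Map RX queue 'queue' to DMA channel 'chan'. Queues 0-3 live in
 * MTL_RXQ_DMA_MAP0 and queues 4-7 in MTL_RXQ_DMA_MAP1; queues 0 and 4
 * use their own dedicated mask/shift macros.
 */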
static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4)
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
	else
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);

	if (queue == 0 || queue == 4) {
		value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
		value |= MTL_RXQ_DMA_Q04MDMACH(chan);
	} else {
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
	}

	if (queue < 4)
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	else
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
}

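/* Configure Credit-Based Shaping (IEEE 802.1Qav) for an AVB queue:
 * enable the AV algorithm with credit control and program the send
 * slope, idle slope and high/low credit limits.
 */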
static void dwmac4_config_cbs(struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));

	/* configure send slope */
	value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));

	/* configure low credit */
	value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

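/* Enable or disable RX checksum offload (IPC) according to hw->rx_csum
 * and return whether the core actually accepted the setting.
 */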
static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

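/* Program the PMT register for Wake-on-LAN (magic packet and/or global
 * unicast wake-up frames), keeping the receiver enabled so wake-up
 * packets can still be detected while powered down.
 */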
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

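/* Enter the EEE TX LPI mode: enable LPI transmission and automatic LPI
 * entry, optionally gating the TX clock while in LPI.
 */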
static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on the RGMII, SGMII or SMII
	 * receive path and instruct the transmitter to enter the
	 * LPI state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

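/* Program the RX packet filter: promiscuous, pass-all-multicast,
 * 64-bit hash filtering for multicast and perfect filtering for
 * unicast addresses, falling back to promiscuous mode when the perfect
 * filter is exhausted.
 */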
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	unsigned int value = 0;

	if (dev->flags & IFF_PROMISC) {
		value = GMAC_PACKET_FILTER_PR;
	} else if ((dev->flags & IFF_ALLMULTI) ||
			(netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
		/* Pass all multi */
		value = GMAC_PACKET_FILTER_PM;
		/* Set all 64 bits of the hash table. To be updated if a
		 * larger hash table is used.
		 */
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
		writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
	} else if (!netdev_mc_empty(dev)) {
		u32 mc_filter[2];
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value = GMAC_PACKET_FILTER_HMC;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the content of the Hash Table Reg 0 and 1.
			 */
			int bit_nr =
				(bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
			/* The most significant bit determines the register
			 * to use while the other 5 bits determine the bit
			 * within the selected register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
		}
		writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
		writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
	}

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else if (!netdev_uc_empty(dev)) {
		int reg = 1;
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}
	}

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

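/* Configure IEEE 802.3x flow control: RX pause handling in
 * GMAC_RX_FLOW_CTRL and TX pause generation on every TX queue (with
 * the requested pause time when running full duplex).
 */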
static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
		writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
	}
	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow |= GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

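/* Check and clear the per-queue MTL interrupt status; only the RX
 * overflow event is currently reported back to the caller.
 */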
static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/* clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

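/* Handle the GMAC core interrupt sources (MMC counters, PMT wake-up,
 * EEE LPI entry/exit and PCS/RGMII link events) and return a bitmask
 * of CORE_IRQ_* events for the main ISR.
 */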
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Unused events (e.g. MMC interrupts) are not handled. */
	if (unlikely(intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

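/* Collect the MTL per-queue and MAC debug counters (FIFO fill levels,
 * read/write controller states, flow control states) into the driver's
 * extra statistics.
 */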
static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RXFSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_status++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

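/* MAC ops for DWMAC 4.00 cores */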
const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

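/* MAC ops for DWMAC 4.10 cores: only .set_mac differs from dwmac4_ops */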
const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
};

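/* MAC ops for DWMAC 5.x cores: adds the dwmac5 safety features, the
 * flexible RX parser and the flexible PPS callbacks.
 */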
const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
};

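/* Initialize the mac_device_info for a DWMAC4/5 core: filter sizes,
 * link control bits (duplex/speed) and the MDIO register layout.
 */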
int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);

	return 0;
}