1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
4  * stmmac XGMAC support.
5  */
6 
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
10 #include "stmmac.h"
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
13 #include "dwxgmac2.h"
14 
15 static void dwxgmac2_core_init(struct mac_device_info *hw,
16 			       struct net_device *dev)
17 {
18 	void __iomem *ioaddr = hw->pcsr;
19 	u32 tx, rx;
20 
21 	tx = readl(ioaddr + XGMAC_TX_CONFIG);
22 	rx = readl(ioaddr + XGMAC_RX_CONFIG);
23 
24 	tx |= XGMAC_CORE_INIT_TX;
25 	rx |= XGMAC_CORE_INIT_RX;
26 
27 	if (hw->ps) {
28 		tx |= XGMAC_CONFIG_TE;
29 		tx &= ~hw->link.speed_mask;
30 
31 		switch (hw->ps) {
32 		case SPEED_10000:
33 			tx |= hw->link.xgmii.speed10000;
34 			break;
35 		case SPEED_2500:
36 			tx |= hw->link.speed2500;
37 			break;
38 		case SPEED_1000:
39 		default:
40 			tx |= hw->link.speed1000;
41 			break;
42 		}
43 	}
44 
45 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
46 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
47 	writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
48 }
49 
50 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
51 {
52 	u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
53 	u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
54 
55 	if (enable) {
56 		tx |= XGMAC_CONFIG_TE;
57 		rx |= XGMAC_CONFIG_RE;
58 	} else {
59 		tx &= ~XGMAC_CONFIG_TE;
60 		rx &= ~XGMAC_CONFIG_RE;
61 	}
62 
63 	writel(tx, ioaddr + XGMAC_TX_CONFIG);
64 	writel(rx, ioaddr + XGMAC_RX_CONFIG);
65 }
66 
67 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
68 {
69 	void __iomem *ioaddr = hw->pcsr;
70 	u32 value;
71 
72 	value = readl(ioaddr + XGMAC_RX_CONFIG);
73 	if (hw->rx_csum)
74 		value |= XGMAC_CONFIG_IPC;
75 	else
76 		value &= ~XGMAC_CONFIG_IPC;
77 	writel(value, ioaddr + XGMAC_RX_CONFIG);
78 
79 	return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
80 }
81 
82 static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
83 				     u32 queue)
84 {
85 	void __iomem *ioaddr = hw->pcsr;
86 	u32 value;
87 
88 	value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
89 	if (mode == MTL_QUEUE_AVB)
90 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
91 	else if (mode == MTL_QUEUE_DCB)
92 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
93 	writel(value, ioaddr + XGMAC_RXQ_CTRL0);
94 }
95 
96 static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
97 				   u32 queue)
98 {
99 	void __iomem *ioaddr = hw->pcsr;
100 	u32 value, reg;
101 
102 	reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
103 	if (queue >= 4)
104 		queue -= 4;
105 
106 	value = readl(ioaddr + reg);
107 	value &= ~XGMAC_PSRQ(queue);
108 	value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
109 
110 	writel(value, ioaddr + reg);
111 }
112 
113 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
114 				   u32 queue)
115 {
116 	void __iomem *ioaddr = hw->pcsr;
117 	u32 value, reg;
118 
119 	reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
120 	if (queue >= 4)
121 		queue -= 4;
122 
123 	value = readl(ioaddr + reg);
124 	value &= ~XGMAC_PSTC(queue);
125 	value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
126 
127 	writel(value, ioaddr + reg);
128 }
129 
130 static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
131 					    u32 rx_alg)
132 {
133 	void __iomem *ioaddr = hw->pcsr;
134 	u32 value;
135 
136 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
137 	value &= ~XGMAC_RAA;
138 
139 	switch (rx_alg) {
140 	case MTL_RX_ALGORITHM_SP:
141 		break;
142 	case MTL_RX_ALGORITHM_WSP:
143 		value |= XGMAC_RAA;
144 		break;
145 	default:
146 		break;
147 	}
148 
149 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
150 }
151 
152 static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
153 					    u32 tx_alg)
154 {
155 	void __iomem *ioaddr = hw->pcsr;
156 	bool ets = true;
157 	u32 value;
158 	int i;
159 
160 	value = readl(ioaddr + XGMAC_MTL_OPMODE);
161 	value &= ~XGMAC_ETSALG;
162 
163 	switch (tx_alg) {
164 	case MTL_TX_ALGORITHM_WRR:
165 		value |= XGMAC_WRR;
166 		break;
167 	case MTL_TX_ALGORITHM_WFQ:
168 		value |= XGMAC_WFQ;
169 		break;
170 	case MTL_TX_ALGORITHM_DWRR:
171 		value |= XGMAC_DWRR;
172 		break;
173 	default:
174 		ets = false;
175 		break;
176 	}
177 
178 	writel(value, ioaddr + XGMAC_MTL_OPMODE);
179 
180 	/* Set ETS if desired */
181 	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
182 		value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
183 		value &= ~XGMAC_TSA;
184 		if (ets)
185 			value |= XGMAC_ETS;
186 		writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
187 	}
188 }
189 
190 static void dwxgmac2_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
191 					     struct mac_device_info *hw,
192 					     u32 weight, u32 queue)
193 {
194 	void __iomem *ioaddr = hw->pcsr;
195 
196 	writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
197 }
198 
199 static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
200 				    u32 chan)
201 {
202 	void __iomem *ioaddr = hw->pcsr;
203 	u32 value, reg;
204 
205 	reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
206 	if (queue >= 4)
207 		queue -= 4;
208 
209 	value = readl(ioaddr + reg);
210 	value &= ~XGMAC_QxMDMACH(queue);
211 	value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
212 
213 	writel(value, ioaddr + reg);
214 }
215 
216 static void dwxgmac2_config_cbs(struct stmmac_priv *priv,
217 				struct mac_device_info *hw,
218 				u32 send_slope, u32 idle_slope,
219 				u32 high_credit, u32 low_credit, u32 queue)
220 {
221 	void __iomem *ioaddr = hw->pcsr;
222 	u32 value;
223 
224 	writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
225 	writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
226 	writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
227 	writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
228 
229 	value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
230 	value &= ~XGMAC_TSA;
231 	value |= XGMAC_CC | XGMAC_CBS;
232 	writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
233 }
234 
235 static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
236 {
237 	void __iomem *ioaddr = hw->pcsr;
238 	int i;
239 
240 	for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
241 		reg_space[i] = readl(ioaddr + i * 4);
242 }
243 
/* Read and dispatch the top-level MAC interrupt sources.
 * Updates the extra-stats counters and returns CORE_IRQ_* flags for
 * the LPI transitions the core code must act upon.
 */
static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
				    struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 stat, en;
	int ret = 0;

	en = readl(ioaddr + XGMAC_INT_EN);
	stat = readl(ioaddr + XGMAC_INT_STATUS);

	/* Only consider sources that are actually enabled */
	stat &= en;

	if (stat & XGMAC_PMTIS) {
		x->irq_receive_pmt_irq_n++;
		/* NOTE(review): return value intentionally discarded -
		 * presumed read-to-clear of the PMT status; confirm
		 * against the databook.
		 */
		readl(ioaddr + XGMAC_PMT);
	}

	if (stat & XGMAC_LPIIS) {
		/* Reading LPI_CTRL reports which LPI transition fired */
		u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);

		if (lpi & XGMAC_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (lpi & XGMAC_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		/* RX LPI transitions are only counted, not reported */
		if (lpi & XGMAC_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (lpi & XGMAC_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	return ret;
}
280 
281 static int dwxgmac2_host_mtl_irq_status(struct stmmac_priv *priv,
282 					struct mac_device_info *hw, u32 chan)
283 {
284 	void __iomem *ioaddr = hw->pcsr;
285 	int ret = 0;
286 	u32 status;
287 
288 	status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
289 	if (status & BIT(chan)) {
290 		u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
291 
292 		if (chan_status & XGMAC_RXOVFIS)
293 			ret |= CORE_IRQ_MTL_RX_OVERFLOW;
294 
295 		writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
296 	}
297 
298 	return ret;
299 }
300 
301 static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
302 			       unsigned int fc, unsigned int pause_time,
303 			       u32 tx_cnt)
304 {
305 	void __iomem *ioaddr = hw->pcsr;
306 	u32 i;
307 
308 	if (fc & FLOW_RX)
309 		writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
310 	if (fc & FLOW_TX) {
311 		for (i = 0; i < tx_cnt; i++) {
312 			u32 value = XGMAC_TFE;
313 
314 			if (duplex)
315 				value |= pause_time << XGMAC_PT_SHIFT;
316 
317 			writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
318 		}
319 	}
320 }
321 
322 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
323 {
324 	void __iomem *ioaddr = hw->pcsr;
325 	u32 val = 0x0;
326 
327 	if (mode & WAKE_MAGIC)
328 		val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
329 	if (mode & WAKE_UCAST)
330 		val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
331 	if (val) {
332 		u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
333 		cfg |= XGMAC_CONFIG_RE;
334 		writel(cfg, ioaddr + XGMAC_RX_CONFIG);
335 	}
336 
337 	writel(val, ioaddr + XGMAC_PMT);
338 }
339 
340 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
341 				   const unsigned char *addr,
342 				   unsigned int reg_n)
343 {
344 	void __iomem *ioaddr = hw->pcsr;
345 	u32 value;
346 
347 	value = (addr[5] << 8) | addr[4];
348 	writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
349 
350 	value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
351 	writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
352 }
353 
354 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
355 				   unsigned char *addr, unsigned int reg_n)
356 {
357 	void __iomem *ioaddr = hw->pcsr;
358 	u32 hi_addr, lo_addr;
359 
360 	/* Read the MAC address from the hardware */
361 	hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
362 	lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
363 
364 	/* Extract the MAC address from the high and low words */
365 	addr[0] = lo_addr & 0xff;
366 	addr[1] = (lo_addr >> 8) & 0xff;
367 	addr[2] = (lo_addr >> 16) & 0xff;
368 	addr[3] = (lo_addr >> 24) & 0xff;
369 	addr[4] = hi_addr & 0xff;
370 	addr[5] = (hi_addr >> 8) & 0xff;
371 }
372 
373 static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
374 				  bool en_tx_lpi_clockgating)
375 {
376 	void __iomem *ioaddr = hw->pcsr;
377 	u32 value;
378 
379 	value = readl(ioaddr + XGMAC_LPI_CTRL);
380 
381 	value |= XGMAC_LPITXEN | XGMAC_LPITXA;
382 	if (en_tx_lpi_clockgating)
383 		value |= XGMAC_TXCGE;
384 
385 	writel(value, ioaddr + XGMAC_LPI_CTRL);
386 }
387 
388 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
389 {
390 	void __iomem *ioaddr = hw->pcsr;
391 	u32 value;
392 
393 	value = readl(ioaddr + XGMAC_LPI_CTRL);
394 	value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
395 	writel(value, ioaddr + XGMAC_LPI_CTRL);
396 }
397 
398 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
399 {
400 	void __iomem *ioaddr = hw->pcsr;
401 	u32 value;
402 
403 	value = readl(ioaddr + XGMAC_LPI_CTRL);
404 	if (link)
405 		value |= XGMAC_PLS;
406 	else
407 		value &= ~XGMAC_PLS;
408 	writel(value, ioaddr + XGMAC_LPI_CTRL);
409 }
410 
411 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
412 {
413 	void __iomem *ioaddr = hw->pcsr;
414 	u32 value;
415 
416 	value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
417 	writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
418 }
419 
420 static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
421 				int mcbitslog2)
422 {
423 	int numhashregs, regs;
424 
425 	switch (mcbitslog2) {
426 	case 6:
427 		numhashregs = 2;
428 		break;
429 	case 7:
430 		numhashregs = 4;
431 		break;
432 	case 8:
433 		numhashregs = 8;
434 		break;
435 	default:
436 		return;
437 	}
438 
439 	for (regs = 0; regs < numhashregs; regs++)
440 		writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
441 }
442 
/* Program the RX packet filter from the netdev flags and address lists:
 * promiscuous, accept-all-multicast, multicast hash filtering, and the
 * perfect unicast address registers.
 */
static void dwxgmac2_set_filter(struct mac_device_info *hw,
				struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
	int mcbitslog2 = hw->mcast_bits_log2;
	u32 mc_filter[8];
	int i;

	/* Reset the filtering-mode bits, keep unrelated configuration */
	value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
	value |= XGMAC_FILTER_HPF;

	memset(mc_filter, 0, sizeof(mc_filter));

	if (dev->flags & IFF_PROMISC) {
		/* Pass everything, including control frames */
		value |= XGMAC_FILTER_PR;
		value |= XGMAC_FILTER_PCF;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Too many groups for the hash: accept all multicast */
		value |= XGMAC_FILTER_PM;

		for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
			writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filtering: one bit per CRC32 of the address */
		value |= XGMAC_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
					(32 - mcbitslog2));
			mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
		}
	}

	dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Not enough perfect-filter slots: go promiscuous */
		value |= XGMAC_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		/* Entry 0 is skipped - presumably reserved for the
		 * device's own address; confirm against the caller.
		 */
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwxgmac2_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		/* Invalidate stale entries left from a previous list */
		for ( ; reg < XGMAC_ADDR_MAX; reg++) {
			writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
			writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
		}
	}

	writel(value, ioaddr + XGMAC_PACKET_FILTER);
}
500 
501 static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
502 {
503 	u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
504 
505 	if (enable)
506 		value |= XGMAC_CONFIG_LM;
507 	else
508 		value &= ~XGMAC_CONFIG_LM;
509 
510 	writel(value, ioaddr + XGMAC_RX_CONFIG);
511 }
512 
513 static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
514 				  u32 val)
515 {
516 	u32 ctrl = 0;
517 
518 	writel(val, ioaddr + XGMAC_RSS_DATA);
519 	ctrl |= idx << XGMAC_RSSIA_SHIFT;
520 	ctrl |= is_key ? XGMAC_ADDRT : 0x0;
521 	ctrl |= XGMAC_OB;
522 	writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
523 
524 	return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
525 				  !(ctrl & XGMAC_OB), 100, 10000);
526 }
527 
/* Configure (or disable) RSS: program the hash key word by word, the
 * indirection table, point every RX queue at dynamic DMA-channel
 * selection, then enable hashing for UDP4/TCP4/IP.
 * Returns 0 on success or the first indirect-write error.
 */
static int dwxgmac2_rss_configure(struct mac_device_info *hw,
				  struct stmmac_rss *cfg, u32 num_rxq)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value, *key;
	int i, ret;

	value = readl(ioaddr + XGMAC_RSS_CTRL);
	if (!cfg || !cfg->enable) {
		/* Just turn RSS off and keep the rest of the register */
		value &= ~XGMAC_RSSE;
		writel(value, ioaddr + XGMAC_RSS_CTRL);
		return 0;
	}

	/* The byte key is written through the indirect port as u32 words */
	key = (u32 *)cfg->key;
	for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
		if (ret)
			return ret;
	}

	for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
		ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
		if (ret)
			return ret;
	}

	/* Let the RSS result pick the DMA channel for every RX queue */
	for (i = 0; i < num_rxq; i++)
		dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);

	value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
	writel(value, ioaddr + XGMAC_RSS_CTRL);
	return 0;
}
562 
/* Program VLAN filtering in one of three modes:
 *  - hash != 0:          hash-based VLAN matching (VTHM + ETV);
 *  - perfect_match != 0: perfect VID matching (ETV, VTHM off);
 *  - neither:            VLAN filtering fully disabled.
 * is_double selects double-VLAN (S-VLAN/inner-tag) handling in the two
 * enabled modes.
 */
static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				      __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;

	writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);

	if (hash) {
		/* Hash matching: enable the VLAN tag filter... */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		/* ...and switch VLAN_TAG to hash mode */
		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		value &= ~XGMAC_VLAN_VID;
		writel(value, ioaddr + XGMAC_VLAN_TAG);
	} else if (perfect_match) {
		/* Perfect matching: VLAN filter on, hash mode off */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value |= XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~XGMAC_VLAN_VTHM;
		value |= XGMAC_VLAN_ETV;
		if (is_double) {
			value |= XGMAC_VLAN_EDVLP;
			value |= XGMAC_VLAN_ESVL;
			value |= XGMAC_VLAN_DOVLTC;
		} else {
			value &= ~XGMAC_VLAN_EDVLP;
			value &= ~XGMAC_VLAN_ESVL;
			value &= ~XGMAC_VLAN_DOVLTC;
		}

		/* Clear the old VID field, then merge in the new one */
		value &= ~XGMAC_VLAN_VID;
		writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
	} else {
		/* No filtering requested: disable everything */
		u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);

		value &= ~XGMAC_FILTER_VTFE;

		writel(value, ioaddr + XGMAC_PACKET_FILTER);

		value = readl(ioaddr + XGMAC_VLAN_TAG);

		value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
		value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
		value &= ~XGMAC_VLAN_DOVLTC;
		value &= ~XGMAC_VLAN_VID;

		writel(value, ioaddr + XGMAC_VLAN_TAG);
	}
}
632 
/* Describes one bit of a safety/ECC interrupt status register. */
struct dwxgmac3_error_desc {
	bool valid;			/* false for reserved/unknown bits */
	const char *desc;		/* short mnemonic (also used by ethtool dump) */
	const char *detailed_desc;	/* human-readable description */
};
638 
/* Byte offset of a counter array inside struct stmmac_safety_stats */
#define STAT_OFF(field)		offsetof(struct stmmac_safety_stats, field)
640 
641 static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
642 			       const char *module_name,
643 			       const struct dwxgmac3_error_desc *desc,
644 			       unsigned long field_offset,
645 			       struct stmmac_safety_stats *stats)
646 {
647 	unsigned long loc, mask;
648 	u8 *bptr = (u8 *)stats;
649 	unsigned long *ptr;
650 
651 	ptr = (unsigned long *)(bptr + field_offset);
652 
653 	mask = value;
654 	for_each_set_bit(loc, &mask, 32) {
655 		netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
656 				"correctable" : "uncorrectable", module_name,
657 				desc[loc].desc, desc[loc].detailed_desc);
658 
659 		/* Update counters */
660 		ptr[loc]++;
661 	}
662 }
663 
/* Bit-indexed descriptions for the MAC DPP/FSM safety status register
 * (see dwxgmac3_handle_mac_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
	{ true, "ATPES", "Application Transmit Interface Parity Check Error" },
	{ true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
	{ true, "TPES", "TSO Data Path Parity Check Error" },
	{ true, "TSOPES", "TSO Header Data Path Parity Check Error" },
	{ true, "MTPES", "MTL Data Path Parity Check Error" },
	{ true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
	{ true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
	{ true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
	{ true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
	{ true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
	{ true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
	{ true, "CWPES", "CSR Write Data Path Parity Check Error" },
	{ true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
	{ true, "TTES", "TX FSM Timeout Error" },
	{ true, "RTES", "RX FSM Timeout Error" },
	{ true, "CTES", "CSR FSM Timeout Error" },
	{ true, "ATES", "APP FSM Timeout Error" },
	{ true, "PTES", "PTP FSM Timeout Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ true, "MSTTES", "Master Read/Write Timeout Error" },
	{ true, "SLVTES", "Slave Read/Write Timeout Error" },
	{ true, "ATITES", "Application Timeout on ATI Interface Error" },
	{ true, "ARITES", "Application Timeout on ARI Interface Error" },
	{ true, "FSMPES", "FSM State Parity Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ true, "CPI", "Control Register Parity Check Error" },
};
698 
699 static void dwxgmac3_handle_mac_err(struct net_device *ndev,
700 				    void __iomem *ioaddr, bool correctable,
701 				    struct stmmac_safety_stats *stats)
702 {
703 	u32 value;
704 
705 	value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
706 	writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
707 
708 	dwxgmac3_log_error(ndev, value, correctable, "MAC",
709 			   dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
710 }
711 
/* Bit-indexed descriptions for the MTL ECC safety status register
 * (see dwxgmac3_handle_mtl_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
	{ true, "TXCES", "MTL TX Memory Error" },
	{ true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
	{ true, "TXUES", "MTL TX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "RXCES", "MTL RX Memory Error" },
	{ true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
	{ true, "RXUES", "MTL RX Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ true, "ECES", "MTL EST Memory Error" },
	{ true, "EAMS", "MTL EST Memory Address Mismatch Error" },
	{ true, "EUES", "MTL EST Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ true, "RPCES", "MTL RX Parser Memory Error" },
	{ true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
	{ true, "RPUES", "MTL RX Parser Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
746 
747 static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
748 				    void __iomem *ioaddr, bool correctable,
749 				    struct stmmac_safety_stats *stats)
750 {
751 	u32 value;
752 
753 	value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
754 	writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
755 
756 	dwxgmac3_log_error(ndev, value, correctable, "MTL",
757 			   dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
758 }
759 
/* Bit-indexed descriptions for the DMA ECC safety status register
 * (see dwxgmac3_handle_dma_err()).
 */
static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
	{ true, "TCES", "DMA TSO Memory Error" },
	{ true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
	{ true, "TUES", "DMA TSO Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 3 */
	{ true, "DCES", "DMA DCACHE Memory Error" },
	{ true, "DAMS", "DMA DCACHE Address Mismatch Error" },
	{ true, "DUES", "DMA DCACHE Memory Error" },
	{ false, "UNKNOWN", "Unknown Error" }, /* 7 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 8 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 9 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 10 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 11 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 12 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 13 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 14 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 15 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 16 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 17 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 18 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 19 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 20 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 21 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 22 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 23 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 24 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 25 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 26 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 27 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 28 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 29 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 30 */
	{ false, "UNKNOWN", "Unknown Error" }, /* 31 */
};
794 
795 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
796 				    void __iomem *ioaddr, bool correctable,
797 				    struct stmmac_safety_stats *stats)
798 {
799 	u32 value;
800 
801 	value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
802 	writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
803 
804 	dwxgmac3_log_error(ndev, value, correctable, "DMA",
805 			   dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
806 }
807 
/* Enable the automotive safety package features advertised by the asp
 * capability field: ECC protection plus - for asp > 1 - FSM parity and
 * timeout checking. Returns -EINVAL when no safety support is present.
 * NOTE(review): safety_cfg is currently unused by this implementation.
 */
static int
dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
			    struct stmmac_safety_feature_cfg *safety_cfg)
{
	u32 value;

	if (!asp)
		return -EINVAL;

	/* 1. Enable Safety Features */
	writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);

	/* 2. Enable MTL Safety Interrupts */
	value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
	value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
	value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
	value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
	value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
	writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);

	/* 3. Enable DMA Safety Interrupts */
	value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
	value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
	value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
	writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);

	/* Only ECC Protection for External Memory feature is selected */
	if (asp <= 0x1)
		return 0;

	/* 4. Enable Parity and Timeout for FSM */
	value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
	value |= XGMAC_PRTYEN; /* FSM Parity Feature */
	value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
	writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);

	return 0;
}
846 
/* Classify and handle pending safety interrupts from the MTL and DMA
 * safety status registers. Each class (MAC, MTL, DMA) is logged and
 * counted; the return value is non-zero when at least one
 * uncorrectable error was seen, or -EINVAL without safety support.
 */
static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
					   void __iomem *ioaddr,
					   unsigned int asp,
					   struct stmmac_safety_stats *stats)
{
	bool err, corr;
	u32 mtl, dma;
	int ret = 0;

	if (!asp)
		return -EINVAL;

	mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
	dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);

	/* MAC-level errors are always treated as uncorrectable */
	err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
	corr = false;
	if (err) {
		dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* MTL errors: correctable only when solely the *CIS bits fired */
	err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
	      (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
	corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
	if (err) {
		dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	/* DMA errors, same correctable/uncorrectable split */
	err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
	corr = dma & XGMAC_DECIS;
	if (err) {
		dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
		ret |= !corr;
	}

	return ret;
}
886 
/* Modules indexed by dwxgmac3_safety_feat_dump(): index / 32 selects
 * the module, index % 32 the bit within that module's table.
 */
static const struct dwxgmac3_error {
	const struct dwxgmac3_error_desc *desc;
} dwxgmac3_all_errors[] = {
	{ dwxgmac3_mac_errors },
	{ dwxgmac3_mtl_errors },
	{ dwxgmac3_dma_errors },
};
894 
895 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
896 				     int index, unsigned long *count,
897 				     const char **desc)
898 {
899 	int module = index / 32, offset = index % 32;
900 	unsigned long *ptr = (unsigned long *)stats;
901 
902 	if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
903 		return -EINVAL;
904 	if (!dwxgmac3_all_errors[module].desc[offset].valid)
905 		return -EINVAL;
906 	if (count)
907 		*count = *(ptr + index);
908 	if (desc)
909 		*desc = dwxgmac3_all_errors[module].desc[offset].desc;
910 	return 0;
911 }
912 
913 static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
914 {
915 	u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
916 
917 	val &= ~XGMAC_FRPE;
918 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
919 
920 	return 0;
921 }
922 
923 static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
924 {
925 	u32 val;
926 
927 	val = readl(ioaddr + XGMAC_MTL_OPMODE);
928 	val |= XGMAC_FRPE;
929 	writel(val, ioaddr + XGMAC_MTL_OPMODE);
930 }
931 
/* Write one parser table entry (entry->val) word by word at table slot
 * `pos` through the indirect RXP access registers. Each word follows
 * the protocol: wait ready, load data, set address, set write opcode,
 * start, wait done. Returns 0 or a poll-timeout error.
 */
static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
					    struct stmmac_tc_entry *entry,
					    int pos)
{
	int ret, i;

	for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
		/* Word address inside the table: slot * words-per-entry + i */
		int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
		u32 val;

		/* Wait for ready */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;

		/* Write data */
		val = *((u32 *)&entry->val + i);
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);

		/* Write pos */
		val = real_pos & XGMAC_ADDR;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Write OP */
		val |= XGMAC_WRRDN;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Start Write */
		val |= XGMAC_STARTBUSY;
		writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);

		/* Wait for done */
		ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
					 val, !(val & XGMAC_STARTBUSY), 1, 10000);
		if (ret)
			return ret;
	}

	return 0;
}
973 
974 static struct stmmac_tc_entry *
975 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
976 			    unsigned int count, u32 curr_prio)
977 {
978 	struct stmmac_tc_entry *entry;
979 	u32 min_prio = ~0x0;
980 	int i, min_prio_idx;
981 	bool found = false;
982 
983 	for (i = count - 1; i >= 0; i--) {
984 		entry = &entries[i];
985 
986 		/* Do not update unused entries */
987 		if (!entry->in_use)
988 			continue;
989 		/* Do not update already updated entries (i.e. fragments) */
990 		if (entry->in_hw)
991 			continue;
992 		/* Let last entry be updated last */
993 		if (entry->is_last)
994 			continue;
995 		/* Do not return fragments */
996 		if (entry->is_frag)
997 			continue;
998 		/* Check if we already checked this prio */
999 		if (entry->prio < curr_prio)
1000 			continue;
1001 		/* Check if this is the minimum prio */
1002 		if (entry->prio < min_prio) {
1003 			min_prio = entry->prio;
1004 			min_prio_idx = i;
1005 			found = true;
1006 		}
1007 	}
1008 
1009 	if (found)
1010 		return &entries[min_prio_idx];
1011 	return NULL;
1012 }
1013 
/* Reprogram the whole Flexible RX Parser table. RX and the parser are
 * stopped while the table is rewritten in priority order (fragments
 * immediately after their owner, the all-pass "last" entries at the
 * end), then the valid-entry counts are set and everything re-enabled.
 * RX is restored to its previous state on every exit path.
 */
static int dwxgmac3_rxp_config(void __iomem *ioaddr,
			       struct stmmac_tc_entry *entries,
			       unsigned int count)
{
	struct stmmac_tc_entry *entry, *frag;
	int i, ret, nve = 0;
	u32 curr_prio = 0;
	u32 old_val, val;

	/* Force disable RX */
	old_val = readl(ioaddr + XGMAC_RX_CONFIG);
	val = old_val & ~XGMAC_CONFIG_RE;
	writel(val, ioaddr + XGMAC_RX_CONFIG);

	/* Disable RX Parser */
	ret = dwxgmac3_rxp_disable(ioaddr);
	if (ret)
		goto re_enable;

	/* Set all entries as NOT in HW */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		entry->in_hw = false;
	}

	/* Update entries by reverse order */
	while (1) {
		entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
		if (!entry)
			break;

		curr_prio = entry->prio;
		frag = entry->frag_ptr;

		/* Set special fragment requirements */
		if (frag) {
			entry->val.af = 0;
			entry->val.rf = 0;
			entry->val.nc = 1;
			/* ok_index points past the fragment that follows */
			entry->val.ok_index = nve + 2;
		}

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
		entry->in_hw = true;

		/* Program the fragment right after its owning entry */
		if (frag && !frag->in_hw) {
			ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
			if (ret)
				goto re_enable;
			frag->table_pos = nve++;
			frag->in_hw = true;
		}
	}

	/* Nothing programmed: leave the parser disabled (ret is 0 here) */
	if (!nve)
		goto re_enable;

	/* Update all pass entry */
	for (i = 0; i < count; i++) {
		entry = &entries[i];
		if (!entry->is_last)
			continue;

		ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
		if (ret)
			goto re_enable;

		entry->table_pos = nve++;
	}

	/* Assume n. of parsable entries == n. of valid entries */
	val = (nve << 16) & XGMAC_NPE;
	val |= nve & XGMAC_NVE;
	writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);

	/* Enable RX Parser */
	dwxgmac3_rxp_enable(ioaddr);

re_enable:
	/* Re-enable RX */
	writel(old_val, ioaddr + XGMAC_RX_CONFIG);
	return ret;
}
1101 
1102 static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1103 {
1104 	void __iomem *ioaddr = hw->pcsr;
1105 	u32 value;
1106 
1107 	if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1108 				      value, value & XGMAC_TXTSC, 100, 10000))
1109 		return -EBUSY;
1110 
1111 	*ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1112 	*ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1113 	return 0;
1114 }
1115 
/* Configure one flexible PPS output.
 *
 * When @enable is false the output is simply stopped.  Otherwise the
 * start time, interval and pulse width are programmed from @cfg and the
 * output is started.  Returns 0 on success, -EINVAL on bad arguments or
 * an unrepresentable period, -EBUSY while a previous target time is
 * still pending.
 *
 * NOTE(review): XGMAC_TRGTBUSY0 and XGMAC_PPSEN0 are the index-0 bits
 * but are used here for every @index — confirm whether per-index
 * variants are required for outputs > 0.
 */
static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
				    struct stmmac_pps_cfg *cfg, bool enable,
				    u32 sub_second_inc, u32 systime_flags)
{
	u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
	u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
	u64 period;

	if (!cfg->available)
		return -EINVAL;
	if (tnsec & XGMAC_TRGTBUSY0)
		return -EBUSY;
	if (!sub_second_inc || !systime_flags)
		return -EINVAL;

	val &= ~XGMAC_PPSx_MASK(index);

	if (!enable) {
		val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
		writel(val, ioaddr + XGMAC_PPS_CONTROL);
		return 0;
	}

	val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
	val |= XGMAC_PPSEN0;

	writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));

	/* In binary-rollover mode the ns register counts in ~0.465ns units;
	 * convert.  NOTE(review): this mutates the caller's cfg->start in
	 * place — repeated calls would re-scale it; confirm callers pass a
	 * fresh cfg each time.
	 */
	if (!(systime_flags & PTP_TCR_TSCTRLSSR))
		cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
	writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));

	/* Period expressed in units of the sub-second increment */
	period = cfg->period.tv_sec * 1000000000;
	period += cfg->period.tv_nsec;

	do_div(period, sub_second_inc);

	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));

	/* 50% duty cycle: width is half the interval */
	period >>= 1;
	if (period <= 1)
		return -EINVAL;

	writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));

	/* Finally, activate it */
	writel(val, ioaddr + XGMAC_PPS_CONTROL);
	return 0;
}
1169 
1170 static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1171 {
1172 	u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1173 
1174 	value &= ~XGMAC_CONFIG_SARC;
1175 	value |= val << XGMAC_CONFIG_SARC_SHIFT;
1176 
1177 	writel(value, ioaddr + XGMAC_TX_CONFIG);
1178 }
1179 
1180 static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1181 {
1182 	void __iomem *ioaddr = hw->pcsr;
1183 	u32 value;
1184 
1185 	value = readl(ioaddr + XGMAC_VLAN_INCL);
1186 	value |= XGMAC_VLAN_VLTI;
1187 	value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1188 	value &= ~XGMAC_VLAN_VLC;
1189 	value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1190 	writel(value, ioaddr + XGMAC_VLAN_INCL);
1191 }
1192 
1193 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1194 {
1195 	void __iomem *ioaddr = hw->pcsr;
1196 	u32 value;
1197 
1198 	if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1199 			       !(value & XGMAC_XB), 100, 10000))
1200 		return -EBUSY;
1201 	return 0;
1202 }
1203 
1204 static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1205 				u8 reg, u32 *data)
1206 {
1207 	void __iomem *ioaddr = hw->pcsr;
1208 	u32 value;
1209 	int ret;
1210 
1211 	ret = dwxgmac2_filter_wait(hw);
1212 	if (ret)
1213 		return ret;
1214 
1215 	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1216 	value |= XGMAC_TT | XGMAC_XB;
1217 	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1218 
1219 	ret = dwxgmac2_filter_wait(hw);
1220 	if (ret)
1221 		return ret;
1222 
1223 	*data = readl(ioaddr + XGMAC_L3L4_DATA);
1224 	return 0;
1225 }
1226 
1227 static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1228 				 u8 reg, u32 data)
1229 {
1230 	void __iomem *ioaddr = hw->pcsr;
1231 	u32 value;
1232 	int ret;
1233 
1234 	ret = dwxgmac2_filter_wait(hw);
1235 	if (ret)
1236 		return ret;
1237 
1238 	writel(data, ioaddr + XGMAC_L3L4_DATA);
1239 
1240 	value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1241 	value |= XGMAC_XB;
1242 	writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1243 
1244 	return dwxgmac2_filter_wait(hw);
1245 }
1246 
1247 static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1248 				     bool en, bool ipv6, bool sa, bool inv,
1249 				     u32 match)
1250 {
1251 	void __iomem *ioaddr = hw->pcsr;
1252 	u32 value;
1253 	int ret;
1254 
1255 	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1256 	value |= XGMAC_FILTER_IPFE;
1257 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1258 
1259 	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1260 	if (ret)
1261 		return ret;
1262 
1263 	/* For IPv6 not both SA/DA filters can be active */
1264 	if (ipv6) {
1265 		value |= XGMAC_L3PEN0;
1266 		value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1267 		value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1268 		if (sa) {
1269 			value |= XGMAC_L3SAM0;
1270 			if (inv)
1271 				value |= XGMAC_L3SAIM0;
1272 		} else {
1273 			value |= XGMAC_L3DAM0;
1274 			if (inv)
1275 				value |= XGMAC_L3DAIM0;
1276 		}
1277 	} else {
1278 		value &= ~XGMAC_L3PEN0;
1279 		if (sa) {
1280 			value |= XGMAC_L3SAM0;
1281 			if (inv)
1282 				value |= XGMAC_L3SAIM0;
1283 		} else {
1284 			value |= XGMAC_L3DAM0;
1285 			if (inv)
1286 				value |= XGMAC_L3DAIM0;
1287 		}
1288 	}
1289 
1290 	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1291 	if (ret)
1292 		return ret;
1293 
1294 	if (sa) {
1295 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1296 		if (ret)
1297 			return ret;
1298 	} else {
1299 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1300 		if (ret)
1301 			return ret;
1302 	}
1303 
1304 	if (!en)
1305 		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1306 
1307 	return 0;
1308 }
1309 
1310 static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1311 				     bool en, bool udp, bool sa, bool inv,
1312 				     u32 match)
1313 {
1314 	void __iomem *ioaddr = hw->pcsr;
1315 	u32 value;
1316 	int ret;
1317 
1318 	value = readl(ioaddr + XGMAC_PACKET_FILTER);
1319 	value |= XGMAC_FILTER_IPFE;
1320 	writel(value, ioaddr + XGMAC_PACKET_FILTER);
1321 
1322 	ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1323 	if (ret)
1324 		return ret;
1325 
1326 	if (udp) {
1327 		value |= XGMAC_L4PEN0;
1328 	} else {
1329 		value &= ~XGMAC_L4PEN0;
1330 	}
1331 
1332 	value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1333 	value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1334 	if (sa) {
1335 		value |= XGMAC_L4SPM0;
1336 		if (inv)
1337 			value |= XGMAC_L4SPIM0;
1338 	} else {
1339 		value |= XGMAC_L4DPM0;
1340 		if (inv)
1341 			value |= XGMAC_L4DPIM0;
1342 	}
1343 
1344 	ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1345 	if (ret)
1346 		return ret;
1347 
1348 	if (sa) {
1349 		value = match & XGMAC_L4SP0;
1350 
1351 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1352 		if (ret)
1353 			return ret;
1354 	} else {
1355 		value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1356 
1357 		ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1358 		if (ret)
1359 			return ret;
1360 	}
1361 
1362 	if (!en)
1363 		return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1364 
1365 	return 0;
1366 }
1367 
1368 static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1369 				     u32 addr)
1370 {
1371 	void __iomem *ioaddr = hw->pcsr;
1372 	u32 value;
1373 
1374 	writel(addr, ioaddr + XGMAC_ARP_ADDR);
1375 
1376 	value = readl(ioaddr + XGMAC_RX_CONFIG);
1377 	if (en)
1378 		value |= XGMAC_CONFIG_ARPEN;
1379 	else
1380 		value &= ~XGMAC_CONFIG_ARPEN;
1381 	writel(value, ioaddr + XGMAC_RX_CONFIG);
1382 }
1383 
1384 static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
1385 {
1386 	u32 ctrl;
1387 
1388 	writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
1389 
1390 	ctrl = (reg << XGMAC_ADDR_SHIFT);
1391 	ctrl |= gcl ? 0 : XGMAC_GCRR;
1392 
1393 	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1394 
1395 	ctrl |= XGMAC_SRWO;
1396 	writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1397 
1398 	return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
1399 					 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
1400 }
1401 
1402 static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
1403 				  unsigned int ptp_rate)
1404 {
1405 	int i, ret = 0x0;
1406 	u32 ctrl;
1407 
1408 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
1409 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
1410 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
1411 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
1412 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
1413 	ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
1414 	if (ret)
1415 		return ret;
1416 
1417 	for (i = 0; i < cfg->gcl_size; i++) {
1418 		ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
1419 		if (ret)
1420 			return ret;
1421 	}
1422 
1423 	ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
1424 	ctrl &= ~XGMAC_PTOV;
1425 	ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
1426 	if (cfg->enable)
1427 		ctrl |= XGMAC_EEST | XGMAC_SSWL;
1428 	else
1429 		ctrl &= ~XGMAC_EEST;
1430 
1431 	writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
1432 	return 0;
1433 }
1434 
1435 static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
1436 				   u32 num_rxq, bool enable)
1437 {
1438 	u32 value;
1439 
1440 	if (!enable) {
1441 		value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1442 
1443 		value &= ~XGMAC_EFPE;
1444 
1445 		writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1446 		return;
1447 	}
1448 
1449 	value = readl(ioaddr + XGMAC_RXQ_CTRL1);
1450 	value &= ~XGMAC_RQ;
1451 	value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
1452 	writel(value, ioaddr + XGMAC_RXQ_CTRL1);
1453 
1454 	value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1455 	value |= XGMAC_EFPE;
1456 	writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1457 }
1458 
/* MAC operations for the DesignWare XGMAC 2.10 core.  Shares the
 * dwxgmac3 safety/EST/FPE helpers; PCS and debug hooks are not
 * implemented for this core.
 */
const struct stmmac_ops dwxgmac210_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1505 
1506 static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1507 				      u32 queue)
1508 {
1509 	void __iomem *ioaddr = hw->pcsr;
1510 	u32 value;
1511 
1512 	value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1513 	if (mode == MTL_QUEUE_AVB)
1514 		value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1515 	else if (mode == MTL_QUEUE_DCB)
1516 		value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1517 	writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1518 }
1519 
/* MAC operations for the DesignWare XLGMAC core.  Identical to
 * dwxgmac210_ops except for rx_queue_enable, which uses the XLGMAC
 * queue-enable register layout.
 */
const struct stmmac_ops dwxlgmac2_ops = {
	.core_init = dwxgmac2_core_init,
	.set_mac = dwxgmac2_set_mac,
	.rx_ipc = dwxgmac2_rx_ipc,
	.rx_queue_enable = dwxlgmac2_rx_queue_enable,
	.rx_queue_prio = dwxgmac2_rx_queue_prio,
	.tx_queue_prio = dwxgmac2_tx_queue_prio,
	.rx_queue_routing = NULL,
	.prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
	.config_cbs = dwxgmac2_config_cbs,
	.dump_regs = dwxgmac2_dump_regs,
	.host_irq_status = dwxgmac2_host_irq_status,
	.host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
	.flow_ctrl = dwxgmac2_flow_ctrl,
	.pmt = dwxgmac2_pmt,
	.set_umac_addr = dwxgmac2_set_umac_addr,
	.get_umac_addr = dwxgmac2_get_umac_addr,
	.set_eee_mode = dwxgmac2_set_eee_mode,
	.reset_eee_mode = dwxgmac2_reset_eee_mode,
	.set_eee_timer = dwxgmac2_set_eee_timer,
	.set_eee_pls = dwxgmac2_set_eee_pls,
	.pcs_ctrl_ane = NULL,
	.pcs_rane = NULL,
	.pcs_get_adv_lp = NULL,
	.debug = NULL,
	.set_filter = dwxgmac2_set_filter,
	.safety_feat_config = dwxgmac3_safety_feat_config,
	.safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
	.safety_feat_dump = dwxgmac3_safety_feat_dump,
	.set_mac_loopback = dwxgmac2_set_mac_loopback,
	.rss_configure = dwxgmac2_rss_configure,
	.update_vlan_hash = dwxgmac2_update_vlan_hash,
	.rxp_config = dwxgmac3_rxp_config,
	.get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
	.flex_pps_config = dwxgmac2_flex_pps_config,
	.sarc_configure = dwxgmac2_sarc_configure,
	.enable_vlan = dwxgmac2_enable_vlan,
	.config_l3_filter = dwxgmac2_config_l3_filter,
	.config_l4_filter = dwxgmac2_config_l4_filter,
	.set_arp_offload = dwxgmac2_set_arp_offload,
	.est_configure = dwxgmac3_est_configure,
	.fpe_configure = dwxgmac3_fpe_configure,
};
1566 
/* Fill in the mac_device_info for an XGMAC2 core: filter sizes, link
 * speed register values and MDIO register layout.  Always returns 0.
 */
int dwxgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXGMAC2\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Speed-select field values written into the TX config register */
	mac->link.duplex = 0;
	mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
	mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
	mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
	mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
	mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
	mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
	mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;

	/* MDIO register layout: PA in bits 20:16, RA in 15:0, CSR clock
	 * select in 21:19
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1603 
/* Fill in the mac_device_info for an XLGMAC core: filter sizes, link
 * speed register values (up to 100G) and MDIO register layout.
 * Always returns 0.
 */
int dwxlgmac2_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tXLGMAC\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	/* Speed-select field values written into the TX config register */
	mac->link.duplex = 0;
	mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
	mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
	mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
	mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
	mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
	mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
	mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
	mac->link.speed_mask = XLGMAC_CONFIG_SS;

	/* MDIO register layout: PA in bits 20:16, RA in 15:0, CSR clock
	 * select in 21:19
	 */
	mac->mii.addr = XGMAC_MDIO_ADDR;
	mac->mii.data = XGMAC_MDIO_DATA;
	mac->mii.addr_shift = 16;
	mac->mii.addr_mask = GENMASK(20, 16);
	mac->mii.reg_shift = 0;
	mac->mii.reg_mask = GENMASK(15, 0);
	mac->mii.clk_csr_shift = 19;
	mac->mii.clk_csr_mask = GENMASK(21, 19);

	return 0;
}
1640