// SPDX-License-Identifier: GPL-2.0-only
/*
 * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
 * DWC Ether MAC version 4.00  has been used for developing this code.
 *
 * This only implements the mac core functions for this chip.
 *
 * Copyright (C) 2015  STMicroelectronics Ltd
 *
 * Author: Alexandre Torgue <alexandre.torgue@st.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include "stmmac.h"
#include "stmmac_pcs.h"
#include "dwmac4.h"
#include "dwmac5.h"

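/* Common MAC core setup: apply GMAC_CORE_INIT, program the port speed bits
 * when a fixed speed is requested via hw->ps, set the 1us tick counter from
 * the CSR clock rate and enable the default MAC interrupts (plus PCS and FPE
 * interrupts when available).
 */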
static void dwmac4_core_init(struct mac_device_info *hw,
			     struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);
	u32 clk_rate;

	value |= GMAC_CORE_INIT;

	if (hw->ps) {
		value |= GMAC_CONFIG_TE;

		value &= hw->link.speed_mask;
		switch (hw->ps) {
		case SPEED_1000:
			value |= hw->link.speed1000;
			break;
		case SPEED_100:
			value |= hw->link.speed100;
			break;
		case SPEED_10:
			value |= hw->link.speed10;
			break;
		}
	}

	writel(value, ioaddr + GMAC_CONFIG);

	/* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
	clk_rate = clk_get_rate(priv->plat->stmmac_clk);
	writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);

	/* Enable GMAC interrupts */
	value = GMAC_INT_DEFAULT_ENABLE;

	if (hw->pcs)
		value |= GMAC_PCS_IRQ_DEFAULT;

	/* Enable FPE interrupt */
	if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
		value |= GMAC_INT_FPE_EN;

	writel(value, ioaddr + GMAC_INT_EN);

	if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
		init_waitqueue_head(&priv->tstamp_busy_wait);
}

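/* Half duplex is only supported with a single TX queue, so strip the
 * half-duplex capabilities from the phylink MAC caps when several TX
 * queues are in use.
 */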
static void dwmac4_phylink_get_caps(struct stmmac_priv *priv)
{
	if (priv->plat->tx_queues_to_use > 1)
		priv->hw->link.caps &= ~(MAC_10HD | MAC_100HD | MAC_1000HD);
	else
		priv->hw->link.caps |= (MAC_10HD | MAC_100HD | MAC_1000HD);
}

static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
				   u8 mode, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);

	value &= GMAC_RX_QUEUE_CLEAR(queue);
	if (mode == MTL_QUEUE_AVB)
		value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
	else if (mode == MTL_QUEUE_DCB)
		value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);

	writel(value, ioaddr + GMAC_RXQ_CTRL0);
}

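/* Map a receive priority to @queue: clear the priority bits from both
 * MAC_RxQ_Ctrl2 and MAC_RxQ_Ctrl3 first so that a priority is never
 * routed to more than one RX queue, then program the PSRQ field of the
 * requested queue.
 */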
static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 clear_mask = 0;
	u32 ctrl2, ctrl3;
	int i;

	ctrl2 = readl(ioaddr + GMAC_RXQ_CTRL2);
	ctrl3 = readl(ioaddr + GMAC_RXQ_CTRL3);

	/* The software must ensure that the same priority
	 * is not mapped to multiple Rx queues
	 */
	for (i = 0; i < 4; i++)
		clear_mask |= ((prio << GMAC_RXQCTRL_PSRQX_SHIFT(i)) &
						GMAC_RXQCTRL_PSRQX_MASK(i));

	ctrl2 &= ~clear_mask;
	ctrl3 &= ~clear_mask;

	/* First assign new priorities to a queue, then
	 * clear them from the other queues
	 */
	if (queue < 4) {
		ctrl2 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
	} else {
		queue -= 4;

		ctrl3 |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
						GMAC_RXQCTRL_PSRQX_MASK(queue);

		writel(ctrl3, ioaddr + GMAC_RXQ_CTRL3);
		writel(ctrl2, ioaddr + GMAC_RXQ_CTRL2);
	}
}

static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
				     u32 prio, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 base_register;
	u32 value;

	base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
	if (queue >= 4)
		queue -= 4;

	value = readl(ioaddr + base_register);

	value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
	value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
						GMAC_TXQCTRL_PSTQX_MASK(queue);

	writel(value, ioaddr + base_register);
}

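/* Route a packet class (AV control, PTP, DCB control, untagged or
 * multicast/broadcast) to the given RX queue through MAC_RxQ_Ctrl1.
 * AV and MC/BC routing also need their respective queueing-enable bits.
 */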
static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
				    u8 packet, u32 queue)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	static const struct stmmac_rx_routing route_possibilities[] = {
		{ GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
		{ GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
		{ GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
		{ GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
		{ GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
	};

	value = readl(ioaddr + GMAC_RXQ_CTRL1);

	/* routing configuration */
	value &= ~route_possibilities[packet - 1].reg_mask;
	value |= (queue << route_possibilities[packet - 1].reg_shift) &
		 route_possibilities[packet - 1].reg_mask;

	/* some packets require extra ops */
	if (packet == PACKET_AVCPQ) {
		value &= ~GMAC_RXQCTRL_TACPQE;
		value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
	} else if (packet == PACKET_MCBCQ) {
		value &= ~GMAC_RXQCTRL_MCBCQEN;
		value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
	}

	writel(value, ioaddr + GMAC_RXQ_CTRL1);
}

static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
					  u32 rx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_RAA;
	switch (rx_alg) {
	case MTL_RX_ALGORITHM_SP:
		value |= MTL_OPERATION_RAA_SP;
		break;
	case MTL_RX_ALGORITHM_WSP:
		value |= MTL_OPERATION_RAA_WSP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
					  u32 tx_alg)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + MTL_OPERATION_MODE);

	value &= ~MTL_OPERATION_SCHALG_MASK;
	switch (tx_alg) {
	case MTL_TX_ALGORITHM_WRR:
		value |= MTL_OPERATION_SCHALG_WRR;
		break;
	case MTL_TX_ALGORITHM_WFQ:
		value |= MTL_OPERATION_SCHALG_WFQ;
		break;
	case MTL_TX_ALGORITHM_DWRR:
		value |= MTL_OPERATION_SCHALG_DWRR;
		break;
	case MTL_TX_ALGORITHM_SP:
		value |= MTL_OPERATION_SCHALG_SP;
		break;
	default:
		break;
	}

	writel(value, ioaddr + MTL_OPERATION_MODE);
}

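/* Program the weight used by the weighted TX scheduling algorithms for a
 * queue; the same ISCQW field holds the CBS idle slope when the queue is
 * configured for AVB.
 */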
static void dwmac4_set_mtl_tx_queue_weight(struct stmmac_priv *priv,
					   struct mac_device_info *hw,
					   u32 weight, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs,
							     queue));

	value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
	value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
	writel(value, ioaddr + mtl_txqx_weight_base_addr(dwmac4_addrs, queue));
}

static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	if (queue < 4) {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
	} else {
		value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
		value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
		value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
		writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
	}
}

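/* Enable the credit-based shaper (AV algorithm) on a TX queue and program
 * its send slope, idle slope and high/low credits.
 */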
static void dwmac4_config_cbs(struct stmmac_priv *priv,
			      struct mac_device_info *hw,
			      u32 send_slope, u32 idle_slope,
			      u32 high_credit, u32 low_credit, u32 queue)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
	pr_debug("\tsend_slope: 0x%08x\n", send_slope);
	pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
	pr_debug("\thigh_credit: 0x%08x\n", high_credit);
	pr_debug("\tlow_credit: 0x%08x\n", low_credit);

	/* enable AV algorithm */
	value = readl(ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));
	value |= MTL_ETS_CTRL_AVALG;
	value |= MTL_ETS_CTRL_CC;
	writel(value, ioaddr + mtl_etsx_ctrl_base_addr(dwmac4_addrs, queue));

	/* configure send slope */
	value = readl(ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));
	value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
	value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
	writel(value, ioaddr + mtl_send_slp_credx_base_addr(dwmac4_addrs,
							    queue));

	/* configure idle slope (same register as tx weight) */
	dwmac4_set_mtl_tx_queue_weight(priv, hw, idle_slope, queue);

	/* configure high credit */
	value = readl(ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_HC_MASK;
	value |= high_credit & MTL_HIGH_CRED_HC_MASK;
	writel(value, ioaddr + mtl_high_credx_base_addr(dwmac4_addrs, queue));

	/* configure low credit */
	value = readl(ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
	value &= ~MTL_HIGH_CRED_LC_MASK;
	value |= low_credit & MTL_HIGH_CRED_LC_MASK;
	writel(value, ioaddr + mtl_low_credx_base_addr(dwmac4_addrs, queue));
}

static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
{
	void __iomem *ioaddr = hw->pcsr;
	int i;

	for (i = 0; i < GMAC_REG_NUM; i++)
		reg_space[i] = readl(ioaddr + i * 4);
}

static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (hw->rx_csum)
		value |= GMAC_CONFIG_IPC;
	else
		value &= ~GMAC_CONFIG_IPC;

	writel(value, ioaddr + GMAC_CONFIG);

	value = readl(ioaddr + GMAC_CONFIG);

	return !!(value & GMAC_CONFIG_IPC);
}

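/* Set up PMT wake-up sources (magic packet and/or global unicast wake-up
 * frames). The receiver is re-enabled before power-down so that wake-up
 * packets can still be detected.
 */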
static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int pmt = 0;
	u32 config;

	if (mode & WAKE_MAGIC) {
		pr_debug("GMAC: WOL Magic frame\n");
		pmt |= power_down | magic_pkt_en;
	}
	if (mode & WAKE_UCAST) {
		pr_debug("GMAC: WOL on global unicast\n");
		pmt |= power_down | global_unicast | wake_up_frame_en;
	}

	if (pmt) {
		/* The receiver must be enabled for WOL before powering down */
		config = readl(ioaddr + GMAC_CONFIG);
		config |= GMAC_CONFIG_RE;
		writel(config, ioaddr + GMAC_CONFIG);
	}
	writel(pmt, ioaddr + GMAC_PMT);
}

static void dwmac4_set_umac_addr(struct mac_device_info *hw,
				 const unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_get_umac_addr(struct mac_device_info *hw,
				 unsigned char *addr, unsigned int reg_n)
{
	void __iomem *ioaddr = hw->pcsr;

	stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
				   GMAC_ADDR_LOW(reg_n));
}

static void dwmac4_set_eee_mode(struct mac_device_info *hw,
				bool en_tx_lpi_clockgating)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	/* Enable the link status receive on RGMII, SGMII or SMII
	 * receive path and instruct the transmit to enter in LPI
	 * state.
	 */
	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (en_tx_lpi_clockgating)
		value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

	if (link)
		value |= GMAC4_LPI_CTRL_STATUS_PLS;
	else
		value &= ~GMAC4_LPI_CTRL_STATUS_PLS;

	writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

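/* Program the LPI entry timer and toggle automatic LPI entry (LPIATE)
 * according to whether a non-zero timer value was requested.
 */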
static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = et & STMMAC_ET_MAX;
	int regval;

	/* Program LPI entry timer value into register */
	writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);

	/* Enable/disable LPI entry timer */
	regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
	regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;

	if (et)
		regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
	else
		regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;

	writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
}

static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
	void __iomem *ioaddr = hw->pcsr;
	int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);

	/* Program the timers in the LPI timer control register:
	 * LS: minimum time (ms) for which the link
	 *  status from PHY should be ok before transmitting
	 *  the LPI pattern.
	 * TW: minimum time (us) for which the core waits
	 *  after it has stopped transmitting the LPI pattern.
	 */
	writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
}

static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	u32 val;

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~GMAC_VLAN_TAG_VID;
	val |= GMAC_VLAN_TAG_ETV | vid;

	writel(val, ioaddr + GMAC_VLAN_TAG);
}

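/* Write one entry of the extended RX VLAN filter through the indirect
 * MAC_VLAN_Tag_Filter interface and poll the OB (operation busy) bit
 * until the access completes.
 */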
static int dwmac4_write_vlan_filter(struct net_device *dev,
				    struct mac_device_info *hw,
				    u8 index, u32 data)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int i, timeout = 10;
	u32 val;

	if (index >= hw->num_vlan)
		return -EINVAL;

	writel(data, ioaddr + GMAC_VLAN_TAG_DATA);

	val = readl(ioaddr + GMAC_VLAN_TAG);
	val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
		GMAC_VLAN_TAG_CTRL_CT |
		GMAC_VLAN_TAG_CTRL_OB);
	val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;

	writel(val, ioaddr + GMAC_VLAN_TAG);

	for (i = 0; i < timeout; i++) {
		val = readl(ioaddr + GMAC_VLAN_TAG);
		if (!(val & GMAC_VLAN_TAG_CTRL_OB))
			return 0;
		udelay(1);
	}

	netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");

	return -EBUSY;
}

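/* Add a VLAN ID to the RX filter: either program the single VLAN tag
 * register or store it in a free slot of the extended filter table.
 */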
static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int index = -1;
	u32 val = 0;
	int i, ret;

	if (vid > 4095)
		return -EINVAL;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		/* For single VLAN filter, VID 0 means VLAN promiscuous */
		if (vid == 0) {
			netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
			return -EPERM;
		}

		if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
			netdev_err(dev, "Only single VLAN ID supported\n");
			return -EPERM;
		}

		hw->vlan_filter[0] = vid;
		dwmac4_write_single_vlan(dev, vid);

		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;

	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] == val)
			return 0;
		else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
			index = i;
	}

	if (index == -1) {
		netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
			   hw->num_vlan);
		return -EPERM;
	}

	ret = dwmac4_write_vlan_filter(dev, hw, index, val);

	if (!ret)
		hw->vlan_filter[index] = val;

	return ret;
}

static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
				      struct mac_device_info *hw,
				      __be16 proto, u16 vid)
{
	int i, ret = 0;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
			hw->vlan_filter[0] = 0;
			dwmac4_write_single_vlan(dev, 0);
		}
		return 0;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
			ret = dwmac4_write_vlan_filter(dev, hw, i, 0);

			if (!ret)
				hw->vlan_filter[i] = 0;
			else
				return ret;
		}
	}

	return ret;
}

static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
					   struct mac_device_info *hw)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;
	u32 hash;
	u32 val;
	int i;

	/* Single Rx VLAN Filter */
	if (hw->num_vlan == 1) {
		dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
		return;
	}

	/* Extended Rx VLAN Filter Enable */
	for (i = 0; i < hw->num_vlan; i++) {
		if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
			val = hw->vlan_filter[i];
			dwmac4_write_vlan_filter(dev, hw, i, val);
		}
	}

	hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
	if (hash & GMAC_VLAN_VLHT) {
		value = readl(ioaddr + GMAC_VLAN_TAG);
		value |= GMAC_VLAN_VTHM;
		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

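/* Build MAC_Packet_Filter from the netdev state: promiscuous, all-multi,
 * multicast hash filtering, perfect unicast filtering and VLAN filtering.
 */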
static void dwmac4_set_filter(struct mac_device_info *hw,
			      struct net_device *dev)
{
	void __iomem *ioaddr = (void __iomem *)dev->base_addr;
	int numhashregs = (hw->multicast_filter_bins >> 5);
	int mcbitslog2 = hw->mcast_bits_log2;
	unsigned int value;
	u32 mc_filter[8];
	int i;

	memset(mc_filter, 0, sizeof(mc_filter));

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value &= ~GMAC_PACKET_FILTER_HMC;
	value &= ~GMAC_PACKET_FILTER_HPF;
	value &= ~GMAC_PACKET_FILTER_PCF;
	value &= ~GMAC_PACKET_FILTER_PM;
	value &= ~GMAC_PACKET_FILTER_PR;
	value &= ~GMAC_PACKET_FILTER_RA;
	if (dev->flags & IFF_PROMISC) {
		/* VLAN Tag Filter Fail Packets Queuing */
		if (hw->vlan_fail_q_en) {
			value = readl(ioaddr + GMAC_RXQ_CTRL4);
			value &= ~GMAC_RXQCTRL_VFFQ_MASK;
			value |= GMAC_RXQCTRL_VFFQE |
				 (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
			writel(value, ioaddr + GMAC_RXQ_CTRL4);
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
		} else {
			value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
		}

	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
		/* Pass all multi */
		value |= GMAC_PACKET_FILTER_PM;
		/* Set all the bits of the HASH tab */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
		struct netdev_hw_addr *ha;

		/* Hash filter for multicast */
		value |= GMAC_PACKET_FILTER_HMC;

		netdev_for_each_mc_addr(ha, dev) {
			/* The upper n bits of the calculated CRC are used to
			 * index the contents of the hash table. The number of
			 * bits used depends on the hardware configuration
			 * selected at core configuration time.
			 */
			u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
					ETH_ALEN)) >> (32 - mcbitslog2);
			/* The most significant bit determines the register to
			 * use (H/L) while the other 5 bits determine the bit
			 * within the register.
			 */
			mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
		}
	}

	for (i = 0; i < numhashregs; i++)
		writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));

	value |= GMAC_PACKET_FILTER_HPF;

	/* Handle multiple unicast addresses */
	if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
		/* Switch to promiscuous mode if more than 128 addrs
		 * are required
		 */
		value |= GMAC_PACKET_FILTER_PR;
	} else {
		struct netdev_hw_addr *ha;
		int reg = 1;

		netdev_for_each_uc_addr(ha, dev) {
			dwmac4_set_umac_addr(hw, ha->addr, reg);
			reg++;
		}

		while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
			writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
			writel(0, ioaddr + GMAC_ADDR_LOW(reg));
			reg++;
		}
	}

	/* VLAN filtering */
	if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
		value &= ~GMAC_PACKET_FILTER_VTFE;
	else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
		value |= GMAC_PACKET_FILTER_VTFE;

	writel(value, ioaddr + GMAC_PACKET_FILTER);
}

static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
			     unsigned int fc, unsigned int pause_time,
			     u32 tx_cnt)
{
	void __iomem *ioaddr = hw->pcsr;
	unsigned int flow = 0;
	u32 queue = 0;

	pr_debug("GMAC Flow-Control:\n");
	if (fc & FLOW_RX) {
		pr_debug("\tReceive Flow-Control ON\n");
		flow |= GMAC_RX_FLOW_CTRL_RFE;
	} else {
		pr_debug("\tReceive Flow-Control OFF\n");
	}
	writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);

	if (fc & FLOW_TX) {
		pr_debug("\tTransmit Flow-Control ON\n");

		if (duplex)
			pr_debug("\tduplex mode: PAUSE %d\n", pause_time);

		for (queue = 0; queue < tx_cnt; queue++) {
			flow = GMAC_TX_FLOW_CTRL_TFE;

			if (duplex)
				flow |=
				(pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);

			writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
		}
	} else {
		for (queue = 0; queue < tx_cnt; queue++)
			writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
	}
}

static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
			    bool loopback)
{
	dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}

static void dwmac4_rane(void __iomem *ioaddr, bool restart)
{
	dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
}

static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
{
	dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}

/* RGMII or SMII interface */
static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
{
	u32 status;

	status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
	x->irq_rgmii_n++;

	/* Check the link status */
	if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
		int speed_value;

		x->pcs_link = 1;

		speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
			       GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
		if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
			x->pcs_speed = SPEED_1000;
		else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
			x->pcs_speed = SPEED_100;
		else
			x->pcs_speed = SPEED_10;

		x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);

		pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
			x->pcs_duplex ? "Full" : "Half");
	} else {
		x->pcs_link = 0;
		pr_info("Link is Down\n");
	}
}

static int dwmac4_irq_mtl_status(struct stmmac_priv *priv,
				 struct mac_device_info *hw, u32 chan)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	void __iomem *ioaddr = hw->pcsr;
	u32 mtl_int_qx_status;
	int ret = 0;

	mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);

	/* Check MTL Interrupt */
	if (mtl_int_qx_status & MTL_INT_QX(chan)) {
		/* read Queue x Interrupt status */
		u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs,
							      chan));

		if (status & MTL_RX_OVERFLOW_INT) {
			/*  clear Interrupt */
			writel(status | MTL_RX_OVERFLOW_INT,
			       ioaddr + MTL_CHAN_INT_CTRL(dwmac4_addrs, chan));
			ret = CORE_IRQ_MTL_RX_OVERFLOW;
		}
	}

	return ret;
}

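/* Handle the MAC-level interrupt sources (MMC, PMT, LPI and PCS/RGMII
 * link events), update the extra statistics and return the CORE_IRQ_*
 * flags the caller needs to act on.
 */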
static int dwmac4_irq_status(struct mac_device_info *hw,
			     struct stmmac_extra_stats *x)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
	u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
	int ret = 0;

	/* Discard disabled bits */
	intr_status &= intr_enable;

	/* Not used events (e.g. MMC interrupts) are not handled. */
	if ((intr_status & mmc_tx_irq))
		x->mmc_tx_irq_n++;
	if (unlikely(intr_status & mmc_rx_irq))
		x->mmc_rx_irq_n++;
	if (unlikely(intr_status & mmc_rx_csum_offload_irq))
		x->mmc_rx_csum_offload_irq_n++;
	/* Clear the PMT bits 5 and 6 by reading the PMT status reg */
	if (unlikely(intr_status & pmt_irq)) {
		readl(ioaddr + GMAC_PMT);
		x->irq_receive_pmt_irq_n++;
	}

	/* MAC tx/rx EEE LPI entry/exit interrupts */
	if (intr_status & lpi_irq) {
		/* Clear LPI interrupt by reading MAC_LPI_Control_Status */
		u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);

		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
			ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
			x->irq_tx_path_in_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
			ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
			x->irq_tx_path_exit_lpi_mode_n++;
		}
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
			x->irq_rx_path_in_lpi_mode_n++;
		if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
			x->irq_rx_path_exit_lpi_mode_n++;
	}

	dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
	if (intr_status & PCS_RGSMIIIS_IRQ)
		dwmac4_phystatus(ioaddr, x);

	return ret;
}

static void dwmac4_debug(struct stmmac_priv *priv, void __iomem *ioaddr,
			 struct stmmac_extra_stats *x,
			 u32 rx_queues, u32 tx_queues)
{
	const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
	u32 value;
	u32 queue;

	for (queue = 0; queue < tx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_TX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_TXSTSFSTS)
			x->mtl_tx_status_fifo_full++;
		if (value & MTL_DEBUG_TXFSTS)
			x->mtl_tx_fifo_not_empty++;
		if (value & MTL_DEBUG_TWCSTS)
			x->mmtl_fifo_ctrl++;
		if (value & MTL_DEBUG_TRCSTS_MASK) {
			u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
				     >> MTL_DEBUG_TRCSTS_SHIFT;
			if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
				x->mtl_tx_fifo_read_ctrl_write++;
			else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
				x->mtl_tx_fifo_read_ctrl_wait++;
			else if (trcsts == MTL_DEBUG_TRCSTS_READ)
				x->mtl_tx_fifo_read_ctrl_read++;
			else
				x->mtl_tx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_TXPAUSED)
			x->mac_tx_in_pause++;
	}

	for (queue = 0; queue < rx_queues; queue++) {
		value = readl(ioaddr + MTL_CHAN_RX_DEBUG(dwmac4_addrs, queue));

		if (value & MTL_DEBUG_RXFSTS_MASK) {
			u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
				     >> MTL_DEBUG_RRCSTS_SHIFT;

			if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
				x->mtl_rx_fifo_fill_level_full++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
				x->mtl_rx_fifo_fill_above_thresh++;
			else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
				x->mtl_rx_fifo_fill_below_thresh++;
			else
				x->mtl_rx_fifo_fill_level_empty++;
		}
		if (value & MTL_DEBUG_RRCSTS_MASK) {
			u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
				     MTL_DEBUG_RRCSTS_SHIFT;

			if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
				x->mtl_rx_fifo_read_ctrl_flush++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
				x->mtl_rx_fifo_read_ctrl_read_data++;
			else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
				x->mtl_rx_fifo_read_ctrl_status++;
			else
				x->mtl_rx_fifo_read_ctrl_idle++;
		}
		if (value & MTL_DEBUG_RWCSTS)
			x->mtl_rx_fifo_ctrl_active++;
	}

	/* GMAC debug */
	value = readl(ioaddr + GMAC_DEBUG);

	if (value & GMAC_DEBUG_TFCSTS_MASK) {
		u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
			      >> GMAC_DEBUG_TFCSTS_SHIFT;

		if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
			x->mac_tx_frame_ctrl_xfer++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
			x->mac_tx_frame_ctrl_pause++;
		else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
			x->mac_tx_frame_ctrl_wait++;
		else
			x->mac_tx_frame_ctrl_idle++;
	}
	if (value & GMAC_DEBUG_TPESTS)
		x->mac_gmii_tx_proto_engine++;
	if (value & GMAC_DEBUG_RFCFCSTS_MASK)
		x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
					    >> GMAC_DEBUG_RFCFCSTS_SHIFT;
	if (value & GMAC_DEBUG_RPESTS)
		x->mac_gmii_rx_proto_engine++;
}

static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	if (enable)
		value |= GMAC_CONFIG_LM;
	else
		value &= ~GMAC_CONFIG_LM;

	writel(value, ioaddr + GMAC_CONFIG);
}

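/* Program VLAN hash or perfect filtering in MAC_VLAN_Tag, including the
 * double (S-VLAN + C-VLAN) matching bits when requested; clear the filter
 * when neither a hash nor a perfect match is given.
 */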
static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
				    __le16 perfect_match, bool is_double)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);

	value = readl(ioaddr + GMAC_VLAN_TAG);

	if (hash) {
		value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value, ioaddr + GMAC_VLAN_TAG);
	} else if (perfect_match) {
		u32 value = GMAC_VLAN_ETV;

		if (is_double) {
			value |= GMAC_VLAN_EDVLP;
			value |= GMAC_VLAN_ESVL;
			value |= GMAC_VLAN_DOVLTC;
		}

		writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
	} else {
		value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
		value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
		value &= ~GMAC_VLAN_DOVLTC;
		value &= ~GMAC_VLAN_VID;

		writel(value, ioaddr + GMAC_VLAN_TAG);
	}
}

static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
{
	u32 value = readl(ioaddr + GMAC_CONFIG);

	value &= ~GMAC_CONFIG_SARC;
	value |= val << GMAC_CONFIG_SARC_SHIFT;

	writel(value, ioaddr + GMAC_CONFIG);
}

static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_VLAN_INCL);
	value |= GMAC_VLAN_VLTI;
	value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
	value &= ~GMAC_VLAN_VLC;
	value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
	writel(value, ioaddr + GMAC_VLAN_INCL);
}

static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
				   u32 addr)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	writel(addr, ioaddr + GMAC_ARP_ADDR);

	value = readl(ioaddr + GMAC_CONFIG);
	if (en)
		value |= GMAC_CONFIG_ARPEN;
	else
		value &= ~GMAC_CONFIG_ARPEN;
	writel(value, ioaddr + GMAC_CONFIG);
}

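/* Program one L3 filter entry (IPv4/IPv6 source or destination address
 * match, optionally inverted); writing 0 to the control register disables
 * the entry again.
 */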
static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool ipv6, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));

	/* For IPv6 not both SA/DA filters can be active */
	if (ipv6) {
		value |= GMAC_L3PEN0;
		value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
		value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	} else {
		value &= ~GMAC_L3PEN0;
		if (sa) {
			value |= GMAC_L3SAM0;
			if (inv)
				value |= GMAC_L3SAIM0;
		} else {
			value |= GMAC_L3DAM0;
			if (inv)
				value |= GMAC_L3DAIM0;
		}
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
	} else {
		writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
	}

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
				   bool en, bool udp, bool sa, bool inv,
				   u32 match)
{
	void __iomem *ioaddr = hw->pcsr;
	u32 value;

	value = readl(ioaddr + GMAC_PACKET_FILTER);
	value |= GMAC_PACKET_FILTER_IPFE;
	writel(value, ioaddr + GMAC_PACKET_FILTER);

	value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
	if (udp) {
		value |= GMAC_L4PEN0;
	} else {
		value &= ~GMAC_L4PEN0;
	}

	value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
	value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
	if (sa) {
		value |= GMAC_L4SPM0;
		if (inv)
			value |= GMAC_L4SPIM0;
	} else {
		value |= GMAC_L4DPM0;
		if (inv)
			value |= GMAC_L4DPIM0;
	}

	writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));

	if (sa) {
		value = match & GMAC_L4SP0;
	} else {
		value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
	}

	writel(value, ioaddr + GMAC_L4_ADDR(filter_no));

	if (!en)
		writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));

	return 0;
}

const struct stmmac_ops dwmac4_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac410_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

const struct stmmac_ops dwmac510_ops = {
	.core_init = dwmac4_core_init,
	.phylink_get_caps = dwmac4_phylink_get_caps,
	.set_mac = stmmac_dwmac4_set_mac,
	.rx_ipc = dwmac4_rx_ipc_enable,
	.rx_queue_enable = dwmac4_rx_queue_enable,
	.rx_queue_prio = dwmac4_rx_queue_priority,
	.tx_queue_prio = dwmac4_tx_queue_priority,
	.rx_queue_routing = dwmac4_rx_queue_routing,
	.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
	.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
	.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
	.map_mtl_to_dma = dwmac4_map_mtl_dma,
	.config_cbs = dwmac4_config_cbs,
	.dump_regs = dwmac4_dump_regs,
	.host_irq_status = dwmac4_irq_status,
	.host_mtl_irq_status = dwmac4_irq_mtl_status,
	.flow_ctrl = dwmac4_flow_ctrl,
	.pmt = dwmac4_pmt,
	.set_umac_addr = dwmac4_set_umac_addr,
	.get_umac_addr = dwmac4_get_umac_addr,
	.set_eee_mode = dwmac4_set_eee_mode,
	.reset_eee_mode = dwmac4_reset_eee_mode,
	.set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
	.set_eee_timer = dwmac4_set_eee_timer,
	.set_eee_pls = dwmac4_set_eee_pls,
	.pcs_ctrl_ane = dwmac4_ctrl_ane,
	.pcs_rane = dwmac4_rane,
	.pcs_get_adv_lp = dwmac4_get_adv_lp,
	.debug = dwmac4_debug,
	.set_filter = dwmac4_set_filter,
	.safety_feat_config = dwmac5_safety_feat_config,
	.safety_feat_irq_status = dwmac5_safety_feat_irq_status,
	.safety_feat_dump = dwmac5_safety_feat_dump,
	.rxp_config = dwmac5_rxp_config,
	.flex_pps_config = dwmac5_flex_pps_config,
	.set_mac_loopback = dwmac4_set_mac_loopback,
	.update_vlan_hash = dwmac4_update_vlan_hash,
	.sarc_configure = dwmac4_sarc_configure,
	.enable_vlan = dwmac4_enable_vlan,
	.set_arp_offload = dwmac4_set_arp_offload,
	.config_l3_filter = dwmac4_config_l3_filter,
	.config_l4_filter = dwmac4_config_l4_filter,
	.est_configure = dwmac5_est_configure,
	.est_irq_status = dwmac5_est_irq_status,
	.fpe_configure = dwmac5_fpe_configure,
	.fpe_send_mpacket = dwmac5_fpe_send_mpacket,
	.fpe_irq_status = dwmac5_fpe_irq_status,
	.add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
	.del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
	.restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
};

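/* Decode the NRVF field of MAC_HW_Feature3 into the number of extended
 * RX VLAN filter entries supported by this instance of the core.
 */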
static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
{
	u32 val, num_vlan;

	val = readl(ioaddr + GMAC_HW_FEATURE3);
	switch (val & GMAC_HW_FEAT_NRVF) {
	case 0:
		num_vlan = 1;
		break;
	case 1:
		num_vlan = 4;
		break;
	case 2:
		num_vlan = 8;
		break;
	case 3:
		num_vlan = 16;
		break;
	case 4:
		num_vlan = 24;
		break;
	case 5:
		num_vlan = 32;
		break;
	default:
		num_vlan = 1;
	}

	return num_vlan;
}

int dwmac4_setup(struct stmmac_priv *priv)
{
	struct mac_device_info *mac = priv->hw;

	dev_info(priv->device, "\tDWMAC4/5\n");

	priv->dev->priv_flags |= IFF_UNICAST_FLT;
	mac->pcsr = priv->ioaddr;
	mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
	mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
	mac->mcast_bits_log2 = 0;

	if (mac->multicast_filter_bins)
		mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);

	mac->link.caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
			 MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
	mac->link.duplex = GMAC_CONFIG_DM;
	mac->link.speed10 = GMAC_CONFIG_PS;
	mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->link.speed1000 = 0;
	mac->link.speed2500 = GMAC_CONFIG_FES;
	mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
	mac->mii.addr = GMAC_MDIO_ADDR;
	mac->mii.data = GMAC_MDIO_DATA;
	mac->mii.addr_shift = 21;
	mac->mii.addr_mask = GENMASK(25, 21);
	mac->mii.reg_shift = 16;
	mac->mii.reg_mask = GENMASK(20, 16);
	mac->mii.clk_csr_shift = 8;
	mac->mii.clk_csr_mask = GENMASK(11, 8);
	mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);

	return 0;
}