/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Ravi Patel <rapatel@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

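/* Encode the ring's base address, size and threshold into the state
 * words that xgene_enet_write_ring_state() later pushes to hardware.
 * The DMA address is shifted right by 8 before being split across
 * state words 2 and 3, so descriptor rings are assumed to be 256-byte
 * aligned.
 */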
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

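/* Bind the ring number to its ring id.  The id sits in the low 10 bits
 * of CSR_RING_ID (OVERWRITE allows updating an existing entry), while
 * CSR_RING_ID_BUF carries the ring number in bits 18:9 together with
 * the prefetch-enable and, for buffer pools, the buffer-pool flag.
 */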
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;
	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

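/* Bring a descriptor ring into service: clear stale state, program the
 * new state and ring id, then, for CPU-owned rings, mark every slot
 * empty and set the ring's bit in CSR_RING_NE_INT_MODE (presumably the
 * non-empty interrupt enable, going by the register name).
 */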
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}
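
/* Illustrative sketch only, not code from this file: a consumer would
 * typically pair ->len() with ->wr_cmd(), reading the ring occupancy
 * and then writing back a negative count to return processed slots to
 * the hardware, e.g.
 *
 *	while ((pending = xgene_enet_ring_len(ring)) != 0) {
 *		process_descriptors(ring, pending);	// hypothetical helper
 *		xgene_enet_wr_cmd(ring, -pending);
 *	}
 */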

static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring)
{
	u32 data = 0x7777;

	xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data);
	xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40);
	xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80);
}

void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    struct xgene_enet_pdata *pdata,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		ring->rx_dropped++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

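/* Indirect register access: write the target register address and data
 * to the mailbox registers, issue a write command, then poll the
 * command-done register (for up to ~10us here) before clearing the
 * command to finish the cycle.
 */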
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
				  u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

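/* MDIO cycles go through the MAC's MII management registers: program
 * the PHY and register address into MII_MGMT_ADDRESS, start the cycle
 * via MII_MGMT_CONTROL (write) or MII_MGMT_COMMAND (read), and poll
 * the BUSY bit in MII_MGMT_INDICATORS until it clears or the wait
 * budget runs out.
 */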
static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
			       u32 reg, u16 data)
{
	u32 addr = 0, wr_data = 0;
	u32 done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);

	PHY_CONTROL_SET(&wr_data, data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT write failed\n");
		return -EBUSY;
	}

	return 0;
}

static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
			      u8 phy_id, u32 reg)
{
	u32 addr = 0;
	u32 data, done;
	u8 wait = 10;

	PHY_ADDR_SET(&addr, phy_id);
	REG_ADDR_SET(&addr, reg);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
	do {
		usleep_range(5, 10);
		xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
	} while ((done & BUSY_MASK) && wait--);

	if (done & BUSY_MASK) {
		netdev_err(pdata->ndev, "MII_MGMT read failed\n");
		return -EBUSY;
	}

	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);

	return data;
}

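/* The station address registers take the MAC address as two
 * little-endian words: bytes 0-3 packed into STATION_ADDR0 and bytes
 * 4-5 into the upper half of STATION_ADDR1.
 */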
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mcx_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

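/* Pick the Ethernet reference clock for the negotiated speed.  On
 * device-tree systems the parent clock is set directly (2.5, 25 or
 * 125 MHz for 10/100/1000 Mbps, the usual RGMII rates); on ACPI
 * systems the same selection is delegated to the platform's
 * S10/S100/S1G methods.
 */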
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

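/* Full MAC bring-up for the current PHY speed: reset the MAC, program
 * the interface mode, ICM wait states and RGMII speed bit to match
 * 10/100/1000 Mbps operation, set the station address and MDC clock,
 * and finally open the Rx/Tx gates so traffic can flow.
 */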
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	u32 value, mc2;
	u32 intf_ctl, rgmii;
	u32 icm0, icm2;

	xgene_gmac_reset(pdata);

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_2_ADDR, &mc2);
	xgene_enet_rd_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, &intf_ctl);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		if (dev->of_node) {
			CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
			CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		}
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC;
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);

	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &value);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

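/* Classifier (CLE) bypass: steer every received frame straight to one
 * destination ring and free-buffer pool instead of running it through
 * classification.
 */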
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb;
	u32 fpsel;

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mcx_mac(pdata, MAC_CONFIG_1_ADDR, &data);
	xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

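/* The ring manager is shared SoC infrastructure: refuse to proceed
 * unless its clock is enabled and it has been taken out of soft reset.
 */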
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

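/* Reset the Ethernet block before use.  Cycling the port clock
 * (enable, disable, enable) appears to be what pulls the block out of
 * reset here; that reading is inferred from the code rather than from
 * a datasheet.
 */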
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	u32 val;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (!IS_ERR(pdata->clk)) {
		clk_prepare_enable(pdata->clk);
		clk_disable_unprepare(pdata->clk);
		clk_prepare_enable(pdata->clk);
		xgene_enet_ecc_init(pdata);
	}
	xgene_enet_config_ring_if_assoc(pdata);

	/* Enable auto-incr for scanning */
	xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
	val |= SCAN_AUTO_INCR;
	MGMT_CLOCK_SEL_SET(&val, 1);
	xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);

	return 0;
}

static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	if (!IS_ERR(pdata->clk))
		clk_disable_unprepare(pdata->clk);
}

static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct xgene_enet_pdata *pdata = bus->priv;
	u32 val;

	val = xgene_mii_phy_read(pdata, mii_id, regnum);
	netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);

	return val;
}

static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
				 u16 val)
{
	struct xgene_enet_pdata *pdata = bus->priv;

	netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
		   mii_id, regnum, val);
	return xgene_mii_phy_write(pdata, mii_id, regnum, val);
}

static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = pdata->phy_dev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			xgene_gmac_init(pdata);
			xgene_gmac_rx_enable(pdata);
			xgene_gmac_tx_enable(pdata);
			phy_print_status(phydev);
		}
	} else {
		xgene_gmac_rx_disable(pdata);
		xgene_gmac_tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

static int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *phy_np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
		if (!phy_np) {
			netdev_dbg(ndev, "No phy-handle found in DT\n");
			return -ENODEV;
		}

		phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
					 0, pdata->phy_mode);
		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}

		pdata->phy_dev = phy_dev;
	} else {
		phy_dev = pdata->phy_dev;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_dev->supported &= ~SUPPORTED_10baseT_Half &
			      ~SUPPORTED_100baseT_Half &
			      ~SUPPORTED_1000baseT_Half;
	phy_dev->advertising = phy_dev->supported;

	return 0;
}

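/* Register the MDIO bus.  With device tree the bus is populated from
 * the "apm,xgene-mdio" child node; otherwise (the ACPI case) PHY
 * auto-probing is masked off and the single PHY named by the
 * "phy-channel" or "phy-addr" property is created and registered by
 * hand.
 */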
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	int ret;
	u32 phy_id;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_id);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_id);
	if (ret)
		return -EINVAL;

	phy = get_phy_device(mdio, phy_id, false);
	if (IS_ERR(phy))
		return -EIO;

	ret = phy_device_register(phy);
	if (ret)
		phy_device_free(phy);
	else
		pdata->phy_dev = phy;

	return ret;
}

int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_enet_mdio_read;
	mdio_bus->write = xgene_enet_mdio_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = pdata;
	mdio_bus->parent = &ndev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	if (pdata->phy_dev)
		phy_disconnect(pdata->phy_dev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.set_mac_addr = xgene_gmac_set_mac_addr,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
	.coalesce = xgene_enet_setup_coalescing,
};