1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *	    Keyur Chudgar <kchudgar@apm.com>
6  *
7  * This program is free software; you can redistribute  it and/or modify it
8  * under  the terms of  the GNU General  Public License as published by the
9  * Free Software Foundation;  either version 2 of the  License, or (at your
10  * option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include <linux/of_gpio.h>
22 #include <linux/gpio.h>
23 #include "xgene_enet_main.h"
24 #include "xgene_enet_hw.h"
25 #include "xgene_enet_xgmac.h"
26 
27 static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
28 			      u32 offset, u32 val)
29 {
30 	void __iomem *addr = pdata->eth_csr_addr + offset;
31 
32 	iowrite32(val, addr);
33 }
34 
35 static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
36 				  u32 offset, u32 val)
37 {
38 	void __iomem *addr = pdata->eth_ring_if_addr + offset;
39 
40 	iowrite32(val, addr);
41 }
42 
43 static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
44 				   u32 offset, u32 val)
45 {
46 	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
47 
48 	iowrite32(val, addr);
49 }
50 
/* Issue one indirect register write: latch the target register address
 * and data word, kick the write command, then poll the command-done
 * register (up to ~10us) for completion.
 *
 * Returns true on success, false if the command never completed.
 * Note the command register is cleared afterwards to re-arm the
 * interface for the next indirect access.
 */
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}
73 
74 static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
75 			      u32 wr_addr, u32 wr_data)
76 {
77 	void __iomem *addr, *wr, *cmd, *cmd_done;
78 
79 	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
80 	wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
81 	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
82 	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
83 
84 	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
85 		netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
86 			   wr_addr);
87 }
88 
89 static void xgene_enet_wr_axg_csr(struct xgene_enet_pdata *pdata,
90 				  u32 offset, u32 val)
91 {
92 	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
93 
94 	iowrite32(val, addr);
95 }
96 
97 static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
98 			      u32 offset, u32 *val)
99 {
100 	void __iomem *addr = pdata->eth_csr_addr + offset;
101 
102 	*val = ioread32(addr);
103 }
104 
105 static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
106 				   u32 offset, u32 *val)
107 {
108 	void __iomem *addr = pdata->eth_diag_csr_addr + offset;
109 
110 	*val = ioread32(addr);
111 }
112 
/* Issue one indirect register read: latch the target register address,
 * kick the read command, poll the command-done register (up to ~10us),
 * then fetch the result into *@rd_data.
 *
 * Returns true on success, false if the command never completed
 * (in which case *@rd_data is left untouched).  The command register
 * is cleared afterwards to re-arm the interface.
 */
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}
135 
136 static bool xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
137 			      u32 rd_addr, u32 *rd_data)
138 {
139 	void __iomem *addr, *rd, *cmd, *cmd_done;
140 	bool success;
141 
142 	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
143 	rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
144 	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
145 	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;
146 
147 	success = xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data);
148 	if (!success)
149 		netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
150 			   rd_addr);
151 
152 	return success;
153 }
154 
155 static void xgene_enet_rd_axg_csr(struct xgene_enet_pdata *pdata,
156 				  u32 offset, u32 *val)
157 {
158 	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;
159 
160 	*val = ioread32(addr);
161 }
162 
/* Release the Ethernet block memories from shutdown and wait for all
 * memory banks to report ready (each ready bit set -> 0xffffffff).
 * Polls for roughly 10 * 100us before giving up.
 *
 * Returns 0 on success, -ENODEV if the memories never became ready.
 */
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}
182 
/* Fetch the RX (ICM) and TX (ECM) drop counters from the shared
 * drop-count register; both fields are extracted from the same read.
 */
static void xgene_xgmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				     u32 *rx, u32 *tx)
{
	u32 count;

	xgene_enet_rd_axg_csr(pdata, XGENET_ICM_ECM_DROP_COUNT_REG0, &count);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - ICM_ECM_DROP_COUNT not clear-on-read */
	xgene_enet_rd_axg_csr(pdata, XGENET_ECM_CONFIG0_REG_0, &count);
}
194 
195 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
196 {
197 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
198 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
199 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
200 	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
201 }
202 
/* Pulse the XGMAC soft-reset bit: assert HSTMACRST, then clear the
 * whole config-0 register to release the reset.
 */
static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}
208 
/* Pulse the PCS reset bit in PCS_CONTROL_1 (read-modify-write so the
 * other control bits are preserved).  Silently bails out if the
 * indirect PCS read fails, since we then have no base value to write.
 */
static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
{
	u32 data;

	if (!xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data))
		return;

	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
}
219 
220 static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
221 {
222 	u32 addr0, addr1;
223 	u8 *dev_addr = pdata->ndev->dev_addr;
224 
225 	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
226 		(dev_addr[1] << 8) | dev_addr[0];
227 	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);
228 
229 	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
230 	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
231 }
232 
/* Program the TSO MSS value for queue @index.  Two MSS values are
 * packed per 32-bit register (MSS0 low, MSS1 high), so indices 0-1 use
 * XG_TSIF_MSS_REG0 and indices 2-3 the register 4 bytes above it.
 * The read-modify-write below updates only this index's field and
 * preserves its register partner.
 */
static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata,
				u16 mss, u8 index)
{
	u8 offset;
	u32 data;

	offset = (index < 2) ? 0 : 4;
	xgene_enet_rd_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, &data);

	if (!(index & 0x1))
		data = SET_VAL(TSO_MSS1, data >> TSO_MSS1_POS) |
			SET_VAL(TSO_MSS0, mss);
	else
		data = SET_VAL(TSO_MSS1, mss) | SET_VAL(TSO_MSS0, data);

	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR + offset, data);
}
250 
251 static void xgene_xgmac_set_frame_size(struct xgene_enet_pdata *pdata, int size)
252 {
253 	xgene_enet_wr_mac(pdata, HSTMAXFRAME_LENGTH_ADDR,
254 			  ((((size + 2) >> 2) << 16) | size));
255 }
256 
257 static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
258 {
259 	u32 data;
260 
261 	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);
262 
263 	return data;
264 }
265 
266 static void xgene_xgmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
267 					bool enable)
268 {
269 	u32 data;
270 
271 	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, &data);
272 
273 	if (enable)
274 		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
275 	else
276 		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);
277 
278 	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_ECM_CFG_0_ADDR, data);
279 }
280 
281 static void xgene_xgmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
282 {
283 	u32 data;
284 
285 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
286 
287 	if (enable)
288 		data |= HSTTCTLEN;
289 	else
290 		data &= ~HSTTCTLEN;
291 
292 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
293 
294 	pdata->mac_ops->enable_tx_pause(pdata, enable);
295 }
296 
297 static void xgene_xgmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
298 {
299 	u32 data;
300 
301 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
302 
303 	if (enable)
304 		data |= HSTRCTLEN;
305 	else
306 		data &= ~HSTRCTLEN;
307 
308 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);
309 }
310 
/* Full XGMAC bring-up: reset the MAC, then program MAC config, station
 * address, RSIF FIFO thresholds (per errata), spare/bypass config, and
 * HW pause-frame generation, finishing with the configured flow-control
 * state.  Register write order follows the hardware init sequence —
 * do not reorder.
 */
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_xgmac_reset(pdata);

	/* Enable padding, disable length-field checking */
	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
	data |= HSTPPEN;
	data &= ~HSTLENCHK;
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

	xgene_xgmac_set_mac_addr(pdata);

	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	/* Errata 10GE_1 - FIFO threshold default value incorrect */
	RSIF_CLE_BUFF_THRESH_SET(&data, XG_RSIF_CLE_BUFF_THRESH);
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);

	/* Errata 10GE_1 - FIFO threshold default value incorrect */
	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, &data);
	RSIF_PLC_CLE_BUFF_THRESH_SET(&data, XG_RSIF_PLC_CLE_BUFF_THRESH);
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG1_REG_ADDR, data);

	/* NOTE(review): BIT(12) and 0x82 are undocumented magic values
	 * carried from the vendor init sequence — confirm against the
	 * X-Gene register manual before changing.
	 */
	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
	data |= BIT(12);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);

	/* Configure HW pause frame generation */
	xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, &data);
	data = (DEF_QUANTA << 16) | (data & 0xFFFF);
	xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF0_ADDR, data);

	/* Second-generation ENET exposes an extra DPF opcode register */
	if (pdata->enet_id != XGENE_ENET1) {
		xgene_enet_rd_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, &data);
		data = (NORM_PAUSE_OPCODE << 16) | (data & 0xFFFF);
		xgene_enet_wr_axg_csr(pdata, XGENET_CSR_MULTI_DPF1_ADDR, data);
	}

	/* RX buffer thresholds for pause on/off generation */
	data = (XG_DEF_PAUSE_OFF_THRES << 16) | XG_DEF_PAUSE_THRES;
	xgene_enet_wr_csr(pdata, XG_RXBUF_PAUSE_THRESH, data);

	xgene_xgmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_xgmac_flowctl_rx(pdata, pdata->rx_pause);
}
359 
360 static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
361 {
362 	u32 data;
363 
364 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
365 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
366 }
367 
368 static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
369 {
370 	u32 data;
371 
372 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
373 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
374 }
375 
376 static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
377 {
378 	u32 data;
379 
380 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
381 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
382 }
383 
384 static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
385 {
386 	u32 data;
387 
388 	data = xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1);
389 	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
390 }
391 
392 static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
393 {
394 	struct device *dev = &pdata->pdev->dev;
395 
396 	if (!xgene_ring_mgr_init(pdata))
397 		return -ENODEV;
398 
399 	if (dev->of_node) {
400 		clk_prepare_enable(pdata->clk);
401 		udelay(5);
402 		clk_disable_unprepare(pdata->clk);
403 		udelay(5);
404 		clk_prepare_enable(pdata->clk);
405 		udelay(5);
406 	} else {
407 #ifdef CONFIG_ACPI
408 		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
409 			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
410 					     "_RST", NULL, NULL);
411 		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
412 					   "_INI")) {
413 			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
414 					     "_INI", NULL, NULL);
415 		}
416 #endif
417 	}
418 
419 	xgene_enet_ecc_init(pdata);
420 	xgene_enet_config_ring_if_assoc(pdata);
421 
422 	return 0;
423 }
424 
/* Configure classifier (CLE) bypass mode: enable bypass in REG0, then
 * program REG1 with the destination ring and the free-pool selectors
 * derived from the buffer pool IDs, so received frames go straight to
 * @dst_ring_num without classification.
 */
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
				    u32 dst_ring_num, u16 bufpool_id,
				    u16 nxtbufpool_id)
{
	u32 cb, fpsel, nxtfpsel;

	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	/* NOTE(review): IP protocol field hard-coded to 3 — presumably
	 * "any"; confirm against the CLE register documentation.
	 */
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);
	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
	pr_info("+ cle_bypass: fpsel: %d nxtfpsel: %d\n", fpsel, nxtfpsel);
}
445 
446 static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
447 {
448 	struct device *dev = &pdata->pdev->dev;
449 
450 	if (dev->of_node) {
451 		if (!IS_ERR(pdata->clk))
452 			clk_disable_unprepare(pdata->clk);
453 	}
454 }
455 
456 static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
457 			     struct xgene_enet_desc_ring *ring)
458 {
459 	u32 addr, data;
460 
461 	if (xgene_enet_is_bufpool(ring->id)) {
462 		addr = ENET_CFGSSQMIFPRESET_ADDR;
463 		data = BIT(xgene_enet_get_fpsel(ring->id));
464 	} else {
465 		addr = ENET_CFGSSQMIWQRESET_ADDR;
466 		data = BIT(xgene_enet_ring_bufnum(ring->id));
467 	}
468 
469 	xgene_enet_wr_ring_if(pdata, addr, data);
470 }
471 
472 static int xgene_enet_gpio_lookup(struct xgene_enet_pdata *pdata)
473 {
474 	struct device *dev = &pdata->pdev->dev;
475 
476 	pdata->sfp_rdy = gpiod_get(dev, "rxlos", GPIOD_IN);
477 	if (IS_ERR(pdata->sfp_rdy))
478 		pdata->sfp_rdy = gpiod_get(dev, "sfp", GPIOD_IN);
479 
480 	if (IS_ERR(pdata->sfp_rdy))
481 		return -ENODEV;
482 
483 	return 0;
484 }
485 
/* Delayed-work handler that polls the XFI link status and toggles the
 * net_device carrier state to match, enabling/disabling the MAC RX/TX
 * paths on transitions.  Reschedules itself with a poll interval that
 * depends on the current link state.
 */
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
					 struct xgene_enet_pdata, link_work);
	struct net_device *ndev = pdata->ndev;
	u32 link_status, poll_interval;

	link_status = xgene_enet_link_status(pdata);
	/* If an SFP "ready" GPIO is in use, a deasserted GPIO overrides
	 * a link-up indication from the MAC.  The GPIO descriptor is
	 * looked up lazily here when probe-time lookup failed.
	 */
	if (pdata->sfp_gpio_en && link_status &&
	    (!IS_ERR(pdata->sfp_rdy) || !xgene_enet_gpio_lookup(pdata)) &&
	    !gpiod_get_value(pdata->sfp_rdy))
		link_status = 0;

	if (link_status) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_xgmac_rx_enable(pdata);
			xgene_xgmac_tx_enable(pdata);
			netdev_info(ndev, "Link is Up - 10Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_xgmac_rx_disable(pdata);
			xgene_xgmac_tx_disable(pdata);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;

		/* Reset the PCS while the link is down, presumably to
		 * help it relock when the peer returns — TODO confirm
		 * whether this is an errata workaround.
		 */
		xgene_pcs_reset(pdata);
	}

	schedule_delayed_work(&pdata->link_work, poll_interval);
}
521 
/* MAC-level operations exported for the 10GbE (XGMAC) interface */
const struct xgene_mac_ops xgene_xgmac_ops = {
	.init = xgene_xgmac_init,
	.reset = xgene_xgmac_reset,
	.rx_enable = xgene_xgmac_rx_enable,
	.tx_enable = xgene_xgmac_tx_enable,
	.rx_disable = xgene_xgmac_rx_disable,
	.tx_disable = xgene_xgmac_tx_disable,
	.set_mac_addr = xgene_xgmac_set_mac_addr,
	.set_framesize = xgene_xgmac_set_frame_size,
	.set_mss = xgene_xgmac_set_mss,
	.get_drop_cnt = xgene_xgmac_get_drop_cnt,
	.link_state = xgene_enet_link_state,
	.enable_tx_pause = xgene_xgmac_enable_tx_pause,
	.flowctl_rx = xgene_xgmac_flowctl_rx,
	.flowctl_tx = xgene_xgmac_flowctl_tx
};
538 
/* Port-level operations exported for the 10GbE (XGMAC) interface */
const struct xgene_port_ops xgene_xgport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_xgcle_bypass,
	.shutdown = xgene_enet_shutdown,
};
545