/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_xgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

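/*
 * Indirect register access: the MAC and PCS register banks are reached
 * through an address/data/command window.  Latch the target address and
 * data, kick the write command, then poll the command-done register for
 * up to ~10us so the caller knows whether the access completed.
 */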
static bool xgene_enet_wr_indirect(void __iomem *addr, void __iomem *wr,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 wr_addr, u32 wr_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	/* wait for write command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata,
			      u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "MCX mac write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_wr_pcs(struct xgene_enet_pdata *pdata,
			      u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	wr = pdata->pcs_addr + PCS_WRITE_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_wr_indirect(addr, wr, cmd, cmd_done, wr_addr, wr_data))
		netdev_err(pdata->ndev, "PCS write failed, addr: %04x\n",
			   wr_addr);
}

static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

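/*
 * Indirect read: latch the target register address, issue the read
 * command, poll the command-done register, then fetch the data word.
 * Returns false if the command does not complete within ~10us.
 */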
static bool xgene_enet_rd_indirect(void __iomem *addr, void __iomem *rd,
				   void __iomem *cmd, void __iomem *cmd_done,
				   u32 rd_addr, u32 *rd_data)
{
	u32 done;
	u8 wait = 10;

	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	/* wait for read command to complete */
	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		return false;

	*rd_data = ioread32(rd);
	iowrite32(0, cmd);

	return true;
}

static void xgene_enet_rd_mac(struct xgene_enet_pdata *pdata,
			      u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "MCX mac read failed, addr: %04x\n",
			   rd_addr);
}

static void xgene_enet_rd_pcs(struct xgene_enet_pdata *pdata,
			      u32 rd_addr, u32 *rd_data)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;

	addr = pdata->pcs_addr + PCS_ADDR_REG_OFFSET;
	rd = pdata->pcs_addr + PCS_READ_REG_OFFSET;
	cmd = pdata->pcs_addr + PCS_COMMAND_REG_OFFSET;
	cmd_done = pdata->pcs_addr + PCS_COMMAND_DONE_REG_OFFSET;

	if (!xgene_enet_rd_indirect(addr, rd, cmd, cmd_done, rd_addr, rd_data))
		netdev_err(pdata->ndev, "PCS read failed, addr: %04x\n",
			   rd_addr);
}

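/*
 * Bring the Ethernet block RAMs out of shutdown and wait for all of the
 * memory-ready bits to assert before the port is used.
 */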
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, 0);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, 0);
}

static void xgene_xgmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, HSTMACRST);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_0, 0);
}

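/* Pulse the reset bit in PCS_CONTROL_1 to reinitialize the 10G PCS layer. */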
static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_pcs(pdata, PCS_CONTROL_1, &data);
	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data | PCS_CTRL_PCS_RST);
	xgene_enet_wr_pcs(pdata, PCS_CONTROL_1, data & ~PCS_CTRL_PCS_RST);
}

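/*
 * Program the station MAC address: bytes 0-3 of dev_addr go into the LSW
 * register and bytes 4-5 into the upper half of the MSW register.
 */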
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	u32 addr0, addr1;
	u8 *dev_addr = pdata->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, HSTMACADR_LSW_ADDR, addr0);
	xgene_enet_wr_mac(pdata, HSTMACADR_MSW_ADDR, addr1);
}

static void xgene_xgmac_set_mss(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_csr(pdata, XG_TSIF_MSS_REG0_ADDR, pdata->mss);
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_csr(pdata, XG_LINK_STATUS_ADDR, &data);

	return data;
}

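/*
 * One-time MAC setup: reset the MAC, update AXGMAC_CONFIG_1 (set HSTPPEN,
 * clear HSTLENCHK), program the station address and MSS, then configure
 * the receive/transmit side CSRs and resume TX.
 */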
static void xgene_xgmac_init(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_xgmac_reset(pdata);

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	data |= HSTPPEN;
	data &= ~HSTLENCHK;
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data);

	xgene_xgmac_set_mac_addr(pdata);
	xgene_xgmac_set_mss(pdata);

	xgene_enet_rd_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, &data);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, XG_RSIF_CONFIG_REG_ADDR, data);

	xgene_enet_rd_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, &data);
	data |= BIT(12);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_ADDR, data);
	xgene_enet_wr_csr(pdata, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x82);
	xgene_enet_wr_csr(pdata, XGENET_RX_DV_GATE_REG_0_ADDR, 0);
	xgene_enet_wr_csr(pdata, XG_CFG_BYPASS_ADDR, RESUME_TX);
}

static void xgene_xgmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTRFEN);
}

static void xgene_xgmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data | HSTTFEN);
}

static void xgene_xgmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTRFEN);
}

static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	xgene_enet_rd_mac(pdata, AXGMAC_CONFIG_1, &data);
	xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
}

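/*
 * Port-level reset: make sure the ring manager is up, then reset the block
 * either by toggling the port clock (DT boot) or by invoking the ACPI
 * _RST (or _INI) method (ACPI boot), and finally bring the RAMs out of
 * shutdown and clear the ring-interface associations.
 */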
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_RST", NULL, NULL);
		} else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
					   "_INI")) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

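/*
 * Classifier bypass: steer all received frames straight to the given
 * destination ring and free-pool buffer number instead of running them
 * through the classifier engine.
 */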
static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
				    u32 dst_ring_num, u16 bufpool_id)
{
	u32 cb, fpsel;

	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG0_ADDR, cb);

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
	xgene_enet_rd_csr(pdata, XCLE_BYPASS_REG1_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	xgene_enet_wr_csr(pdata, XCLE_BYPASS_REG1_ADDR, cb);
}

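/*
 * Quiesce the port: reset the QMI free-pool interface for every RX buffer
 * pool and the work-queue interface for every TX ring, then gate the port
 * clock when booted from device tree.
 */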
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;
	struct xgene_enet_desc_ring *ring;
	u32 pb, val;
	int i;

	pb = 0;
	for (i = 0; i < pdata->rxq_cnt; i++) {
		ring = pdata->rx_ring[i]->buf_pool;

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val - 0x20);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);

	pb = 0;
	for (i = 0; i < pdata->txq_cnt; i++) {
		ring = pdata->tx_ring[i];

		val = xgene_enet_ring_bufnum(ring->id);
		pb |= BIT(val);
	}
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

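/* Reset the QMI interface for a single ring (buffer pool or work queue). */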
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, val, data;

	val = xgene_enet_ring_bufnum(ring->id);

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(val - 0x20);
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(val);
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

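/*
 * Periodic link monitor: the XFI link is reported up only when the MAC
 * indicates link and the optional SFP "ready" GPIO (if present) reads
 * high.  On link-up the MAC RX/TX paths are enabled; on link-down they
 * are disabled and the PCS is reset before the next poll.
 */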
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *pdata = container_of(to_delayed_work(work),
					 struct xgene_enet_pdata, link_work);
	struct gpio_desc *sfp_rdy = pdata->sfp_rdy;
	struct net_device *ndev = pdata->ndev;
	u32 link_status, poll_interval;

	link_status = xgene_enet_link_status(pdata);
	if (link_status && !IS_ERR(sfp_rdy) && !gpiod_get_value(sfp_rdy))
		link_status = 0;

	if (link_status) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_xgmac_rx_enable(pdata);
			xgene_xgmac_tx_enable(pdata);
			netdev_info(ndev, "Link is Up - 10Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_xgmac_rx_disable(pdata);
			xgene_xgmac_tx_disable(pdata);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;

		xgene_pcs_reset(pdata);
	}

	schedule_delayed_work(&pdata->link_work, poll_interval);
}

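/* MAC and port callbacks used by the core driver for XFI (10GbE) ports */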
const struct xgene_mac_ops xgene_xgmac_ops = {
	.init = xgene_xgmac_init,
	.reset = xgene_xgmac_reset,
	.rx_enable = xgene_xgmac_rx_enable,
	.tx_enable = xgene_xgmac_tx_enable,
	.rx_disable = xgene_xgmac_rx_disable,
	.tx_disable = xgene_xgmac_tx_disable,
	.set_mac_addr = xgene_xgmac_set_mac_addr,
	.set_mss = xgene_xgmac_set_mss,
	.link_state = xgene_enet_link_state
};

const struct xgene_port_ops xgene_xgport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_xgcle_bypass,
	.shutdown = xgene_enet_shutdown,
};