/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <isubramanian@apm.com>
 *	    Keyur Chudgar <kchudgar@apm.com>
 *
 * This program is free software; you can redistribute  it and/or modify it
 * under  the terms of  the GNU General  Public License as published by the
 * Free Software Foundation;  either version 2 of the  License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"
#include "xgene_enet_sgmac.h"

static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}

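/* Indirect register write: latch the target address and data, kick the
 * write command, then poll cmd_done for up to ~10 us.  Returns false if
 * the hardware never acknowledges the command.
 */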
static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
				   u32 wr_addr, u32 wr_data)
{
	int i;

	iowrite32(wr_addr, ctl->addr);
	iowrite32(wr_data, ctl->ctl);
	iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);

	/* wait for write command to complete */
	for (i = 0; i < 10; i++) {
		if (ioread32(ctl->cmd_done)) {
			iowrite32(0, ctl->cmd);
			return true;
		}
		udelay(1);
	}

	return false;
}

static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
			      u32 wr_addr, u32 wr_data)
{
	struct xgene_indirect_ctl ctl = {
		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
		.ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
	};

	if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
		netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
}

static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}

static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}

static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
{
	u32 rd_data;
	int i;

	iowrite32(rd_addr, ctl->addr);
	iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);

	/* wait for read command to complete */
	for (i = 0; i < 10; i++) {
		if (ioread32(ctl->cmd_done)) {
			rd_data = ioread32(ctl->ctl);
			iowrite32(0, ctl->cmd);

			return rd_data;
		}
		udelay(1);
	}

	pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);

	return 0;
}

static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
{
	struct xgene_indirect_ctl ctl = {
		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
		.ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
	};

	return xgene_enet_rd_indirect(&ctl, rd_addr);
}

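/* Bring the internal RAMs out of shutdown and poll the diagnostic CSR
 * until the memory-ready indication is set (all-ones), failing after
 * roughly 1 ms.
 */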
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
	struct net_device *ndev = p->ndev;
	u32 data;
	int i = 0;

	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
	do {
		usleep_range(100, 110);
		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
		if (data == ~0U)
			return 0;
	} while (++i < 10);

	netdev_err(ndev, "Failed to release memory from shutdown\n");
	return -ENODEV;
}

static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
}

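/* Access the integrated SGMII PHY through the MAC's MII management (MDIO)
 * registers: program the PHY/register address, start the cycle and poll
 * the BUSY indicator until it clears.
 */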
static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
				u32 reg, u16 data)
{
	u32 addr, wr_data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);

	wr_data = PHY_CONTROL(data);
	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK))
			return;
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT write failed\n");
}

static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
{
	u32 addr, data, done;
	int i;

	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);

	for (i = 0; i < 10; i++) {
		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
		if (!(done & BUSY_MASK)) {
			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);

			return data;
		}
		usleep_range(10, 20);
	}

	netdev_err(p->ndev, "MII_MGMT read failed\n");

	return 0;
}

static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}

static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
	u32 addr0, addr1;
	u8 *dev_addr = p->ndev->dev_addr;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);

	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
}

static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
{
	u32 data;

	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);

	return data & LINK_UP;
}

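/* One-time SGMII MAC setup: soft-reset the MAC, restart auto-negotiation
 * on the internal PHY and wait for it to complete, then program the MAC
 * and CSR registers (interface mode, duplex, MDC clock divider, drop on
 * bufpool timeout, traffic-gating bypass).
 */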
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;

	xgene_sgmac_reset(p);

	/* Enable auto-negotiation */
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);

	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(10, 20);
	}
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");

	data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	ENET_INTERFACE_MODE2_SET(&data, 2);
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);

	data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);

	xgene_sgmac_set_mac_addr(p);

	data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
	data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
	xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR, RESUME_RX0);
}

static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
{
	u32 data;

	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);

	if (set)
		data |= bits;
	else
		data &= ~bits;

	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
}

static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}

static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}

static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}

static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}

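/* Port reset: make sure the ring manager is up, cycle the port clock
 * (enable -> disable -> enable), which serves as the block reset here,
 * then re-run ECC init and the ring-interface association.
 */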
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
	if (!xgene_ring_mgr_init(p))
		return -ENODEV;

	clk_prepare_enable(p->clk);
	clk_disable_unprepare(p->clk);
	clk_prepare_enable(p->clk);

	xgene_enet_ecc_init(p);
	xgene_enet_config_ring_if_assoc(p);

	return 0;
}

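/* Bypass the classifier (CLE) so that all received traffic is steered to
 * the given destination ring and buffer pool.
 */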
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
				  u32 dst_ring_num, u16 bufpool_id)
{
	u32 data, fpsel;

	data = CFG_CLE_BYPASS_EN0;
	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR, data);

	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR, data);
}

static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	clk_disable_unprepare(p->clk);
}

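/* Delayed-work link poller: reads the link status from the internal PHY,
 * re-initializes the MAC when the link comes back up, and reschedules
 * itself at an interval that depends on the link state.
 */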
static void xgene_enet_link_state(struct work_struct *work)
{
	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
				     struct xgene_enet_pdata, link_work);
	struct net_device *ndev = p->ndev;
	u32 link, poll_interval;

	link = xgene_enet_link_status(p);
	if (link) {
		if (!netif_carrier_ok(ndev)) {
			netif_carrier_on(ndev);
			xgene_sgmac_init(p);
			xgene_sgmac_rx_enable(p);
			xgene_sgmac_tx_enable(p);
			netdev_info(ndev, "Link is Up - 1Gbps\n");
		}
		poll_interval = PHY_POLL_LINK_ON;
	} else {
		if (netif_carrier_ok(ndev)) {
			xgene_sgmac_rx_disable(p);
			xgene_sgmac_tx_disable(p);
			netif_carrier_off(ndev);
			netdev_info(ndev, "Link is Down\n");
		}
		poll_interval = PHY_POLL_LINK_OFF;
	}

	schedule_delayed_work(&p->link_work, poll_interval);
}

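/* MAC and port operation tables exported to the core driver; the probe
 * path is expected to select these when the port is configured for SGMII.
 */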
struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.link_state	= xgene_enet_link_state
};

struct xgene_port_ops xgene_sgport_ops = {
	.reset		= xgene_enet_reset,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};