1 /* Applied Micro X-Gene SoC Ethernet Driver
2  *
3  * Copyright (c) 2014, Applied Micro Circuits Corporation
4  * Authors: Iyappan Subramanian <isubramanian@apm.com>
5  *	    Keyur Chudgar <kchudgar@apm.com>
6  *
7  * This program is free software; you can redistribute  it and/or modify it
8  * under  the terms of  the GNU General  Public License as published by the
9  * Free Software Foundation;  either version 2 of the  License, or (at your
10  * option) any later version.
11  *
12  * This program is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15  * GNU General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
19  */
20 
21 #include "xgene_enet_main.h"
22 #include "xgene_enet_hw.h"
23 #include "xgene_enet_sgmac.h"
24 
/* Write @val to the Ethernet CSR register block at @offset */
static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
{
	iowrite32(val, p->eth_csr_addr + offset);
}
29 
/* Write @val to the ring-interface register block at @offset */
static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
				  u32 offset, u32 val)
{
	iowrite32(val, p->eth_ring_if_addr + offset);
}
35 
/* Write @val to the diagnostic CSR register block at @offset */
static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *p,
				   u32 offset, u32 val)
{
	iowrite32(val, p->eth_diag_csr_addr + offset);
}
41 
42 static bool xgene_enet_wr_indirect(struct xgene_indirect_ctl *ctl,
43 				   u32 wr_addr, u32 wr_data)
44 {
45 	int i;
46 
47 	iowrite32(wr_addr, ctl->addr);
48 	iowrite32(wr_data, ctl->ctl);
49 	iowrite32(XGENE_ENET_WR_CMD, ctl->cmd);
50 
51 	/* wait for write command to complete */
52 	for (i = 0; i < 10; i++) {
53 		if (ioread32(ctl->cmd_done)) {
54 			iowrite32(0, ctl->cmd);
55 			return true;
56 		}
57 		udelay(1);
58 	}
59 
60 	return false;
61 }
62 
63 static void xgene_enet_wr_mac(struct xgene_enet_pdata *p,
64 			      u32 wr_addr, u32 wr_data)
65 {
66 	struct xgene_indirect_ctl ctl = {
67 		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
68 		.ctl = p->mcx_mac_addr + MAC_WRITE_REG_OFFSET,
69 		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
70 		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
71 	};
72 
73 	if (!xgene_enet_wr_indirect(&ctl, wr_addr, wr_data))
74 		netdev_err(p->ndev, "mac write failed, addr: %04x\n", wr_addr);
75 }
76 
/* Read the Ethernet CSR register block at @offset */
static u32 xgene_enet_rd_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_csr_addr + offset);
}
81 
/* Read the diagnostic CSR register block at @offset */
static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
{
	return ioread32(p->eth_diag_csr_addr + offset);
}
86 
87 static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
88 {
89 	u32 rd_data;
90 	int i;
91 
92 	iowrite32(rd_addr, ctl->addr);
93 	iowrite32(XGENE_ENET_RD_CMD, ctl->cmd);
94 
95 	/* wait for read command to complete */
96 	for (i = 0; i < 10; i++) {
97 		if (ioread32(ctl->cmd_done)) {
98 			rd_data = ioread32(ctl->ctl);
99 			iowrite32(0, ctl->cmd);
100 
101 			return rd_data;
102 		}
103 		udelay(1);
104 	}
105 
106 	pr_err("%s: mac read failed, addr: %04x\n", __func__, rd_addr);
107 
108 	return 0;
109 }
110 
111 static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
112 {
113 	struct xgene_indirect_ctl ctl = {
114 		.addr = p->mcx_mac_addr + MAC_ADDR_REG_OFFSET,
115 		.ctl = p->mcx_mac_addr + MAC_READ_REG_OFFSET,
116 		.cmd = p->mcx_mac_addr + MAC_COMMAND_REG_OFFSET,
117 		.cmd_done = p->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET
118 	};
119 
120 	return xgene_enet_rd_indirect(&ctl, rd_addr);
121 }
122 
123 static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
124 {
125 	struct net_device *ndev = p->ndev;
126 	u32 data;
127 	int i = 0;
128 
129 	xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
130 	do {
131 		usleep_range(100, 110);
132 		data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
133 		if (data == ~0U)
134 			return 0;
135 	} while (++i < 10);
136 
137 	netdev_err(ndev, "Failed to release memory from shutdown\n");
138 	return -ENODEV;
139 }
140 
141 static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
142 {
143 	u32 val = 0xffffffff;
144 
145 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQASSOC_ADDR, val);
146 	xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPQASSOC_ADDR, val);
147 }
148 
149 static void xgene_mii_phy_write(struct xgene_enet_pdata *p, u8 phy_id,
150 				u32 reg, u16 data)
151 {
152 	u32 addr, wr_data, done;
153 	int i;
154 
155 	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
156 	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
157 
158 	wr_data = PHY_CONTROL(data);
159 	xgene_enet_wr_mac(p, MII_MGMT_CONTROL_ADDR, wr_data);
160 
161 	for (i = 0; i < 10; i++) {
162 		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
163 		if (!(done & BUSY_MASK))
164 			return;
165 		usleep_range(10, 20);
166 	}
167 
168 	netdev_err(p->ndev, "MII_MGMT write failed\n");
169 }
170 
171 static u32 xgene_mii_phy_read(struct xgene_enet_pdata *p, u8 phy_id, u32 reg)
172 {
173 	u32 addr, data, done;
174 	int i;
175 
176 	addr = PHY_ADDR(phy_id) | REG_ADDR(reg);
177 	xgene_enet_wr_mac(p, MII_MGMT_ADDRESS_ADDR, addr);
178 	xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
179 
180 	for (i = 0; i < 10; i++) {
181 		done = xgene_enet_rd_mac(p, MII_MGMT_INDICATORS_ADDR);
182 		if (!(done & BUSY_MASK)) {
183 			data = xgene_enet_rd_mac(p, MII_MGMT_STATUS_ADDR);
184 			xgene_enet_wr_mac(p, MII_MGMT_COMMAND_ADDR, 0);
185 
186 			return data;
187 		}
188 		usleep_range(10, 20);
189 	}
190 
191 	netdev_err(p->ndev, "MII_MGMT read failed\n");
192 
193 	return 0;
194 }
195 
/* Pulse the MAC soft-reset bit: assert SOFT_RESET1, then clear it */
static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
{
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, 0);
}
201 
202 static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
203 {
204 	u32 addr0, addr1;
205 	u8 *dev_addr = p->ndev->dev_addr;
206 
207 	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
208 		(dev_addr[1] << 8) | dev_addr[0];
209 	xgene_enet_wr_mac(p, STATION_ADDR0_ADDR, addr0);
210 
211 	addr1 = xgene_enet_rd_mac(p, STATION_ADDR1_ADDR);
212 	addr1 |= (dev_addr[5] << 24) | (dev_addr[4] << 16);
213 	xgene_enet_wr_mac(p, STATION_ADDR1_ADDR, addr1);
214 }
215 
216 static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
217 {
218 	u32 data;
219 
220 	data = xgene_mii_phy_read(p, INT_PHY_ADDR,
221 				  SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
222 
223 	return data & LINK_UP;
224 }
225 
/* Full SGMII MAC bring-up: reset the MAC, autonegotiate with the
 * internal PHY, program MAC and CSR configuration, set the station
 * address and open the TX/RX traffic gates.  Register write order
 * follows the hardware init sequence — do not reorder.
 */
static void xgene_sgmac_init(struct xgene_enet_pdata *p)
{
	u32 data, loop = 10;
	u32 offset = p->port_id * 4;	/* per-port CSR stride is 4 bytes */

	xgene_sgmac_reset(p);

	/* Enable auto-negotiation */
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
	xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);

	/* poll up to 10 times for autoneg completion with link up */
	while (loop--) {
		data = xgene_mii_phy_read(p, INT_PHY_ADDR,
					  SGMII_STATUS_ADDR >> 2);
		if ((data & AUTO_NEG_COMPLETE) && (data & LINK_STATUS))
			break;
		usleep_range(10, 20);
	}
	if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
		netdev_err(p->ndev, "Auto-negotiation failed\n");

	/* select interface mode 2 and full duplex in MAC_CONFIG_2 */
	data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
	ENET_INTERFACE_MODE2_SET(&data, 2);
	xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
	xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);

	data = xgene_enet_rd_csr(p, ENET_SPARE_CFG_REG_ADDR);
	data |= MPA_IDLE_WITH_QMI_EMPTY;
	xgene_enet_wr_csr(p, ENET_SPARE_CFG_REG_ADDR, data);

	xgene_sgmac_set_mac_addr(p);

	/* bypass the unified security blocks on both TX and RX paths */
	data = xgene_enet_rd_csr(p, DEBUG_REG_ADDR);
	data |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
	xgene_enet_wr_csr(p, DEBUG_REG_ADDR, data);

	/* Adjust MDC clock frequency */
	data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&data, 7);
	xgene_enet_wr_mac(p, MII_MGMT_CONFIG_ADDR, data);

	/* Enable drop if bufpool not available */
	data = xgene_enet_rd_csr(p, RSIF_CONFIG_REG_ADDR);
	data |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(p, RSIF_CONFIG_REG_ADDR, data);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(p, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Bypass traffic gating */
	xgene_enet_wr_csr(p, CFG_LINK_AGGR_RESUME_0_ADDR + offset, TX_PORT0);
	xgene_enet_wr_csr(p, CFG_BYPASS_ADDR, RESUME_TX);
	xgene_enet_wr_csr(p, SG_RX_DV_GATE_REG_0_ADDR + offset, RESUME_RX0);
}
280 
281 static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
282 {
283 	u32 data;
284 
285 	data = xgene_enet_rd_mac(p, MAC_CONFIG_1_ADDR);
286 
287 	if (set)
288 		data |= bits;
289 	else
290 		data &= ~bits;
291 
292 	xgene_enet_wr_mac(p, MAC_CONFIG_1_ADDR, data);
293 }
294 
/* Set RX_EN in MAC_CONFIG_1 to start receiving */
static void xgene_sgmac_rx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, true);
}
299 
/* Set TX_EN in MAC_CONFIG_1 to start transmitting */
static void xgene_sgmac_tx_enable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, true);
}
304 
/* Clear RX_EN in MAC_CONFIG_1 to stop receiving */
static void xgene_sgmac_rx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, RX_EN, false);
}
309 
/* Clear TX_EN in MAC_CONFIG_1 to stop transmitting */
static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
{
	xgene_sgmac_rxtx(p, TX_EN, false);
}
314 
315 static int xgene_enet_reset(struct xgene_enet_pdata *p)
316 {
317 	if (!xgene_ring_mgr_init(p))
318 		return -ENODEV;
319 
320 	clk_prepare_enable(p->clk);
321 	clk_disable_unprepare(p->clk);
322 	clk_prepare_enable(p->clk);
323 
324 	xgene_enet_ecc_init(p);
325 	xgene_enet_config_ring_if_assoc(p);
326 
327 	return 0;
328 }
329 
330 static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
331 				  u32 dst_ring_num, u16 bufpool_id)
332 {
333 	u32 data, fpsel;
334 	u32 offset = p->port_id * MAC_OFFSET;
335 
336 	data = CFG_CLE_BYPASS_EN0;
337 	xgene_enet_wr_csr(p, CLE_BYPASS_REG0_0_ADDR + offset, data);
338 
339 	fpsel = xgene_enet_ring_bufnum(bufpool_id) - 0x20;
340 	data = CFG_CLE_DSTQID0(dst_ring_num) | CFG_CLE_FPSEL0(fpsel);
341 	xgene_enet_wr_csr(p, CLE_BYPASS_REG1_0_ADDR + offset, data);
342 }
343 
/* Power down the port by gating its clock */
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
	clk_disable_unprepare(p->clk);
}
348 
349 static void xgene_enet_link_state(struct work_struct *work)
350 {
351 	struct xgene_enet_pdata *p = container_of(to_delayed_work(work),
352 				     struct xgene_enet_pdata, link_work);
353 	struct net_device *ndev = p->ndev;
354 	u32 link, poll_interval;
355 
356 	link = xgene_enet_link_status(p);
357 	if (link) {
358 		if (!netif_carrier_ok(ndev)) {
359 			netif_carrier_on(ndev);
360 			xgene_sgmac_init(p);
361 			xgene_sgmac_rx_enable(p);
362 			xgene_sgmac_tx_enable(p);
363 			netdev_info(ndev, "Link is Up - 1Gbps\n");
364 		}
365 		poll_interval = PHY_POLL_LINK_ON;
366 	} else {
367 		if (netif_carrier_ok(ndev)) {
368 			xgene_sgmac_rx_disable(p);
369 			xgene_sgmac_tx_disable(p);
370 			netif_carrier_off(ndev);
371 			netdev_info(ndev, "Link is Down\n");
372 		}
373 		poll_interval = PHY_POLL_LINK_OFF;
374 	}
375 
376 	schedule_delayed_work(&p->link_work, poll_interval);
377 }
378 
/* MAC-level operations exported for SGMII-attached ports */
struct xgene_mac_ops xgene_sgmac_ops = {
	.init		= xgene_sgmac_init,
	.reset		= xgene_sgmac_reset,
	.rx_enable	= xgene_sgmac_rx_enable,
	.tx_enable	= xgene_sgmac_tx_enable,
	.rx_disable	= xgene_sgmac_rx_disable,
	.tx_disable	= xgene_sgmac_tx_disable,
	.set_mac_addr	= xgene_sgmac_set_mac_addr,
	.link_state	= xgene_enet_link_state
};
389 
/* Port-level operations exported for SGMII-attached ports */
struct xgene_port_ops xgene_sgport_ops = {
	.reset		= xgene_enet_reset,
	.cle_bypass	= xgene_enet_cle_bypass,
	.shutdown	= xgene_enet_shutdown
};
395