/* 10G controller driver for Samsung SoCs
 *
 * Copyright (C) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Author: Siva Reddy Kallam <siva.kallam@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

#include "sxgbe_common.h"
#include "sxgbe_dma.h"
#include "sxgbe_reg.h"
#include "sxgbe_desc.h"

/* DMA core initialization: soft-reset the DMA engine and program the
 * AXI system bus mode register.
 */
static int sxgbe_dma_init(void __iomem *ioaddr, int fix_burst, int burst_map)
{
	int retry_count = 10;
	u32 reg_val;

	/* reset the DMA */
	writel(SXGBE_DMA_SOFT_RESET, ioaddr + SXGBE_DMA_MODE_REG);
	while (retry_count--) {
		if (!(readl(ioaddr + SXGBE_DMA_MODE_REG) &
		      SXGBE_DMA_SOFT_RESET))
			break;
		mdelay(10);
	}

	if (retry_count < 0)
		return -EBUSY;

	reg_val = readl(ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	/* If fix_burst == 0, set UNDEF = 1 in the DMA_Sys_Mode register.
	 * If fix_burst == 1, leave UNDEF = 0 in the DMA_Sys_Mode register.
	 * burst_map is a bitmap for BLEN[4, 8, 16, 32, 64, 128 and 256].
	 * burst_map is programmed irrespective of the fix_burst value.
	 */
	if (!fix_burst)
		reg_val |= SXGBE_DMA_AXI_UNDEF_BURST;

	/* write burst len map */
	reg_val |= (burst_map << SXGBE_DMA_BLENMAP_LSHIFT);

	writel(reg_val, ioaddr + SXGBE_DMA_SYSBUS_MODE_REG);

	return 0;
}

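/* Per-channel initialization: optionally switch the channel to PBLx8 mode
 * and program the TX/RX programmable burst lengths, then set the TX/RX
 * descriptor ring base addresses, tail pointers and ring lengths, and
 * finally enable the channel interrupts.
 */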
static void sxgbe_dma_channel_init(void __iomem *ioaddr, int cha_num,
				   int fix_burst, int pbl, dma_addr_t dma_tx,
				   dma_addr_t dma_rx, int t_rsize, int r_rsize)
{
	u32 reg_val;
	dma_addr_t dma_addr;

	reg_val = readl(ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
	/* set the pbl */
	if (fix_burst) {
		reg_val |= SXGBE_DMA_PBL_X8MODE;
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_CTL_REG(cha_num));
		/* program the TX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_TXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
		/* program the RX pbl */
		reg_val = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
		reg_val |= (pbl << SXGBE_DMA_RXPBL_LSHIFT);
		writel(reg_val, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cha_num));
	}

	/* program desc registers */
	writel(upper_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_tx),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_LADD_REG(cha_num));

	writel(upper_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_HADD_REG(cha_num));
	writel(lower_32_bits(dma_rx),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_LADD_REG(cha_num));

	/* Program the tail pointers.
	 * Assumption: the upper 32 bits are constant and the same as the
	 * TX/RX descriptor list base addresses.
	 */
	dma_addr = dma_tx + ((t_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_TXDESC_TAILPTR_REG(cha_num));

	dma_addr = dma_rx + ((r_rsize - 1) * SXGBE_DESC_SIZE_BYTES);
	writel(lower_32_bits(dma_addr),
	       ioaddr + SXGBE_DMA_CHA_RXDESC_TAILPTR_REG(cha_num));
	/* program the ring sizes */
	writel(t_rsize - 1, ioaddr + SXGBE_DMA_CHA_TXDESC_RINGLEN_REG(cha_num));
	writel(r_rsize - 1, ioaddr + SXGBE_DMA_CHA_RXDESC_RINGLEN_REG(cha_num));

	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(cha_num));
}

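/* Kick the TX DMA of the given channel by setting the start bit
 * (SXGBE_TX_START_DMA) in its TX control register.
 */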
static void sxgbe_enable_dma_transmission(void __iomem *ioaddr, int cha_num)
{
	u32 tx_config;

	tx_config = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
	tx_config |= SXGBE_TX_START_DMA;
	writel(tx_config, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cha_num));
}

static void sxgbe_enable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Enable TX/RX interrupts */
	writel(SXGBE_DMA_ENA_INT,
	       ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

static void sxgbe_disable_dma_irq(void __iomem *ioaddr, int dma_cnum)
{
	/* Disable TX/RX interrupts */
	writel(0, ioaddr + SXGBE_DMA_CHA_INT_ENABLE_REG(dma_cnum));
}

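/* Set the TX enable bit on all 'tchannels' TX DMA channels.  The per-queue
 * start/stop helpers and the RX variants below follow the same pattern.
 */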
static void sxgbe_dma_start_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg |= SXGBE_TX_ENABLE;
		writel(tx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

static void sxgbe_dma_start_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg |= SXGBE_TX_ENABLE;
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx_queue(void __iomem *ioaddr, int dma_cnum)
{
	u32 tx_ctl_reg;

	tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
	tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
	writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(dma_cnum));
}

static void sxgbe_dma_stop_tx(void __iomem *ioaddr, int tchannels)
{
	int cnum;
	u32 tx_ctl_reg;

	for (cnum = 0; cnum < tchannels; cnum++) {
		tx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
		tx_ctl_reg &= ~(SXGBE_TX_ENABLE);
		writel(tx_ctl_reg, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(cnum));
	}
}

static void sxgbe_dma_start_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg |= SXGBE_RX_ENABLE;
		writel(rx_ctl_reg,
		       ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

static void sxgbe_dma_stop_rx(void __iomem *ioaddr, int rchannels)
{
	int cnum;
	u32 rx_ctl_reg;

	for (cnum = 0; cnum < rchannels; cnum++) {
		rx_ctl_reg = readl(ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
		rx_ctl_reg &= ~(SXGBE_RX_ENABLE);
		writel(rx_ctl_reg, ioaddr + SXGBE_DMA_CHA_RXCTL_REG(cnum));
	}
}

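/* Decode and acknowledge the TX-related bits of a channel's DMA status
 * register: update the extra stats counters, write back the serviced bits
 * and return a mask of handle_tx / tx_bump_tc / tx_hard_error events for
 * the caller to act on.
 */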
static int sxgbe_tx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* TX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_TI) {
			ret_val |= handle_tx;
			x->tx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_TI;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_TBU) {
			x->tx_underflow_irq++;
			ret_val |= tx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_TBU;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* TX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_TPS) {
			ret_val |= tx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_TPS;
			x->tx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= tx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of all
			 * the bus access errors and is cleared when the
			 * respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_TEB0) {
				x->tx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB0;
			} else {
				x->tx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB1) {
				x->tx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB1;
			} else {
				x->tx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_TEB2) {
				x->tx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_TEB2;
			}
		}

		/* context descriptor error */
		if (int_status & SXGBE_DMA_INT_STATUS_CTXTERR) {
			x->tx_ctxt_desc_err++;
			clear_val |= SXGBE_DMA_INT_STATUS_CTXTERR;
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

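/* RX counterpart of the above: decode and acknowledge the RX-related bits
 * of a channel's DMA status register, update the extra stats and return a
 * mask of handle_rx / rx_bump_tc / rx_hard_error events.
 */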
static int sxgbe_rx_dma_int_status(void __iomem *ioaddr, int channel_no,
				   struct sxgbe_extra_stats *x)
{
	u32 int_status = readl(ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));
	u32 clear_val = 0;
	u32 ret_val = 0;

	/* RX Normal Interrupt Summary */
	if (likely(int_status & SXGBE_DMA_INT_STATUS_NIS)) {
		x->normal_irq_n++;
		if (int_status & SXGBE_DMA_INT_STATUS_RI) {
			ret_val |= handle_rx;
			x->rx_normal_irq_n++;
			clear_val |= SXGBE_DMA_INT_STATUS_RI;
		}
	} else if (unlikely(int_status & SXGBE_DMA_INT_STATUS_AIS)) {
		/* RX Abnormal Interrupt Summary */
		if (int_status & SXGBE_DMA_INT_STATUS_RBU) {
			ret_val |= rx_bump_tc;
			clear_val |= SXGBE_DMA_INT_STATUS_RBU;
			x->rx_underflow_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_RPS) {
			ret_val |= rx_hard_error;
			clear_val |= SXGBE_DMA_INT_STATUS_RPS;
			x->rx_process_stopped_irq++;
		}

		if (int_status & SXGBE_DMA_INT_STATUS_FBE) {
			ret_val |= rx_hard_error;
			x->fatal_bus_error_irq++;

			/* Assumption: the FBE bit is the combination of all
			 * the bus access errors and is cleared when the
			 * respective error bits are cleared
			 */

			/* check for actual cause */
			if (int_status & SXGBE_DMA_INT_STATUS_REB0) {
				x->rx_read_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB0;
			} else {
				x->rx_write_transfer_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB1) {
				x->rx_desc_access_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB1;
			} else {
				x->rx_buffer_access_err++;
			}

			if (int_status & SXGBE_DMA_INT_STATUS_REB2) {
				x->rx_data_transfer_err++;
				clear_val |= SXGBE_DMA_INT_STATUS_REB2;
			}
		}
	}

	/* clear the served bits */
	writel(clear_val, ioaddr + SXGBE_DMA_CHA_STATUS_REG(channel_no));

	return ret_val;
}

/* Program the HW RX watchdog: write the RX interrupt watchdog timer value
 * for every RX queue.
 */
static void sxgbe_dma_rx_watchdog(void __iomem *ioaddr, u32 riwt)
{
	u32 que_num;

	SXGBE_FOR_EACH_QUEUE(SXGBE_RX_QUEUES, que_num) {
		writel(riwt,
		       ioaddr + SXGBE_DMA_CHA_INT_RXWATCHTMR_REG(que_num));
	}
}

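/* Enable TCP Segmentation Offload for the given channel by setting the TSE
 * bit in its TX control register.
 */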
static void sxgbe_enable_tso(void __iomem *ioaddr, u8 chan_num)
{
	u32 ctrl;

	ctrl = readl(ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
	ctrl |= SXGBE_DMA_CHA_TXCTL_TSE_ENABLE;
	writel(ctrl, ioaddr + SXGBE_DMA_CHA_TXCTL_REG(chan_num));
}

static const struct sxgbe_dma_ops sxgbe_dma_ops = {
	.init				= sxgbe_dma_init,
	.cha_init			= sxgbe_dma_channel_init,
	.enable_dma_transmission	= sxgbe_enable_dma_transmission,
	.enable_dma_irq			= sxgbe_enable_dma_irq,
	.disable_dma_irq		= sxgbe_disable_dma_irq,
	.start_tx			= sxgbe_dma_start_tx,
	.start_tx_queue			= sxgbe_dma_start_tx_queue,
	.stop_tx			= sxgbe_dma_stop_tx,
	.stop_tx_queue			= sxgbe_dma_stop_tx_queue,
	.start_rx			= sxgbe_dma_start_rx,
	.stop_rx			= sxgbe_dma_stop_rx,
	.tx_dma_int_status		= sxgbe_tx_dma_int_status,
	.rx_dma_int_status		= sxgbe_rx_dma_int_status,
	.rx_watchdog			= sxgbe_dma_rx_watchdog,
	.enable_tso			= sxgbe_enable_tso,
};

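/* Hypothetical usage sketch (for illustration only, not code from this
 * file): a caller fetches the ops table once and drives the DMA through
 * it, e.g.
 *
 *	const struct sxgbe_dma_ops *dma = sxgbe_get_dma_ops();
 *
 *	if (dma->init(ioaddr, fix_burst, burst_map))
 *		return -EBUSY;
 *	dma->cha_init(ioaddr, chan_num, fix_burst, pbl,
 *		      dma_tx_phy, dma_rx_phy, tx_rsize, rx_rsize);
 *	dma->start_tx_queue(ioaddr, chan_num);
 *	dma->start_rx(ioaddr, SXGBE_RX_QUEUES);
 *	dma->enable_dma_irq(ioaddr, chan_num);
 *
 * (chan_num, dma_tx_phy, dma_rx_phy and the ring sizes above are
 * illustrative names, not symbols defined by this driver.)
 */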
const struct sxgbe_dma_ops *sxgbe_get_dma_ops(void)
{
	return &sxgbe_dma_ops;
}