/* Altera TSE SGDMA and MSGDMA Linux driver
 * Copyright (C) 2014 Altera Corporation. All rights reserved
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/list.h>
#include "altera_utils.h"
#include "altera_tse.h"
#include "altera_sgdmahw.h"
#include "altera_sgdma.h"

static void sgdma_setup_descrip(struct sgdma_descrip *desc,
				struct sgdma_descrip *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed);

static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip *desc);

static int sgdma_async_read(struct altera_tse_private *priv);

static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc);

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc);

static int sgdma_txbusy(struct altera_tse_private *priv);

static int sgdma_rxbusy(struct altera_tse_private *priv);

static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer);

static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv);

static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv);

static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv);

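/* Set up the cached tx/rx control register values, initialize the
 * pending tx/rx buffer lists, and map the tx/rx descriptor memory
 * regions for streaming DMA.
 */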
int sgdma_initialize(struct altera_tse_private *priv)
{
	priv->txctrlreg = SGDMA_CTRLREG_ILASTD;

	priv->rxctrlreg = SGDMA_CTRLREG_IDESCRIP |
		      SGDMA_CTRLREG_ILASTD;

	INIT_LIST_HEAD(&priv->txlisthd);
	INIT_LIST_HEAD(&priv->rxlisthd);

	priv->rxdescphys = (dma_addr_t) 0;
	priv->txdescphys = (dma_addr_t) 0;

	priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
					  priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (dma_mapping_error(priv->device, priv->rxdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping rx descriptor memory\n");
		return -EINVAL;
	}

	priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
					  priv->txdescmem, DMA_TO_DEVICE);

	if (dma_mapping_error(priv->device, priv->txdescphys)) {
		sgdma_uninitialize(priv);
		netdev_err(priv->dev, "error mapping tx descriptor memory\n");
		return -EINVAL;
	}

	return 0;
}

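/* Unmap the descriptor memory regions mapped by sgdma_initialize() */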
void sgdma_uninitialize(struct altera_tse_private *priv)
{
	if (priv->rxdescphys)
		dma_unmap_single(priv->device, priv->rxdescphys,
				 priv->rxdescmem, DMA_BIDIRECTIONAL);

	if (priv->txdescphys)
		dma_unmap_single(priv->device, priv->txdescphys,
				 priv->txdescmem, DMA_TO_DEVICE);
}

/* This function resets the SGDMA controller and clears the
 * descriptor memory used for transmits and receives.
 */
void sgdma_reset(struct altera_tse_private *priv)
{
	u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
	u32 txdescriplen   = priv->txdescmem;
	u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
	u32 rxdescriplen   = priv->rxdescmem;
	struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
	struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;

	/* Initialize descriptor memory to 0 */
	memset(ptxdescripmem, 0, txdescriplen);
	memset(prxdescripmem, 0, rxdescriplen);

	iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
	iowrite32(0, &ptxsgdma->control);

	iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
	iowrite32(0, &prxsgdma->control);
}

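/* Enable the RX SGDMA interrupt by setting the INTEN bit in both the
 * cached control value and the live control register.
 */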
void sgdma_enable_rxirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	priv->rxctrlreg |= SGDMA_CTRLREG_INTEN;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}

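/* TX counterpart of sgdma_enable_rxirq() */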
void sgdma_enable_txirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
	priv->txctrlreg |= SGDMA_CTRLREG_INTEN;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_INTEN);
}

/* for SGDMA, RX interrupts remain enabled once enabled, so disabling
 * is a no-op
 */
void sgdma_disable_rxirq(struct altera_tse_private *priv)
{
}

/* for SGDMA, TX interrupts remain enabled once enabled, so disabling
 * is a no-op
 */
void sgdma_disable_txirq(struct altera_tse_private *priv)
{
}

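/* acknowledge a pending RX interrupt by setting the CLRINT control bit */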
void sgdma_clear_rxirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}

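/* acknowledge a pending TX interrupt by setting the CLRINT control bit */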
void sgdma_clear_txirq(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
	tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
}

/* transmits a buffer through the SGDMA. Returns the number of buffers
 * transmitted (1), or 0 if the transmit engine is still busy.
 *
 * tx_lock is held by the caller
 */
int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->tx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	/* wait 'til the tx sgdma is ready for the next transmit request */
	if (sgdma_txbusy(priv))
		return 0;

	sgdma_setup_descrip(cdesc,		/* current descriptor */
			    ndesc,		/* next descriptor */
			    sgdma_txphysaddr(priv, ndesc),
			    buffer->dma_addr,	/* address of packet to xmit */
			    0,			/* write addr 0 for tx dma */
			    buffer->len,	/* length of packet */
			    SGDMA_CONTROL_EOP,	/* Generate EOP */
			    0,			/* read fixed */
			    SGDMA_CONTROL_WR_FIXED); /* write fixed */

	sgdma_async_write(priv, cdesc);

	/* enqueue the request to the pending transmit queue */
	queue_tx(priv, buffer);

	return 1;
}


/* Returns 1 if a transmit has completed and its buffer has been reaped
 * from the pending tx list, 0 otherwise.
 *
 * tx_lock is held to protect access to the queued tx list.
 */
u32 sgdma_tx_completions(struct altera_tse_private *priv)
{
	u32 ready = 0;
	struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;

	if (!sgdma_txbusy(priv) &&
	    ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
	    (dequeue_tx(priv))) {
		ready = 1;
	}

	return ready;
}

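/* enqueue a receive buffer and restart the rx SGDMA if it is idle */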
int sgdma_add_rx_desc(struct altera_tse_private *priv,
		      struct tse_buffer *rxbuffer)
{
	queue_rx(priv, rxbuffer);
	return sgdma_async_read(priv);
}

/* status is returned in the upper 16 bits,
 * length is returned in the lower 16 bits
 */
u32 sgdma_rx_status(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
	struct sgdma_descrip *desc = NULL;
	unsigned int rxstatus = 0;
	unsigned int pktlength = 0;
	unsigned int pktstatus = 0;
	struct tse_buffer *rxbuffer = NULL;

	dma_sync_single_for_cpu(priv->device,
				priv->rxdescphys,
				priv->rxdescmem,
				DMA_BIDIRECTIONAL);

	desc = &base[0];
	if ((ioread32(&csr->status) & SGDMA_STSREG_EOP) ||
	    (desc->status & SGDMA_STATUS_EOP)) {
		pktlength = desc->bytes_xferred;
		pktstatus = desc->status & 0x3f;
		rxstatus = pktstatus;
		rxstatus = rxstatus << 16;
		rxstatus |= (pktlength & 0xffff);

		desc->status = 0;

		rxbuffer = dequeue_rx(priv);
		if (rxbuffer == NULL)
			netdev_err(priv->dev,
				   "sgdma rx completion but rx queue empty!\n");

		/* kick the rx sgdma after reaping this descriptor */
		sgdma_async_read(priv);
	}

	return rxstatus;
}


/* Private functions */
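/* Fill in the fields of the current descriptor, and clear the HW_OWNED
 * bit in the next descriptor so the hardware does not run on past the
 * current one.
 */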
static void sgdma_setup_descrip(struct sgdma_descrip *desc,
				struct sgdma_descrip *ndesc,
				dma_addr_t ndesc_phys,
				dma_addr_t raddr,
				dma_addr_t waddr,
				u16 length,
				int generate_eop,
				int rfixed,
				int wfixed)
{
	/* Clear the next descriptor as not owned by hardware */
	u32 ctrl = ndesc->control;
	ctrl &= ~SGDMA_CONTROL_HW_OWNED;
	ndesc->control = ctrl;

	ctrl = SGDMA_CONTROL_HW_OWNED;
	ctrl |= generate_eop;
	ctrl |= rfixed;
	ctrl |= wfixed;

	/* the channel field is left at zero from the descriptor memory init */

	desc->raddr = raddr;
	desc->waddr = waddr;
	desc->next = lower_32_bits(ndesc_phys);
	desc->control = ctrl;
	desc->status = 0;
	desc->rburst = 0;
	desc->wburst = 0;
	desc->bytes = length;
	desc->bytes_xferred = 0;
}

/* If the hardware is busy, don't restart the async read.
 * If the status register is 0 - meaning the initial state - restart the
 * async read, most likely for the first time a receive buffer is being
 * populated. If the read status indicates the hardware is not busy and
 * status bits are set, restart the async DMA read.
 */
static int sgdma_async_read(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	struct sgdma_descrip *descbase =
		(struct sgdma_descrip *)priv->rx_dma_desc;

	struct sgdma_descrip *cdesc = &descbase[0];
	struct sgdma_descrip *ndesc = &descbase[1];

	unsigned int sts = ioread32(&csr->status);
	struct tse_buffer *rxbuffer = NULL;

	if (!sgdma_rxbusy(priv)) {
		rxbuffer = queue_rx_peekhead(priv);
		if (rxbuffer == NULL)
			return 0;

		sgdma_setup_descrip(cdesc,	/* current descriptor */
				    ndesc,	/* next descriptor */
				    sgdma_rxphysaddr(priv, ndesc),
				    0,		/* read addr 0 for rx dma */
				    rxbuffer->dma_addr, /* write addr for rx dma */
				    0,		/* read 'til EOP */
				    0,		/* EOP: NA for rx dma */
				    0,		/* read fixed: NA for rx dma */
				    0);		/* SOP: NA for rx DMA */

		/* clear control and status */
		iowrite32(0, &csr->control);

		/* If status available, clear those bits */
		if (sts & 0xf)
			iowrite32(0xf, &csr->status);

		dma_sync_single_for_device(priv->device,
					   priv->rxdescphys,
					   priv->rxdescmem,
					   DMA_BIDIRECTIONAL);

		iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
			  &csr->next_descrip);

		iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
			  &csr->control);

		return 1;
	}

	return 0;
}

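/* Program the tx channel with a single descriptor and start it: clear
 * control and status, sync the tx descriptor memory to the device, then
 * write the next-descriptor register and set the START bit.
 * Returns 1 if the transfer was started, 0 if the channel was busy.
 */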
static int sgdma_async_write(struct altera_tse_private *priv,
			     struct sgdma_descrip *desc)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

	if (sgdma_txbusy(priv))
		return 0;

	/* clear control and status */
	iowrite32(0, &csr->control);
	iowrite32(0x1f, &csr->status);

	dma_sync_single_for_device(priv->device, priv->txdescphys,
				   priv->txdescmem, DMA_TO_DEVICE);

	iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
		  &csr->next_descrip);

	iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
		  &csr->control);

	return 1;
}

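/* Translate a driver pointer into tx descriptor memory to the
 * corresponding bus address, by applying the pointer's offset within
 * the descriptor region to the region's bus base address. The rx
 * variant below does the same for rx descriptor memory.
 */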
static dma_addr_t
sgdma_txphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc)
{
	dma_addr_t paddr = priv->txdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

static dma_addr_t
sgdma_rxphysaddr(struct altera_tse_private *priv,
		 struct sgdma_descrip *desc)
{
	dma_addr_t paddr = priv->rxdescmem_busaddr;
	uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
	return (dma_addr_t)((uintptr_t)paddr + offs);
}

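/* list_remove_head() removes the first entry of 'list' into 'entry',
 * setting 'entry' to NULL if the list is empty; list_peek_head()
 * returns the first entry without removing it. Callers provide the
 * necessary mutual exclusion.
 */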
#define list_remove_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
			list_del_init(&entry->member);			\
		}							\
	} while (0)

#define list_peek_head(list, entry, type, member)			\
	do {								\
		entry = NULL;						\
		if (!list_empty(list)) {				\
			entry = list_entry((list)->next, type, member);	\
		}							\
	} while (0)

/* adds a tse_buffer to the tail of a tx buffer list.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_tx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->txlisthd);
}


/* adds a tse_buffer to the tail of a rx buffer list
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static void
queue_rx(struct altera_tse_private *priv, struct tse_buffer *buffer)
{
	list_add_tail(&buffer->lh, &priv->rxlisthd);
}

/* dequeues a tse_buffer from the transmit buffer list, or returns
 * NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_tx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->txlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* dequeues a tse_buffer from the receive buffer list, or returns
 * NULL if the list is empty
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list.
 */
static struct tse_buffer *
dequeue_rx(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_remove_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns the head of the receive buffer list without removing it,
 * or NULL if the list is empty.
 * assumes the caller is managing and holding a mutual exclusion
 * primitive to avoid simultaneous pushes/pops to the list while the
 * head is being examined.
 */
static struct tse_buffer *
queue_rx_peekhead(struct altera_tse_private *priv)
{
	struct tse_buffer *buffer = NULL;
	list_peek_head(&priv->rxlisthd, buffer, struct tse_buffer, lh);
	return buffer;
}

/* returns nonzero if the rx sgdma is busy; checks once, without polling
 */
static int sgdma_rxbusy(struct altera_tse_private *priv)
{
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
	return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
}

/* waits for the tx sgdma to finish its current operation, returns 0
 * when it transitions to nonbusy, returns 1 if the operation times out
 */
static int sgdma_txbusy(struct altera_tse_private *priv)
{
	int delay = 0;
	struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

	/* if DMA is busy, wait for the current transaction to finish */
	while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
		udelay(1);

	if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
		netdev_err(priv->dev, "timeout waiting for tx dma\n");
		return 1;
	}
	return 0;
}