xref: /openbmc/linux/drivers/usb/renesas_usbhs/fifo.c (revision b6bec26c)
1 /*
2  * Renesas USB driver
3  *
4  * Copyright (C) 2011 Renesas Solutions Corp.
5  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
15  *
16  */
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/scatterlist.h>
20 #include "common.h"
21 #include "pipe.h"
22 
23 #define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
24 #define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
25 #define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
26 #define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)
27 
28 #define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
29 
30 /*
31  *		packet initialize
32  */
33 void usbhs_pkt_init(struct usbhs_pkt *pkt)
34 {
35 	pkt->dma = DMA_ADDR_INVALID;
36 	INIT_LIST_HEAD(&pkt->node);
37 }
38 
39 /*
40  *		packet control function
41  */
42 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
43 {
44 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
45 	struct device *dev = usbhs_priv_to_dev(priv);
46 
47 	dev_err(dev, "null handler\n");
48 
49 	return -EINVAL;
50 }
51 
52 static struct usbhs_pkt_handle usbhsf_null_handler = {
53 	.prepare = usbhsf_null_handle,
54 	.try_run = usbhsf_null_handle,
55 };
56 
57 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
58 		    void (*done)(struct usbhs_priv *priv,
59 				 struct usbhs_pkt *pkt),
60 		    void *buf, int len, int zero, int sequence)
61 {
62 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
63 	struct device *dev = usbhs_priv_to_dev(priv);
64 	unsigned long flags;
65 
66 	if (!done) {
67 		dev_err(dev, "no done function\n");
68 		return;
69 	}
70 
71 	/********************  spin lock ********************/
72 	usbhs_lock(priv, flags);
73 
74 	if (!pipe->handler) {
75 		dev_err(dev, "no handler function\n");
76 		pipe->handler = &usbhsf_null_handler;
77 	}
78 
79 	list_move_tail(&pkt->node, &pipe->list);
80 
81 	/*
82 	 * Each pkt must hold its own handler, because the handler may be
83 	 * changed depending on the situation (for example, the DMA handler
84 	 * can fall back to the PIO handler).
85 	 */
86 	pkt->pipe	= pipe;
87 	pkt->buf	= buf;
88 	pkt->handler	= pipe->handler;
89 	pkt->length	= len;
90 	pkt->zero	= zero;
91 	pkt->actual	= 0;
92 	pkt->done	= done;
93 	pkt->sequence	= sequence;
94 
95 	usbhs_unlock(priv, flags);
96 	/********************  spin unlock ******************/
97 }
98 
99 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
100 {
101 	list_del_init(&pkt->node);
102 }
103 
104 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
105 {
106 	if (list_empty(&pipe->list))
107 		return NULL;
108 
109 	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
110 }
111 
112 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
113 {
114 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
115 	unsigned long flags;
116 
117 	/********************  spin lock ********************/
118 	usbhs_lock(priv, flags);
119 
120 	if (!pkt)
121 		pkt = __usbhsf_pkt_get(pipe);
122 
123 	if (pkt)
124 		__usbhsf_pkt_del(pkt);
125 
126 	usbhs_unlock(priv, flags);
127 	/********************  spin unlock ******************/
128 
129 	return pkt;
130 }
131 
132 enum {
133 	USBHSF_PKT_PREPARE,
134 	USBHSF_PKT_TRY_RUN,
135 	USBHSF_PKT_DMA_DONE,
136 };
137 
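/*
 * Run the callback selected by "type" (prepare/try_run/dma_done) on the
 * first packet queued on the pipe, under the driver lock.  When the
 * callback reports completion, the packet is unlinked, its done()
 * callback is invoked and the next queued packet is started.
 */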
138 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
139 {
140 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
141 	struct usbhs_pkt *pkt;
142 	struct device *dev = usbhs_priv_to_dev(priv);
143 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
144 	unsigned long flags;
145 	int ret = 0;
146 	int is_done = 0;
147 
148 	/********************  spin lock ********************/
149 	usbhs_lock(priv, flags);
150 
151 	pkt = __usbhsf_pkt_get(pipe);
152 	if (!pkt)
153 		goto __usbhs_pkt_handler_end;
154 
155 	switch (type) {
156 	case USBHSF_PKT_PREPARE:
157 		func = pkt->handler->prepare;
158 		break;
159 	case USBHSF_PKT_TRY_RUN:
160 		func = pkt->handler->try_run;
161 		break;
162 	case USBHSF_PKT_DMA_DONE:
163 		func = pkt->handler->dma_done;
164 		break;
165 	default:
166 		dev_err(dev, "unknown pkt handler\n");
167 		goto __usbhs_pkt_handler_end;
168 	}
169 
170 	ret = func(pkt, &is_done);
171 
172 	if (is_done)
173 		__usbhsf_pkt_del(pkt);
174 
175 __usbhs_pkt_handler_end:
176 	usbhs_unlock(priv, flags);
177 	/********************  spin unlock ******************/
178 
179 	if (is_done) {
180 		pkt->done(priv, pkt);
181 		usbhs_pkt_start(pipe);
182 	}
183 
184 	return ret;
185 }
186 
187 void usbhs_pkt_start(struct usbhs_pipe *pipe)
188 {
189 	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
190 }
191 
192 /*
193  *		irq enable/disable function
194  */
195 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
196 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
197 #define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
198 	({								\
199 		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
200 		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
201 		u16 status = (1 << usbhs_pipe_number(pipe));		\
202 		if (!mod)						\
203 			return;						\
204 		if (enable)						\
205 			mod->status |= status;				\
206 		else							\
207 			mod->status &= ~status;				\
208 		usbhs_irq_callback_update(priv, mod);			\
209 	})
210 
211 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
212 {
213 	/*
214 	 * The DCP pipe can NOT use the "ready" interrupt for sending;
215 	 * it has to use the "empty" interrupt.
216 	 * see
217 	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
218 	 *
219 	 * A normal pipe, on the other hand, can use the "ready" interrupt
220 	 * for sending whether it is single or double buffered.
221 	 */
222 	if (usbhs_pipe_is_dcp(pipe))
223 		usbhsf_irq_empty_ctrl(pipe, enable);
224 	else
225 		usbhsf_irq_ready_ctrl(pipe, enable);
226 }
227 
228 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
229 {
230 	usbhsf_irq_ready_ctrl(pipe, enable);
231 }
232 
233 /*
234  *		FIFO ctrl
235  */
236 static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
237 				   struct usbhs_fifo *fifo)
238 {
239 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
240 
241 	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
242 }
243 
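/*
 * Wait until the FIFO port becomes ready (FRDY set in the control
 * register).  Polls in 10us steps and gives up with -EBUSY after
 * roughly 10ms.
 */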
244 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
245 			       struct usbhs_fifo *fifo)
246 {
247 	int timeout = 1024;
248 
249 	do {
250 		/* The FIFO port is accessible */
251 		if (usbhs_read(priv, fifo->ctr) & FRDY)
252 			return 0;
253 
254 		udelay(10);
255 	} while (timeout--);
256 
257 	return -EBUSY;
258 }
259 
260 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
261 			      struct usbhs_fifo *fifo)
262 {
263 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
264 
265 	if (!usbhs_pipe_is_dcp(pipe))
266 		usbhsf_fifo_barrier(priv, fifo);
267 
268 	usbhs_write(priv, fifo->ctr, BCLR);
269 }
270 
271 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
272 			       struct usbhs_fifo *fifo)
273 {
274 	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
275 }
276 
277 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
278 				 struct usbhs_fifo *fifo)
279 {
280 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
281 
282 	usbhs_pipe_select_fifo(pipe, NULL);
283 	usbhs_write(priv, fifo->sel, 0);
284 }
285 
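/*
 * Route the pipe to the given FIFO by writing CURPIPE (plus ISEL for
 * the DCP) into the FIFOSEL register, then poll until the hardware
 * reflects the selection.  Returns -EBUSY if the pipe or the FIFO is
 * already in use, and -EIO if the selection never takes effect.
 */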
286 static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
287 			      struct usbhs_fifo *fifo,
288 			      int write)
289 {
290 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
291 	struct device *dev = usbhs_priv_to_dev(priv);
292 	int timeout = 1024;
293 	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
294 	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */
295 
296 	if (usbhs_pipe_is_busy(pipe) ||
297 	    usbhsf_fifo_is_busy(fifo))
298 		return -EBUSY;
299 
300 	if (usbhs_pipe_is_dcp(pipe)) {
301 		base |= (1 == write) << 5;	/* ISEL */
302 
303 		if (usbhs_mod_is_host(priv))
304 			usbhs_dcp_dir_for_host(pipe, write);
305 	}
306 
307 	/* "base" will be used below  */
308 	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
309 		usbhs_write(priv, fifo->sel, base);
310 	else
311 		usbhs_write(priv, fifo->sel, base | MBW_32);
312 
313 	/* check ISEL and CURPIPE value */
314 	while (timeout--) {
315 		if (base == (mask & usbhs_read(priv, fifo->sel))) {
316 			usbhs_pipe_select_fifo(pipe, fifo);
317 			return 0;
318 		}
319 		udelay(10);
320 	}
321 
322 	dev_err(dev, "fifo select error\n");
323 
324 	return -EIO;
325 }
326 
327 /*
328  *		DCP status stage
329  */
330 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
331 {
332 	struct usbhs_pipe *pipe = pkt->pipe;
333 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
334 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
335 	struct device *dev = usbhs_priv_to_dev(priv);
336 	int ret;
337 
338 	usbhs_pipe_disable(pipe);
339 
340 	ret = usbhsf_fifo_select(pipe, fifo, 1);
341 	if (ret < 0) {
342 		dev_err(dev, "%s() faile\n", __func__);
343 		return ret;
344 	}
345 
346 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
347 
348 	usbhsf_fifo_clear(pipe, fifo);
349 	usbhsf_send_terminator(pipe, fifo);
350 
351 	usbhsf_fifo_unselect(pipe, fifo);
352 
353 	usbhsf_tx_irq_ctrl(pipe, 1);
354 	usbhs_pipe_enable(pipe);
355 
356 	return ret;
357 }
358 
359 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
360 {
361 	struct usbhs_pipe *pipe = pkt->pipe;
362 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
363 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
364 	struct device *dev = usbhs_priv_to_dev(priv);
365 	int ret;
366 
367 	usbhs_pipe_disable(pipe);
368 
369 	ret = usbhsf_fifo_select(pipe, fifo, 0);
370 	if (ret < 0) {
371 		dev_err(dev, "%s() fail\n", __func__);
372 		return ret;
373 	}
374 
375 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
376 	usbhsf_fifo_clear(pipe, fifo);
377 
378 	usbhsf_fifo_unselect(pipe, fifo);
379 
380 	usbhsf_rx_irq_ctrl(pipe, 1);
381 	usbhs_pipe_enable(pipe);
382 
383 	return ret;
384 
385 }
386 
387 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
388 {
389 	struct usbhs_pipe *pipe = pkt->pipe;
390 
391 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
392 		usbhsf_tx_irq_ctrl(pipe, 0);
393 	else
394 		usbhsf_rx_irq_ctrl(pipe, 0);
395 
396 	pkt->actual = pkt->length;
397 	*is_done = 1;
398 
399 	return 0;
400 }
401 
402 struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
403 	.prepare = usbhs_dcp_dir_switch_to_write,
404 	.try_run = usbhs_dcp_dir_switch_done,
405 };
406 
407 struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
408 	.prepare = usbhs_dcp_dir_switch_to_read,
409 	.try_run = usbhs_dcp_dir_switch_done,
410 };
411 
412 /*
413  *		DCP data stage (push)
414  */
415 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
416 {
417 	struct usbhs_pipe *pipe = pkt->pipe;
418 
419 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
420 
421 	/*
422 	 * change handler to PIO push
423 	 */
424 	pkt->handler = &usbhs_fifo_pio_push_handler;
425 
426 	return pkt->handler->prepare(pkt, is_done);
427 }
428 
429 struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
430 	.prepare = usbhsf_dcp_data_stage_try_push,
431 };
432 
433 /*
434  *		DCP data stage (pop)
435  */
436 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
437 					     int *is_done)
438 {
439 	struct usbhs_pipe *pipe = pkt->pipe;
440 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
441 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
442 
443 	if (usbhs_pipe_is_busy(pipe))
444 		return 0;
445 
446 	/*
447 	 * Preparing a pop on the DCP has to
448 	 *  - change the DCP direction,
449 	 *  - clear the FIFO,
450 	 *  - set the DATA1 sequence.
451 	 */
452 	usbhs_pipe_disable(pipe);
453 
454 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
455 
456 	usbhsf_fifo_select(pipe, fifo, 0);
457 	usbhsf_fifo_clear(pipe, fifo);
458 	usbhsf_fifo_unselect(pipe, fifo);
459 
460 	/*
461 	 * change handler to PIO pop
462 	 */
463 	pkt->handler = &usbhs_fifo_pio_pop_handler;
464 
465 	return pkt->handler->prepare(pkt, is_done);
466 }
467 
468 struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
469 	.prepare = usbhsf_dcp_data_stage_prepare_pop,
470 };
471 
472 /*
473  *		PIO push handler
474  */
475 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
476 {
477 	struct usbhs_pipe *pipe = pkt->pipe;
478 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
479 	struct device *dev = usbhs_priv_to_dev(priv);
480 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
481 	void __iomem *addr = priv->base + fifo->port;
482 	u8 *buf;
483 	int maxp = usbhs_pipe_get_maxpacket(pipe);
484 	int total_len;
485 	int i, ret, len;
486 	int is_short;
487 
488 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
489 	pkt->sequence = -1; /* -1 sequence will be ignored */
490 
491 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
492 
493 	ret = usbhsf_fifo_select(pipe, fifo, 1);
494 	if (ret < 0)
495 		return 0;
496 
497 	ret = usbhs_pipe_is_accessible(pipe);
498 	if (ret < 0) {
499 		/* inaccessible pipe is not an error */
500 		ret = 0;
501 		goto usbhs_fifo_write_busy;
502 	}
503 
504 	ret = usbhsf_fifo_barrier(priv, fifo);
505 	if (ret < 0)
506 		goto usbhs_fifo_write_busy;
507 
508 	buf		= pkt->buf    + pkt->actual;
509 	len		= pkt->length - pkt->actual;
510 	len		= min(len, maxp);
511 	total_len	= len;
512 	is_short	= total_len < maxp;
513 
514 	/*
515 	 * FIXME
516 	 *
517 	 * 32-bit access only
518 	 */
519 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
520 		iowrite32_rep(addr, buf, len / 4);
521 		len %= 4;
522 		buf += total_len - len;
523 	}
524 
525 	/* write the remaining bytes */
526 	for (i = 0; i < len; i++)
527 		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
528 
529 	/*
530 	 * variable update
531 	 */
532 	pkt->actual += total_len;
533 
534 	if (pkt->actual < pkt->length)
535 		*is_done = 0;		/* more data remains */
536 	else if (is_short)
537 		*is_done = 1;		/* short packet */
538 	else
539 		*is_done = !pkt->zero;	/* send zero packet ? */
540 
541 	/*
542 	 * pipe/irq handling
543 	 */
544 	if (is_short)
545 		usbhsf_send_terminator(pipe, fifo);
546 
547 	usbhsf_tx_irq_ctrl(pipe, !*is_done);
548 	usbhs_pipe_enable(pipe);
549 
550 	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
551 		usbhs_pipe_number(pipe),
552 		pkt->length, pkt->actual, *is_done, pkt->zero);
553 
554 	/*
555 	 * Transmission end
556 	 */
557 	if (*is_done) {
558 		if (usbhs_pipe_is_dcp(pipe))
559 			usbhs_dcp_control_transfer_done(pipe);
560 	}
561 
562 	usbhsf_fifo_unselect(pipe, fifo);
563 
564 	return 0;
565 
566 usbhs_fifo_write_busy:
567 	usbhsf_fifo_unselect(pipe, fifo);
568 
569 	/*
570 	 * The pipe is busy.
571 	 * Retry from the interrupt handler.
572 	 */
573 	usbhsf_tx_irq_ctrl(pipe, 1);
574 
575 	return ret;
576 }
577 
578 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
579 	.prepare = usbhsf_pio_try_push,
580 	.try_run = usbhsf_pio_try_push,
581 };
582 
583 /*
584  *		PIO pop handler
585  */
586 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
587 {
588 	struct usbhs_pipe *pipe = pkt->pipe;
589 
590 	if (usbhs_pipe_is_busy(pipe))
591 		return 0;
592 
593 	/*
594 	 * enable the pipe to prepare for packet reception
595 	 */
596 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
597 	pkt->sequence = -1; /* -1 sequence will be ignored */
598 
599 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
600 	usbhs_pipe_enable(pipe);
601 	usbhsf_rx_irq_ctrl(pipe, 1);
602 
603 	return 0;
604 }
605 
606 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
607 {
608 	struct usbhs_pipe *pipe = pkt->pipe;
609 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
610 	struct device *dev = usbhs_priv_to_dev(priv);
611 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
612 	void __iomem *addr = priv->base + fifo->port;
613 	u8 *buf;
614 	u32 data = 0;
615 	int maxp = usbhs_pipe_get_maxpacket(pipe);
616 	int rcv_len, len;
617 	int i, ret;
618 	int total_len = 0;
619 
620 	ret = usbhsf_fifo_select(pipe, fifo, 0);
621 	if (ret < 0)
622 		return 0;
623 
624 	ret = usbhsf_fifo_barrier(priv, fifo);
625 	if (ret < 0)
626 		goto usbhs_fifo_read_busy;
627 
628 	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
629 
630 	buf		= pkt->buf    + pkt->actual;
631 	len		= pkt->length - pkt->actual;
632 	len		= min(len, rcv_len);
633 	total_len	= len;
634 
635 	/*
636 	 * Update the actual length first; it decides whether to disable
637 	 * the pipe.  If the pipe stays in BUF state after all the data
638 	 * have been popped, the next interrupt/token will be issued again.
639 	 */
640 	pkt->actual += total_len;
641 
642 	if ((pkt->actual == pkt->length) ||	/* receive all data */
643 	    (total_len < maxp)) {		/* short packet */
644 		*is_done = 1;
645 		usbhsf_rx_irq_ctrl(pipe, 0);
646 		usbhs_pipe_disable(pipe);	/* disable pipe first */
647 	}
648 
649 	/*
650 	 * Clear the buffer if this is a zero-length packet.
651 	 *
652 	 * see
653 	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
654 	 */
655 	if (0 == rcv_len) {
656 		pkt->zero = 1;
657 		usbhsf_fifo_clear(pipe, fifo);
658 		goto usbhs_fifo_read_end;
659 	}
660 
661 	/*
662 	 * FIXME
663 	 *
664 	 * 32-bit access only
665 	 */
666 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
667 		ioread32_rep(addr, buf, len / 4);
668 		len %= 4;
669 		buf += total_len - len;
670 	}
671 
672 	/* read the remaining bytes */
673 	for (i = 0; i < len; i++) {
674 		if (!(i & 0x03))
675 			data = ioread32(addr);
676 
677 		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
678 	}
679 
680 usbhs_fifo_read_end:
681 	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
682 		usbhs_pipe_number(pipe),
683 		pkt->length, pkt->actual, *is_done, pkt->zero);
684 
685 usbhs_fifo_read_busy:
686 	usbhsf_fifo_unselect(pipe, fifo);
687 
688 	return ret;
689 }
690 
691 struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
692 	.prepare = usbhsf_prepare_pop,
693 	.try_run = usbhsf_pio_try_pop,
694 };
695 
696 /*
697  *		DCP control stage handler
698  */
699 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
700 {
701 	usbhs_dcp_control_transfer_done(pkt->pipe);
702 
703 	*is_done = 1;
704 
705 	return 0;
706 }
707 
708 struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
709 	.prepare = usbhsf_ctrl_stage_end,
710 	.try_run = usbhsf_ctrl_stage_end,
711 };
712 
713 /*
714  *		DMA fifo functions
715  */
716 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
717 					    struct usbhs_pkt *pkt)
718 {
719 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
720 		return fifo->tx_chan;
721 
722 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
723 		return fifo->rx_chan;
724 
725 	return NULL;
726 }
727 
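/*
 * Pick a DMA-capable FIFO (D0FIFO or D1FIFO) that has a DMA channel
 * for this packet's direction and is not currently bound to a pipe.
 * Returns NULL when none is available, in which case the callers fall
 * back to PIO.
 */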
728 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
729 					      struct usbhs_pkt *pkt)
730 {
731 	struct usbhs_fifo *fifo;
732 
733 	/* DMA :: D0FIFO */
734 	fifo = usbhsf_get_d0fifo(priv);
735 	if (usbhsf_dma_chan_get(fifo, pkt) &&
736 	    !usbhsf_fifo_is_busy(fifo))
737 		return fifo;
738 
739 	/* DMA :: D1FIFO */
740 	fifo = usbhsf_get_d1fifo(priv);
741 	if (usbhsf_dma_chan_get(fifo, pkt) &&
742 	    !usbhsf_fifo_is_busy(fifo))
743 		return fifo;
744 
745 	return NULL;
746 }
747 
748 #define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
749 #define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
750 static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
751 			      struct usbhs_fifo *fifo,
752 			      u16 dreqe)
753 {
754 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
755 
756 	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
757 }
758 
759 #define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
760 #define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
761 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
762 {
763 	struct usbhs_pipe *pipe = pkt->pipe;
764 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
765 	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
766 
767 	return info->dma_map_ctrl(pkt, map);
768 }
769 
770 static void usbhsf_dma_complete(void *arg);
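/*
 * Workqueue handler: prepare and submit the dmaengine descriptor for
 * the packet, then enable the pipe, assert DREQE and kick off the
 * transfer.
 */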
771 static void xfer_work(struct work_struct *work)
772 {
773 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
774 	struct usbhs_pipe *pipe = pkt->pipe;
775 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
776 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
777 	struct dma_async_tx_descriptor *desc;
778 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
779 	struct device *dev = usbhs_priv_to_dev(priv);
780 	enum dma_transfer_direction dir;
781 
782 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
783 
784 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
785 					pkt->trans, dir,
786 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
787 	if (!desc)
788 		return;
789 
790 	desc->callback		= usbhsf_dma_complete;
791 	desc->callback_param	= pipe;
792 
793 	if (dmaengine_submit(desc) < 0) {
794 		dev_err(dev, "Failed to submit dma descriptor\n");
795 		return;
796 	}
797 
798 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
799 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
800 
801 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
802 	usbhs_pipe_enable(pipe);
803 	usbhsf_dma_start(pipe, fifo);
804 	dma_async_issue_pending(chan);
805 }
806 
807 /*
808  *		DMA push handler
809  */
810 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
811 {
812 	struct usbhs_pipe *pipe = pkt->pipe;
813 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
814 	struct usbhs_fifo *fifo;
815 	int len = pkt->length - pkt->actual;
816 	int ret;
817 
818 	if (usbhs_pipe_is_busy(pipe))
819 		return 0;
820 
821 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
822 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
823 	    usbhs_pipe_is_dcp(pipe))
824 		goto usbhsf_pio_prepare_push;
825 
826 	if (len & 0x7) /* 8byte alignment */
827 		goto usbhsf_pio_prepare_push;
828 
829 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
830 		goto usbhsf_pio_prepare_push;
831 
832 	/* get an available DMA fifo */
833 	fifo = usbhsf_get_dma_fifo(priv, pkt);
834 	if (!fifo)
835 		goto usbhsf_pio_prepare_push;
836 
837 	if (usbhsf_dma_map(pkt) < 0)
838 		goto usbhsf_pio_prepare_push;
839 
840 	ret = usbhsf_fifo_select(pipe, fifo, 0);
841 	if (ret < 0)
842 		goto usbhsf_pio_prepare_push_unmap;
843 
844 	pkt->trans = len;
845 
846 	INIT_WORK(&pkt->work, xfer_work);
847 	schedule_work(&pkt->work);
848 
849 	return 0;
850 
851 usbhsf_pio_prepare_push_unmap:
852 	usbhsf_dma_unmap(pkt);
853 usbhsf_pio_prepare_push:
854 	/*
855 	 * change handler to PIO
856 	 */
857 	pkt->handler = &usbhs_fifo_pio_push_handler;
858 
859 	return pkt->handler->prepare(pkt, is_done);
860 }
861 
862 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
863 {
864 	struct usbhs_pipe *pipe = pkt->pipe;
865 
866 	pkt->actual = pkt->trans;
867 
868 	*is_done = !pkt->zero;	/* send zero packet ? */
869 
870 	usbhsf_dma_stop(pipe, pipe->fifo);
871 	usbhsf_dma_unmap(pkt);
872 	usbhsf_fifo_unselect(pipe, pipe->fifo);
873 
874 	return 0;
875 }
876 
877 struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
878 	.prepare	= usbhsf_dma_prepare_push,
879 	.dma_done	= usbhsf_dma_push_done,
880 };
881 
882 /*
883  *		DMA pop handler
884  */
885 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
886 {
887 	struct usbhs_pipe *pipe = pkt->pipe;
888 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
889 	struct usbhs_fifo *fifo;
890 	int len, ret;
891 
892 	if (usbhs_pipe_is_busy(pipe))
893 		return 0;
894 
895 	if (usbhs_pipe_is_dcp(pipe))
896 		goto usbhsf_pio_prepare_pop;
897 
898 	/* get an available DMA fifo */
899 	fifo = usbhsf_get_dma_fifo(priv, pkt);
900 	if (!fifo)
901 		goto usbhsf_pio_prepare_pop;
902 
903 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
904 		goto usbhsf_pio_prepare_pop;
905 
906 	ret = usbhsf_fifo_select(pipe, fifo, 0);
907 	if (ret < 0)
908 		goto usbhsf_pio_prepare_pop;
909 
910 	/* use PIO if packet is less than pio_dma_border */
911 	len = usbhsf_fifo_rcv_len(priv, fifo);
912 	len = min(pkt->length - pkt->actual, len);
913 	if (len & 0x7) /* 8byte alignment */
914 		goto usbhsf_pio_prepare_pop_unselect;
915 
916 	if (len < usbhs_get_dparam(priv, pio_dma_border))
917 		goto usbhsf_pio_prepare_pop_unselect;
918 
919 	ret = usbhsf_fifo_barrier(priv, fifo);
920 	if (ret < 0)
921 		goto usbhsf_pio_prepare_pop_unselect;
922 
923 	if (usbhsf_dma_map(pkt) < 0)
924 		goto usbhsf_pio_prepare_pop_unselect;
925 
926 	/* DMA */
927 
928 	/*
929 	 * usbhs_fifo_dma_pop_handler :: prepare enabled the RX ready
930 	 * interrupt so that we could get here,
931 	 * but it is no longer needed once DMA takes over; disable it.
932 	 */
933 	usbhsf_rx_irq_ctrl(pipe, 0);
934 
935 	pkt->trans = len;
936 
937 	INIT_WORK(&pkt->work, xfer_work);
938 	schedule_work(&pkt->work);
939 
940 	return 0;
941 
942 usbhsf_pio_prepare_pop_unselect:
943 	usbhsf_fifo_unselect(pipe, fifo);
944 usbhsf_pio_prepare_pop:
945 
946 	/*
947 	 * change handler to PIO
948 	 */
949 	pkt->handler = &usbhs_fifo_pio_pop_handler;
950 
951 	return pkt->handler->try_run(pkt, is_done);
952 }
953 
954 static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
955 {
956 	struct usbhs_pipe *pipe = pkt->pipe;
957 	int maxp = usbhs_pipe_get_maxpacket(pipe);
958 
959 	usbhsf_dma_stop(pipe, pipe->fifo);
960 	usbhsf_dma_unmap(pkt);
961 	usbhsf_fifo_unselect(pipe, pipe->fifo);
962 
963 	pkt->actual += pkt->trans;
964 
965 	if ((pkt->actual == pkt->length) ||	/* receive all data */
966 	    (pkt->trans < maxp)) {		/* short packet */
967 		*is_done = 1;
968 	} else {
969 		/* re-enable */
970 		usbhsf_prepare_pop(pkt, is_done);
971 	}
972 
973 	return 0;
974 }
975 
976 struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
977 	.prepare	= usbhsf_prepare_pop,
978 	.try_run	= usbhsf_dma_try_pop,
979 	.dma_done	= usbhsf_dma_pop_done
980 };
981 
982 /*
983  *		DMA setting
984  */
985 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
986 {
987 	struct sh_dmae_slave *slave = param;
988 
989 	/*
990 	 * FIXME
991 	 *
992 	 * usbhs doesn't recognize id = 0 as valid DMA
993 	 */
994 	if (0 == slave->shdma_slave.slave_id)
995 		return false;
996 
997 	chan->private = slave;
998 
999 	return true;
1000 }
1001 
1002 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1003 {
1004 	if (fifo->tx_chan)
1005 		dma_release_channel(fifo->tx_chan);
1006 	if (fifo->rx_chan)
1007 		dma_release_channel(fifo->rx_chan);
1008 
1009 	fifo->tx_chan = NULL;
1010 	fifo->rx_chan = NULL;
1011 }
1012 
1013 static void usbhsf_dma_init(struct usbhs_priv *priv,
1014 			    struct usbhs_fifo *fifo)
1015 {
1016 	struct device *dev = usbhs_priv_to_dev(priv);
1017 	dma_cap_mask_t mask;
1018 
1019 	dma_cap_zero(mask);
1020 	dma_cap_set(DMA_SLAVE, mask);
1021 	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1022 					    &fifo->tx_slave);
1023 
1024 	dma_cap_zero(mask);
1025 	dma_cap_set(DMA_SLAVE, mask);
1026 	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1027 					    &fifo->rx_slave);
1028 
1029 	if (fifo->tx_chan || fifo->rx_chan)
1030 		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1031 			 fifo->name,
1032 			 fifo->tx_chan ? "[TX]" : "    ",
1033 			 fifo->rx_chan ? "[RX]" : "    ");
1034 }
1035 
1036 /*
1037  *		irq functions
1038  */
1039 static int usbhsf_irq_empty(struct usbhs_priv *priv,
1040 			    struct usbhs_irq_state *irq_state)
1041 {
1042 	struct usbhs_pipe *pipe;
1043 	struct device *dev = usbhs_priv_to_dev(priv);
1044 	int i, ret;
1045 
1046 	if (!irq_state->bempsts) {
1047 		dev_err(dev, "debug %s !!\n", __func__);
1048 		return -EIO;
1049 	}
1050 
1051 	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1052 
1053 	/*
1054 	 * search for the interrupted "pipe",
1055 	 * not the "uep".
1056 	 */
1057 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1058 		if (!(irq_state->bempsts & (1 << i)))
1059 			continue;
1060 
1061 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1062 		if (ret < 0)
1063 			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1064 	}
1065 
1066 	return 0;
1067 }
1068 
1069 static int usbhsf_irq_ready(struct usbhs_priv *priv,
1070 			    struct usbhs_irq_state *irq_state)
1071 {
1072 	struct usbhs_pipe *pipe;
1073 	struct device *dev = usbhs_priv_to_dev(priv);
1074 	int i, ret;
1075 
1076 	if (!irq_state->brdysts) {
1077 		dev_err(dev, "debug %s !!\n", __func__);
1078 		return -EIO;
1079 	}
1080 
1081 	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1082 
1083 	/*
1084 	 * search for the interrupted "pipe",
1085 	 * not the "uep".
1086 	 */
1087 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1088 		if (!(irq_state->brdysts & (1 << i)))
1089 			continue;
1090 
1091 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1092 		if (ret < 0)
1093 			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1094 	}
1095 
1096 	return 0;
1097 }
1098 
1099 static void usbhsf_dma_complete(void *arg)
1100 {
1101 	struct usbhs_pipe *pipe = arg;
1102 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1103 	struct device *dev = usbhs_priv_to_dev(priv);
1104 	int ret;
1105 
1106 	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1107 	if (ret < 0)
1108 		dev_err(dev, "dma_complete run_error %d : %d\n",
1109 			usbhs_pipe_number(pipe), ret);
1110 }
1111 
1112 /*
1113  *		fifo init
1114  */
1115 void usbhs_fifo_init(struct usbhs_priv *priv)
1116 {
1117 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1118 	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1119 	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1120 	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1121 
1122 	mod->irq_empty		= usbhsf_irq_empty;
1123 	mod->irq_ready		= usbhsf_irq_ready;
1124 	mod->irq_bempsts	= 0;
1125 	mod->irq_brdysts	= 0;
1126 
1127 	cfifo->pipe	= NULL;
1128 	cfifo->tx_chan	= NULL;
1129 	cfifo->rx_chan	= NULL;
1130 
1131 	d0fifo->pipe	= NULL;
1132 	d0fifo->tx_chan	= NULL;
1133 	d0fifo->rx_chan	= NULL;
1134 
1135 	d1fifo->pipe	= NULL;
1136 	d1fifo->tx_chan	= NULL;
1137 	d1fifo->rx_chan	= NULL;
1138 
1139 	usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
1140 	usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
1141 }
1142 
1143 void usbhs_fifo_quit(struct usbhs_priv *priv)
1144 {
1145 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1146 
1147 	mod->irq_empty		= NULL;
1148 	mod->irq_ready		= NULL;
1149 	mod->irq_bempsts	= 0;
1150 	mod->irq_brdysts	= 0;
1151 
1152 	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
1153 	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
1154 }
1155 
1156 int usbhs_fifo_probe(struct usbhs_priv *priv)
1157 {
1158 	struct usbhs_fifo *fifo;
1159 
1160 	/* CFIFO */
1161 	fifo = usbhsf_get_cfifo(priv);
1162 	fifo->name	= "CFIFO";
1163 	fifo->port	= CFIFO;
1164 	fifo->sel	= CFIFOSEL;
1165 	fifo->ctr	= CFIFOCTR;
1166 
1167 	/* D0FIFO */
1168 	fifo = usbhsf_get_d0fifo(priv);
1169 	fifo->name	= "D0FIFO";
1170 	fifo->port	= D0FIFO;
1171 	fifo->sel	= D0FIFOSEL;
1172 	fifo->ctr	= D0FIFOCTR;
1173 	fifo->tx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d0_tx_id);
1174 	fifo->rx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d0_rx_id);
1175 
1176 	/* D1FIFO */
1177 	fifo = usbhsf_get_d1fifo(priv);
1178 	fifo->name	= "D1FIFO";
1179 	fifo->port	= D1FIFO;
1180 	fifo->sel	= D1FIFOSEL;
1181 	fifo->ctr	= D1FIFOCTR;
1182 	fifo->tx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d1_tx_id);
1183 	fifo->rx_slave.shdma_slave.slave_id	= usbhs_get_dparam(priv, d1_rx_id);
1184 
1185 	return 0;
1186 }
1187 
1188 void usbhs_fifo_remove(struct usbhs_priv *priv)
1189 {
1190 }
1191