xref: /openbmc/linux/drivers/usb/renesas_usbhs/fifo.c (revision 9cdb81c7)
1 /*
2  * Renesas USB driver
3  *
4  * Copyright (C) 2011 Renesas Solutions Corp.
5  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
15  *
16  */
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/scatterlist.h>
20 #include "./common.h"
21 #include "./pipe.h"
22 
23 #define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
24 #define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
25 #define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))
26 #define usbhsf_is_cfifo(p, f)	(usbhsf_get_cfifo(p) == f)
27 
28 #define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
29 
30 /*
31  *		packet initialize
32  */
33 void usbhs_pkt_init(struct usbhs_pkt *pkt)
34 {
35 	pkt->dma = DMA_ADDR_INVALID;
36 	INIT_LIST_HEAD(&pkt->node);
37 }
38 
39 /*
40  *		packet control function
41  */
42 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
43 {
44 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
45 	struct device *dev = usbhs_priv_to_dev(priv);
46 
47 	dev_err(dev, "null handler\n");
48 
49 	return -EINVAL;
50 }
51 
52 static struct usbhs_pkt_handle usbhsf_null_handler = {
53 	.prepare = usbhsf_null_handle,
54 	.try_run = usbhsf_null_handle,
55 };
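/*
 * usbhs_pkt_push() below installs this null handler when a packet is
 * pushed on a pipe that has no handler set, so the mistake is reported
 * via dev_err() instead of dereferencing a NULL handler.
 */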
56 
57 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
58 		    void (*done)(struct usbhs_priv *priv,
59 				 struct usbhs_pkt *pkt),
60 		    void *buf, int len, int zero, int sequence)
61 {
62 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
63 	struct device *dev = usbhs_priv_to_dev(priv);
64 	unsigned long flags;
65 
66 	if (!done) {
67 		dev_err(dev, "no done function\n");
68 		return;
69 	}
70 
71 	/********************  spin lock ********************/
72 	usbhs_lock(priv, flags);
73 
74 	if (!pipe->handler) {
75 		dev_err(dev, "no handler function\n");
76 		pipe->handler = &usbhsf_null_handler;
77 	}
78 
79 	list_move_tail(&pkt->node, &pipe->list);
80 
81 	/*
82 	 * each pkt must hold its own handler, because the handler
83 	 * may be changed depending on the situation
84 	 * (e.g. dma handler -> pio handler).
85 	 */
86 	pkt->pipe	= pipe;
87 	pkt->buf	= buf;
88 	pkt->handler	= pipe->handler;
89 	pkt->length	= len;
90 	pkt->zero	= zero;
91 	pkt->actual	= 0;
92 	pkt->done	= done;
93 	pkt->sequence	= sequence;
94 
95 	usbhs_unlock(priv, flags);
96 	/********************  spin unlock ******************/
97 }
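/*
 * Illustrative sketch (not part of this file): a mod layer such as
 * mod_gadget/mod_host is expected to queue a transfer roughly as below.
 * The names my_done(), my_buf and my_len are hypothetical and only show
 * the argument order; the handler is normally chosen when the pipe is
 * configured.
 *
 *	static void my_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
 *	{
 *		.. transfer finished, pkt->actual holds the byte count ..
 *	}
 *
 *	usbhs_pkt_init(pkt);
 *	pipe->handler = &usbhs_fifo_pio_push_handler;
 *	usbhs_pkt_push(pipe, pkt, my_done, my_buf, my_len, 0, -1);
 *	usbhs_pkt_start(pipe);
 */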
98 
99 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
100 {
101 	list_del_init(&pkt->node);
102 }
103 
104 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
105 {
106 	if (list_empty(&pipe->list))
107 		return NULL;
108 
109 	return list_first_entry(&pipe->list, struct usbhs_pkt, node);
110 }
111 
112 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
113 {
114 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
115 	unsigned long flags;
116 
117 	/********************  spin lock ********************/
118 	usbhs_lock(priv, flags);
119 
120 	if (!pkt)
121 		pkt = __usbhsf_pkt_get(pipe);
122 
123 	if (pkt)
124 		__usbhsf_pkt_del(pkt);
125 
126 	usbhs_unlock(priv, flags);
127 	/********************  spin unlock ******************/
128 
129 	return pkt;
130 }
131 
132 enum {
133 	USBHSF_PKT_PREPARE,
134 	USBHSF_PKT_TRY_RUN,
135 	USBHSF_PKT_DMA_DONE,
136 };
137 
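/*
 * usbhsf_pkt_handler() below runs the head packet of a pipe through one
 * of its handler callbacks:
 *   USBHSF_PKT_PREPARE  - issued from usbhs_pkt_start()
 *   USBHSF_PKT_TRY_RUN  - issued from the BEMP/BRDY irq handlers
 *   USBHSF_PKT_DMA_DONE - issued from usbhsf_dma_complete()
 * The packet's done() callback and the start of the next packet happen
 * after the lock has been released.
 */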
138 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
139 {
140 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
141 	struct usbhs_pkt *pkt;
142 	struct device *dev = usbhs_priv_to_dev(priv);
143 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
144 	unsigned long flags;
145 	int ret = 0;
146 	int is_done = 0;
147 
148 	/********************  spin lock ********************/
149 	usbhs_lock(priv, flags);
150 
151 	pkt = __usbhsf_pkt_get(pipe);
152 	if (!pkt)
153 		goto __usbhs_pkt_handler_end;
154 
155 	switch (type) {
156 	case USBHSF_PKT_PREPARE:
157 		func = pkt->handler->prepare;
158 		break;
159 	case USBHSF_PKT_TRY_RUN:
160 		func = pkt->handler->try_run;
161 		break;
162 	case USBHSF_PKT_DMA_DONE:
163 		func = pkt->handler->dma_done;
164 		break;
165 	default:
166 		dev_err(dev, "unknown pkt handler\n");
167 		goto __usbhs_pkt_handler_end;
168 	}
169 
170 	ret = func(pkt, &is_done);
171 
172 	if (is_done)
173 		__usbhsf_pkt_del(pkt);
174 
175 __usbhs_pkt_handler_end:
176 	usbhs_unlock(priv, flags);
177 	/********************  spin unlock ******************/
178 
179 	if (is_done) {
180 		pkt->done(priv, pkt);
181 		usbhs_pkt_start(pipe);
182 	}
183 
184 	return ret;
185 }
186 
187 void usbhs_pkt_start(struct usbhs_pipe *pipe)
188 {
189 	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
190 }
191 
192 /*
193  *		irq enable/disable function
194  */
195 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
196 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
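/*
 * Generic helper for the two wrappers above: set or clear this pipe's
 * bit in mod->irq_bempsts / mod->irq_brdysts (the "status" argument is
 * pasted into the field name), then call usbhs_irq_callback_update()
 * to apply the new mask.
 */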
197 #define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
198 	({								\
199 		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
200 		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
201 		u16 status = (1 << usbhs_pipe_number(pipe));		\
202 		if (!mod)						\
203 			return;						\
204 		if (enable)						\
205 			mod->irq_##status |= status;			\
206 		else							\
207 			mod->irq_##status &= ~status;			\
208 		usbhs_irq_callback_update(priv, mod);			\
209 	})
210 
211 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
212 {
213 	/*
214 	 * The DCP pipe can NOT use the "ready" interrupt for sending;
215 	 * it has to use the "empty" interrupt instead.
216 	 * see
217 	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
218 	 *
219 	 * Normal pipes, on the other hand, can use the "ready" interrupt
220 	 * for sending, whether single or double buffered.
221 	 */
222 	if (usbhs_pipe_is_dcp(pipe))
223 		usbhsf_irq_empty_ctrl(pipe, enable);
224 	else
225 		usbhsf_irq_ready_ctrl(pipe, enable);
226 }
227 
228 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
229 {
230 	usbhsf_irq_ready_ctrl(pipe, enable);
231 }
232 
233 /*
234  *		FIFO ctrl
235  */
236 static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
237 				   struct usbhs_fifo *fifo)
238 {
239 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
240 
241 	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
242 }
243 
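/*
 * Poll FRDY until the FIFO port becomes accessible; give up with
 * -EBUSY after roughly 10ms (1024 iterations of udelay(10)).
 */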
244 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
245 			       struct usbhs_fifo *fifo)
246 {
247 	int timeout = 1024;
248 
249 	do {
250 		/* The FIFO port is accessible */
251 		if (usbhs_read(priv, fifo->ctr) & FRDY)
252 			return 0;
253 
254 		udelay(10);
255 	} while (timeout--);
256 
257 	return -EBUSY;
258 }
259 
260 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
261 			      struct usbhs_fifo *fifo)
262 {
263 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
264 
265 	if (!usbhs_pipe_is_dcp(pipe))
266 		usbhsf_fifo_barrier(priv, fifo);
267 
268 	usbhs_write(priv, fifo->ctr, BCLR);
269 }
270 
271 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
272 			       struct usbhs_fifo *fifo)
273 {
274 	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
275 }
276 
277 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
278 				 struct usbhs_fifo *fifo)
279 {
280 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
281 
282 	usbhs_pipe_select_fifo(pipe, NULL);
283 	usbhs_write(priv, fifo->sel, 0);
284 }
285 
286 static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
287 			      struct usbhs_fifo *fifo,
288 			      int write)
289 {
290 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
291 	struct device *dev = usbhs_priv_to_dev(priv);
292 	int timeout = 1024;
293 	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
294 	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */
295 
296 	if (usbhs_pipe_is_busy(pipe) ||
297 	    usbhsf_fifo_is_busy(fifo))
298 		return -EBUSY;
299 
300 	if (usbhs_pipe_is_dcp(pipe)) {
301 		base |= (1 == write) << 5;	/* ISEL */
302 
303 		if (usbhs_mod_is_host(priv))
304 			usbhs_dcp_dir_for_host(pipe, write);
305 	}
306 
307 	/* "base" will be used below  */
308 	if (usbhs_get_dparam(priv, has_sudmac) && !usbhsf_is_cfifo(priv, fifo))
309 		usbhs_write(priv, fifo->sel, base);
310 	else
311 		usbhs_write(priv, fifo->sel, base | MBW_32);
312 
313 	/* check ISEL and CURPIPE value */
314 	while (timeout--) {
315 		if (base == (mask & usbhs_read(priv, fifo->sel))) {
316 			usbhs_pipe_select_fifo(pipe, fifo);
317 			return 0;
318 		}
319 		udelay(10);
320 	}
321 
322 	dev_err(dev, "fifo select error\n");
323 
324 	return -EIO;
325 }
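/*
 * Every FIFO access in this file follows the same pattern:
 * usbhsf_fifo_select() binds the pipe to a FIFO port (CURPIPE/ISEL),
 * the data or control access is done, then usbhsf_fifo_unselect()
 * releases the port so another pipe can use it.
 */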
326 
327 /*
328  *		DCP status stage
329  */
330 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
331 {
332 	struct usbhs_pipe *pipe = pkt->pipe;
333 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
334 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
335 	struct device *dev = usbhs_priv_to_dev(priv);
336 	int ret;
337 
338 	usbhs_pipe_disable(pipe);
339 
340 	ret = usbhsf_fifo_select(pipe, fifo, 1);
341 	if (ret < 0) {
342 		dev_err(dev, "%s() failed\n", __func__);
343 		return ret;
344 	}
345 
346 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
347 
348 	usbhsf_fifo_clear(pipe, fifo);
349 	usbhsf_send_terminator(pipe, fifo);
350 
351 	usbhsf_fifo_unselect(pipe, fifo);
352 
353 	usbhsf_tx_irq_ctrl(pipe, 1);
354 	usbhs_pipe_enable(pipe);
355 
356 	return ret;
357 }
358 
359 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
360 {
361 	struct usbhs_pipe *pipe = pkt->pipe;
362 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
363 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
364 	struct device *dev = usbhs_priv_to_dev(priv);
365 	int ret;
366 
367 	usbhs_pipe_disable(pipe);
368 
369 	ret = usbhsf_fifo_select(pipe, fifo, 0);
370 	if (ret < 0) {
371 		dev_err(dev, "%s() failed\n", __func__);
372 		return ret;
373 	}
374 
375 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
376 	usbhsf_fifo_clear(pipe, fifo);
377 
378 	usbhsf_fifo_unselect(pipe, fifo);
379 
380 	usbhsf_rx_irq_ctrl(pipe, 1);
381 	usbhs_pipe_enable(pipe);
382 
383 	return ret;
384 
385 }
386 
387 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
388 {
389 	struct usbhs_pipe *pipe = pkt->pipe;
390 
391 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
392 		usbhsf_tx_irq_ctrl(pipe, 0);
393 	else
394 		usbhsf_rx_irq_ctrl(pipe, 0);
395 
396 	pkt->actual = pkt->length;
397 	*is_done = 1;
398 
399 	return 0;
400 }
401 
402 struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
403 	.prepare = usbhs_dcp_dir_switch_to_write,
404 	.try_run = usbhs_dcp_dir_switch_done,
405 };
406 
407 struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
408 	.prepare = usbhs_dcp_dir_switch_to_read,
409 	.try_run = usbhs_dcp_dir_switch_done,
410 };
411 
412 /*
413  *		DCP data stage (push)
414  */
415 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
416 {
417 	struct usbhs_pipe *pipe = pkt->pipe;
418 
419 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
420 
421 	/*
422 	 * change handler to PIO push
423 	 */
424 	pkt->handler = &usbhs_fifo_pio_push_handler;
425 
426 	return pkt->handler->prepare(pkt, is_done);
427 }
428 
429 struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
430 	.prepare = usbhsf_dcp_data_stage_try_push,
431 };
432 
433 /*
434  *		DCP data stage (pop)
435  */
436 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
437 					     int *is_done)
438 {
439 	struct usbhs_pipe *pipe = pkt->pipe;
440 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
441 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
442 
443 	if (usbhs_pipe_is_busy(pipe))
444 		return 0;
445 
446 	/*
447 	 * preparing a pop on the DCP requires:
448 	 *  - changing the DCP direction,
449 	 *  - clearing the fifo,
450 	 *  - setting the DATA1 sequence
451 	 */
452 	usbhs_pipe_disable(pipe);
453 
454 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
455 
456 	usbhsf_fifo_select(pipe, fifo, 0);
457 	usbhsf_fifo_clear(pipe, fifo);
458 	usbhsf_fifo_unselect(pipe, fifo);
459 
460 	/*
461 	 * change handler to PIO pop
462 	 */
463 	pkt->handler = &usbhs_fifo_pio_pop_handler;
464 
465 	return pkt->handler->prepare(pkt, is_done);
466 }
467 
468 struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
469 	.prepare = usbhsf_dcp_data_stage_prepare_pop,
470 };
471 
472 /*
473  *		PIO push handler
474  */
475 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
476 {
477 	struct usbhs_pipe *pipe = pkt->pipe;
478 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
479 	struct device *dev = usbhs_priv_to_dev(priv);
480 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
481 	void __iomem *addr = priv->base + fifo->port;
482 	u8 *buf;
483 	int maxp = usbhs_pipe_get_maxpacket(pipe);
484 	int total_len;
485 	int i, ret, len;
486 	int is_short;
487 
488 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
489 	pkt->sequence = -1; /* -1 sequence will be ignored */
490 
491 	ret = usbhsf_fifo_select(pipe, fifo, 1);
492 	if (ret < 0)
493 		return 0;
494 
495 	ret = usbhs_pipe_is_accessible(pipe);
496 	if (ret < 0) {
497 		/* inaccessible pipe is not an error */
498 		ret = 0;
499 		goto usbhs_fifo_write_busy;
500 	}
501 
502 	ret = usbhsf_fifo_barrier(priv, fifo);
503 	if (ret < 0)
504 		goto usbhs_fifo_write_busy;
505 
506 	buf		= pkt->buf    + pkt->actual;
507 	len		= pkt->length - pkt->actual;
508 	len		= min(len, maxp);
509 	total_len	= len;
510 	is_short	= total_len < maxp;
511 
512 	/*
513 	 * FIXME
514 	 *
515 	 * 32-bit access only
516 	 */
517 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
518 		iowrite32_rep(addr, buf, len / 4);
519 		len %= 4;
520 		buf += total_len - len;
521 	}
522 
523 	/* write the remaining bytes one at a time */
524 	for (i = 0; i < len; i++)
525 		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
526 
527 	/*
528 	 * variable update
529 	 */
530 	pkt->actual += total_len;
531 
532 	if (pkt->actual < pkt->length)
533 		*is_done = 0;		/* there is remaining data */
534 	else if (is_short)
535 		*is_done = 1;		/* short packet */
536 	else
537 		*is_done = !pkt->zero;	/* send zero packet ? */
538 
539 	/*
540 	 * pipe/irq handling
541 	 */
542 	if (is_short)
543 		usbhsf_send_terminator(pipe, fifo);
544 
545 	usbhsf_tx_irq_ctrl(pipe, !*is_done);
546 	usbhs_pipe_enable(pipe);
547 
548 	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
549 		usbhs_pipe_number(pipe),
550 		pkt->length, pkt->actual, *is_done, pkt->zero);
551 
552 	/*
553 	 * Transmission end
554 	 */
555 	if (*is_done) {
556 		if (usbhs_pipe_is_dcp(pipe))
557 			usbhs_dcp_control_transfer_done(pipe);
558 	}
559 
560 	usbhsf_fifo_unselect(pipe, fifo);
561 
562 	return 0;
563 
564 usbhs_fifo_write_busy:
565 	usbhsf_fifo_unselect(pipe, fifo);
566 
567 	/*
568 	 * the pipe is busy;
569 	 * retry from the interrupt handler
570 	 */
571 	usbhsf_tx_irq_ctrl(pipe, 1);
572 
573 	return ret;
574 }
575 
576 struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
577 	.prepare = usbhsf_pio_try_push,
578 	.try_run = usbhsf_pio_try_push,
579 };
580 
581 /*
582  *		PIO pop handler
583  */
584 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
585 {
586 	struct usbhs_pipe *pipe = pkt->pipe;
587 
588 	if (usbhs_pipe_is_busy(pipe))
589 		return 0;
590 
591 	/*
592 	 * enable the pipe to prepare for packet reception
593 	 */
594 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
595 	pkt->sequence = -1; /* -1 sequence will be ignored */
596 
597 	usbhs_pipe_enable(pipe);
598 	usbhsf_rx_irq_ctrl(pipe, 1);
599 
600 	return 0;
601 }
602 
603 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
604 {
605 	struct usbhs_pipe *pipe = pkt->pipe;
606 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
607 	struct device *dev = usbhs_priv_to_dev(priv);
608 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
609 	void __iomem *addr = priv->base + fifo->port;
610 	u8 *buf;
611 	u32 data = 0;
612 	int maxp = usbhs_pipe_get_maxpacket(pipe);
613 	int rcv_len, len;
614 	int i, ret;
615 	int total_len = 0;
616 
617 	ret = usbhsf_fifo_select(pipe, fifo, 0);
618 	if (ret < 0)
619 		return 0;
620 
621 	ret = usbhsf_fifo_barrier(priv, fifo);
622 	if (ret < 0)
623 		goto usbhs_fifo_read_busy;
624 
625 	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
626 
627 	buf		= pkt->buf    + pkt->actual;
628 	len		= pkt->length - pkt->actual;
629 	len		= min(len, rcv_len);
630 	total_len	= len;
631 
632 	/*
633 	 * update the actual length first here, as it decides whether to
634 	 * disable the pipe: if this pipe stays in BUF state after all
635 	 * data have been popped, the next interrupt/token will be issued again
636 	 */
637 	pkt->actual += total_len;
638 
639 	if ((pkt->actual == pkt->length) ||	/* receive all data */
640 	    (total_len < maxp)) {		/* short packet */
641 		*is_done = 1;
642 		usbhsf_rx_irq_ctrl(pipe, 0);
643 		usbhs_pipe_disable(pipe);	/* disable pipe first */
644 	}
645 
646 	/*
647 	 * Clear the buffer if this is a zero-length packet
648 	 *
649 	 * see
650 	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
651 	 */
652 	if (0 == rcv_len) {
653 		pkt->zero = 1;
654 		usbhsf_fifo_clear(pipe, fifo);
655 		goto usbhs_fifo_read_end;
656 	}
657 
658 	/*
659 	 * FIXME
660 	 *
661 	 * 32-bit access only
662 	 */
663 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
664 		ioread32_rep(addr, buf, len / 4);
665 		len %= 4;
666 		buf += total_len - len;
667 	}
668 
669 	/* read the remaining bytes one at a time */
670 	for (i = 0; i < len; i++) {
671 		if (!(i & 0x03))
672 			data = ioread32(addr);
673 
674 		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
675 	}
676 
677 usbhs_fifo_read_end:
678 	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
679 		usbhs_pipe_number(pipe),
680 		pkt->length, pkt->actual, *is_done, pkt->zero);
681 
682 usbhs_fifo_read_busy:
683 	usbhsf_fifo_unselect(pipe, fifo);
684 
685 	return ret;
686 }
687 
688 struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
689 	.prepare = usbhsf_prepare_pop,
690 	.try_run = usbhsf_pio_try_pop,
691 };
692 
693 /*
694  *		DCP control stage handler
695  */
696 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
697 {
698 	usbhs_dcp_control_transfer_done(pkt->pipe);
699 
700 	*is_done = 1;
701 
702 	return 0;
703 }
704 
705 struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
706 	.prepare = usbhsf_ctrl_stage_end,
707 	.try_run = usbhsf_ctrl_stage_end,
708 };
709 
710 /*
711  *		DMA fifo functions
712  */
713 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
714 					    struct usbhs_pkt *pkt)
715 {
716 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
717 		return fifo->tx_chan;
718 
719 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
720 		return fifo->rx_chan;
721 
722 	return NULL;
723 }
724 
725 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
726 					      struct usbhs_pkt *pkt)
727 {
728 	struct usbhs_fifo *fifo;
729 
730 	/* DMA :: D0FIFO */
731 	fifo = usbhsf_get_d0fifo(priv);
732 	if (usbhsf_dma_chan_get(fifo, pkt) &&
733 	    !usbhsf_fifo_is_busy(fifo))
734 		return fifo;
735 
736 	/* DMA :: D1FIFO */
737 	fifo = usbhsf_get_d1fifo(priv);
738 	if (usbhsf_dma_chan_get(fifo, pkt) &&
739 	    !usbhsf_fifo_is_busy(fifo))
740 		return fifo;
741 
742 	return NULL;
743 }
744 
745 #define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
746 #define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
747 static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
748 			      struct usbhs_fifo *fifo,
749 			      u16 dreqe)
750 {
751 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
752 
753 	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
754 }
755 
756 #define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
757 #define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
758 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
759 {
760 	struct usbhs_pipe *pipe = pkt->pipe;
761 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
762 	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
763 
764 	return info->dma_map_ctrl(pkt, map);
765 }
766 
767 static void usbhsf_dma_complete(void *arg);
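/*
 * The dmaengine descriptor is prepared and submitted from a workqueue:
 * the prepare/try_run handlers run under usbhs_lock() (see
 * usbhsf_pkt_handler()), so they only validate the transfer, set
 * pkt->trans and schedule xfer_work(), which then talks to the
 * dmaengine outside of that spinlock.
 */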
768 static void xfer_work(struct work_struct *work)
769 {
770 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
771 	struct usbhs_pipe *pipe = pkt->pipe;
772 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
773 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
774 	struct scatterlist sg;
775 	struct dma_async_tx_descriptor *desc;
776 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
777 	struct device *dev = usbhs_priv_to_dev(priv);
778 	enum dma_transfer_direction dir;
779 	dma_cookie_t cookie;
780 
781 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
782 
783 	sg_init_table(&sg, 1);
784 	sg_set_page(&sg, virt_to_page(pkt->dma),
785 		    pkt->length, offset_in_page(pkt->dma));
786 	sg_dma_address(&sg) = pkt->dma + pkt->actual;
787 	sg_dma_len(&sg) = pkt->trans;
788 
789 	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
790 						  DMA_PREP_INTERRUPT |
791 						  DMA_CTRL_ACK);
792 	if (!desc)
793 		return;
794 
795 	desc->callback		= usbhsf_dma_complete;
796 	desc->callback_param	= pipe;
797 
798 	cookie = desc->tx_submit(desc);
799 	if (cookie < 0) {
800 		dev_err(dev, "Failed to submit dma descriptor\n");
801 		return;
802 	}
803 
804 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
805 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
806 
807 	usbhsf_dma_start(pipe, fifo);
808 	dma_async_issue_pending(chan);
809 }
810 
811 /*
812  *		DMA push handler
813  */
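/*
 * DMA is attempted only when all of the following hold; otherwise the
 * packet falls back to the PIO push handler:
 *   - length >= pio_dma_border and the pipe is not DCP
 *   - length is a multiple of 4 and the buffer is 8-byte aligned
 *   - a D0/D1 FIFO with a DMA channel is free, and DMA mapping and
 *     FIFO selection succeed
 */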
814 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
815 {
816 	struct usbhs_pipe *pipe = pkt->pipe;
817 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
818 	struct usbhs_fifo *fifo;
819 	int len = pkt->length - pkt->actual;
820 	int ret;
821 
822 	if (usbhs_pipe_is_busy(pipe))
823 		return 0;
824 
825 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
826 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
827 	    usbhs_pipe_is_dcp(pipe))
828 		goto usbhsf_pio_prepare_push;
829 
830 	if (len % 4) /* 32bit alignment */
831 		goto usbhsf_pio_prepare_push;
832 
833 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
834 		goto usbhsf_pio_prepare_push;
835 
836 	/* get an available DMA fifo */
837 	fifo = usbhsf_get_dma_fifo(priv, pkt);
838 	if (!fifo)
839 		goto usbhsf_pio_prepare_push;
840 
841 	if (usbhsf_dma_map(pkt) < 0)
842 		goto usbhsf_pio_prepare_push;
843 
844 	ret = usbhsf_fifo_select(pipe, fifo, 0);
845 	if (ret < 0)
846 		goto usbhsf_pio_prepare_push_unmap;
847 
848 	pkt->trans = len;
849 
850 	INIT_WORK(&pkt->work, xfer_work);
851 	schedule_work(&pkt->work);
852 
853 	return 0;
854 
855 usbhsf_pio_prepare_push_unmap:
856 	usbhsf_dma_unmap(pkt);
857 usbhsf_pio_prepare_push:
858 	/*
859 	 * change handler to PIO
860 	 */
861 	pkt->handler = &usbhs_fifo_pio_push_handler;
862 
863 	return pkt->handler->prepare(pkt, is_done);
864 }
865 
866 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
867 {
868 	struct usbhs_pipe *pipe = pkt->pipe;
869 
870 	pkt->actual = pkt->trans;
871 
872 	*is_done = !pkt->zero;	/* send zero packet ? */
873 
874 	usbhsf_dma_stop(pipe, pipe->fifo);
875 	usbhsf_dma_unmap(pkt);
876 	usbhsf_fifo_unselect(pipe, pipe->fifo);
877 
878 	return 0;
879 }
880 
881 struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
882 	.prepare	= usbhsf_dma_prepare_push,
883 	.dma_done	= usbhsf_dma_push_done,
884 };
885 
886 /*
887  *		DMA pop handler
888  */
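/*
 * For reception the PIO/DMA decision is deferred to try_run: the
 * received length (usbhsf_fifo_rcv_len()) is only known once data has
 * arrived, so usbhsf_prepare_pop() just arms the pipe and rx irq, and
 * usbhsf_dma_try_pop() then picks DMA or falls back to the PIO pop
 * handler per packet.
 */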
889 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
890 {
891 	struct usbhs_pipe *pipe = pkt->pipe;
892 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
893 	struct usbhs_fifo *fifo;
894 	int len, ret;
895 
896 	if (usbhs_pipe_is_busy(pipe))
897 		return 0;
898 
899 	if (usbhs_pipe_is_dcp(pipe))
900 		goto usbhsf_pio_prepare_pop;
901 
902 	/* get an available DMA fifo */
903 	fifo = usbhsf_get_dma_fifo(priv, pkt);
904 	if (!fifo)
905 		goto usbhsf_pio_prepare_pop;
906 
907 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
908 		goto usbhsf_pio_prepare_pop;
909 
910 	ret = usbhsf_fifo_select(pipe, fifo, 0);
911 	if (ret < 0)
912 		goto usbhsf_pio_prepare_pop;
913 
914 	/* use PIO if packet is less than pio_dma_border */
915 	len = usbhsf_fifo_rcv_len(priv, fifo);
916 	len = min(pkt->length - pkt->actual, len);
917 	if (len % 4) /* 32bit alignment */
918 		goto usbhsf_pio_prepare_pop_unselect;
919 
920 	if (len < usbhs_get_dparam(priv, pio_dma_border))
921 		goto usbhsf_pio_prepare_pop_unselect;
922 
923 	ret = usbhsf_fifo_barrier(priv, fifo);
924 	if (ret < 0)
925 		goto usbhsf_pio_prepare_pop_unselect;
926 
927 	if (usbhsf_dma_map(pkt) < 0)
928 		goto usbhsf_pio_prepare_pop_unselect;
929 
930 	/* DMA */
931 
932 	/*
933 	 * usbhs_fifo_dma_pop_handler :: prepare enabled this irq
934 	 * so that we could get here, but it is no longer needed
935 	 * once DMA takes over; disable it.
936 	 */
937 	usbhsf_rx_irq_ctrl(pipe, 0);
938 
939 	pkt->trans = len;
940 
941 	INIT_WORK(&pkt->work, xfer_work);
942 	schedule_work(&pkt->work);
943 
944 	return 0;
945 
946 usbhsf_pio_prepare_pop_unselect:
947 	usbhsf_fifo_unselect(pipe, fifo);
948 usbhsf_pio_prepare_pop:
949 
950 	/*
951 	 * change handler to PIO
952 	 */
953 	pkt->handler = &usbhs_fifo_pio_pop_handler;
954 
955 	return pkt->handler->try_run(pkt, is_done);
956 }
957 
958 static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
959 {
960 	struct usbhs_pipe *pipe = pkt->pipe;
961 	int maxp = usbhs_pipe_get_maxpacket(pipe);
962 
963 	usbhsf_dma_stop(pipe, pipe->fifo);
964 	usbhsf_dma_unmap(pkt);
965 	usbhsf_fifo_unselect(pipe, pipe->fifo);
966 
967 	pkt->actual += pkt->trans;
968 
969 	if ((pkt->actual == pkt->length) ||	/* receive all data */
970 	    (pkt->trans < maxp)) {		/* short packet */
971 		*is_done = 1;
972 	} else {
973 		/* re-enable */
974 		usbhsf_prepare_pop(pkt, is_done);
975 	}
976 
977 	return 0;
978 }
979 
980 struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
981 	.prepare	= usbhsf_prepare_pop,
982 	.try_run	= usbhsf_dma_try_pop,
983 	.dma_done	= usbhsf_dma_pop_done
984 };
985 
986 /*
987  *		DMA setting
988  */
989 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
990 {
991 	struct sh_dmae_slave *slave = param;
992 
993 	/*
994 	 * FIXME
995 	 *
996 	 * usbhs doesn't recognize slave_id = 0 as a valid DMA channel
997 	 */
998 	if (0 == slave->slave_id)
999 		return false;
1000 
1001 	chan->private = slave;
1002 
1003 	return true;
1004 }
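/*
 * The filter matches channels of the sh_dmae dmaengine driver: the
 * slave configuration is handed over through chan->private, and a
 * slave_id of 0 is treated as "no DMA" for this FIFO.
 */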
1005 
1006 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1007 {
1008 	if (fifo->tx_chan)
1009 		dma_release_channel(fifo->tx_chan);
1010 	if (fifo->rx_chan)
1011 		dma_release_channel(fifo->rx_chan);
1012 
1013 	fifo->tx_chan = NULL;
1014 	fifo->rx_chan = NULL;
1015 }
1016 
1017 static void usbhsf_dma_init(struct usbhs_priv *priv,
1018 			    struct usbhs_fifo *fifo)
1019 {
1020 	struct device *dev = usbhs_priv_to_dev(priv);
1021 	dma_cap_mask_t mask;
1022 
1023 	dma_cap_zero(mask);
1024 	dma_cap_set(DMA_SLAVE, mask);
1025 	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1026 					    &fifo->tx_slave);
1027 
1028 	dma_cap_zero(mask);
1029 	dma_cap_set(DMA_SLAVE, mask);
1030 	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1031 					    &fifo->rx_slave);
1032 
1033 	if (fifo->tx_chan || fifo->rx_chan)
1034 		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1035 			 fifo->name,
1036 			 fifo->tx_chan ? "[TX]" : "    ",
1037 			 fifo->rx_chan ? "[RX]" : "    ");
1038 }
1039 
1040 /*
1041  *		irq functions
1042  */
1043 static int usbhsf_irq_empty(struct usbhs_priv *priv,
1044 			    struct usbhs_irq_state *irq_state)
1045 {
1046 	struct usbhs_pipe *pipe;
1047 	struct device *dev = usbhs_priv_to_dev(priv);
1048 	int i, ret;
1049 
1050 	if (!irq_state->bempsts) {
1051 		dev_err(dev, "debug %s !!\n", __func__);
1052 		return -EIO;
1053 	}
1054 
1055 	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1056 
1057 	/*
1058 	 * search for the interrupted "pipe",
1059 	 * not the "uep".
1060 	 */
1061 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1062 		if (!(irq_state->bempsts & (1 << i)))
1063 			continue;
1064 
1065 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1066 		if (ret < 0)
1067 			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1068 	}
1069 
1070 	return 0;
1071 }
1072 
1073 static int usbhsf_irq_ready(struct usbhs_priv *priv,
1074 			    struct usbhs_irq_state *irq_state)
1075 {
1076 	struct usbhs_pipe *pipe;
1077 	struct device *dev = usbhs_priv_to_dev(priv);
1078 	int i, ret;
1079 
1080 	if (!irq_state->brdysts) {
1081 		dev_err(dev, "debug %s !!\n", __func__);
1082 		return -EIO;
1083 	}
1084 
1085 	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1086 
1087 	/*
1088 	 * search for the interrupted "pipe",
1089 	 * not the "uep".
1090 	 */
1091 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1092 		if (!(irq_state->brdysts & (1 << i)))
1093 			continue;
1094 
1095 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1096 		if (ret < 0)
1097 			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1098 	}
1099 
1100 	return 0;
1101 }
1102 
1103 static void usbhsf_dma_complete(void *arg)
1104 {
1105 	struct usbhs_pipe *pipe = arg;
1106 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1107 	struct device *dev = usbhs_priv_to_dev(priv);
1108 	int ret;
1109 
1110 	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1111 	if (ret < 0)
1112 		dev_err(dev, "dma_complete run_error %d : %d\n",
1113 			usbhs_pipe_number(pipe), ret);
1114 }
1115 
1116 /*
1117  *		fifo init
1118  */
1119 void usbhs_fifo_init(struct usbhs_priv *priv)
1120 {
1121 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1122 	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1123 	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1124 	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1125 
1126 	mod->irq_empty		= usbhsf_irq_empty;
1127 	mod->irq_ready		= usbhsf_irq_ready;
1128 	mod->irq_bempsts	= 0;
1129 	mod->irq_brdysts	= 0;
1130 
1131 	cfifo->pipe	= NULL;
1132 	cfifo->tx_chan	= NULL;
1133 	cfifo->rx_chan	= NULL;
1134 
1135 	d0fifo->pipe	= NULL;
1136 	d0fifo->tx_chan	= NULL;
1137 	d0fifo->rx_chan	= NULL;
1138 
1139 	d1fifo->pipe	= NULL;
1140 	d1fifo->tx_chan	= NULL;
1141 	d1fifo->rx_chan	= NULL;
1142 
1143 	usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
1144 	usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
1145 }
1146 
1147 void usbhs_fifo_quit(struct usbhs_priv *priv)
1148 {
1149 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1150 
1151 	mod->irq_empty		= NULL;
1152 	mod->irq_ready		= NULL;
1153 	mod->irq_bempsts	= 0;
1154 	mod->irq_brdysts	= 0;
1155 
1156 	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
1157 	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
1158 }
1159 
1160 int usbhs_fifo_probe(struct usbhs_priv *priv)
1161 {
1162 	struct usbhs_fifo *fifo;
1163 
1164 	/* CFIFO */
1165 	fifo = usbhsf_get_cfifo(priv);
1166 	fifo->name	= "CFIFO";
1167 	fifo->port	= CFIFO;
1168 	fifo->sel	= CFIFOSEL;
1169 	fifo->ctr	= CFIFOCTR;
1170 
1171 	/* D0FIFO */
1172 	fifo = usbhsf_get_d0fifo(priv);
1173 	fifo->name	= "D0FIFO";
1174 	fifo->port	= D0FIFO;
1175 	fifo->sel	= D0FIFOSEL;
1176 	fifo->ctr	= D0FIFOCTR;
1177 	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d0_tx_id);
1178 	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d0_rx_id);
1179 
1180 	/* D1FIFO */
1181 	fifo = usbhsf_get_d1fifo(priv);
1182 	fifo->name	= "D1FIFO";
1183 	fifo->port	= D1FIFO;
1184 	fifo->sel	= D1FIFOSEL;
1185 	fifo->ctr	= D1FIFOCTR;
1186 	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d1_tx_id);
1187 	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d1_rx_id);
1188 
1189 	return 0;
1190 }
1191 
1192 void usbhs_fifo_remove(struct usbhs_priv *priv)
1193 {
1194 }
1195