xref: /openbmc/linux/drivers/usb/renesas_usbhs/fifo.c (revision d58f75de)
1 // SPDX-License-Identifier: GPL-1.0+
2 /*
3  * Renesas USB driver
4  *
5  * Copyright (C) 2011 Renesas Solutions Corp.
6  * Copyright (C) 2019 Renesas Electronics Corporation
7  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
8  */
9 #include <linux/delay.h>
10 #include <linux/io.h>
11 #include <linux/scatterlist.h>
12 #include "common.h"
13 #include "pipe.h"
14 
15 #define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
16 
17 #define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
18 
19 /*
20  *		packet initialize
21  */
22 void usbhs_pkt_init(struct usbhs_pkt *pkt)
23 {
24 	INIT_LIST_HEAD(&pkt->node);
25 }
26 
27 /*
28  *		packet control function
29  */
30 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
31 {
32 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
33 	struct device *dev = usbhs_priv_to_dev(priv);
34 
35 	dev_err(dev, "null handler\n");
36 
37 	return -EINVAL;
38 }
39 
40 static const struct usbhs_pkt_handle usbhsf_null_handler = {
41 	.prepare = usbhsf_null_handle,
42 	.try_run = usbhsf_null_handle,
43 };
44 
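/*
 * Illustrative usage sketch (not taken from this file): a caller queues a
 * packet with usbhs_pkt_push() and then kicks the pipe with
 * usbhs_pkt_start(). "my_done" is a hypothetical completion callback; the
 * real callers and callbacks live in mod_gadget.c / mod_host.c.
 *
 *	static void my_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
 *	{
 *		;	// called once the packet completed or was popped
 *	}
 *
 *	// zero = 0 (no trailing zero-length packet),
 *	// sequence = -1 (keep the current data toggle, see below)
 *	usbhs_pkt_push(pipe, pkt, my_done, buf, len, 0, -1);
 *	usbhs_pkt_start(pipe);
 */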
45 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
46 		    void (*done)(struct usbhs_priv *priv,
47 				 struct usbhs_pkt *pkt),
48 		    void *buf, int len, int zero, int sequence)
49 {
50 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
51 	struct device *dev = usbhs_priv_to_dev(priv);
52 	unsigned long flags;
53 
54 	if (!done) {
55 		dev_err(dev, "no done function\n");
56 		return;
57 	}
58 
59 	/********************  spin lock ********************/
60 	usbhs_lock(priv, flags);
61 
62 	if (!pipe->handler) {
63 		dev_err(dev, "no handler function\n");
64 		pipe->handler = &usbhsf_null_handler;
65 	}
66 
67 	list_move_tail(&pkt->node, &pipe->list);
68 
69 	/*
70 	 * Each pkt must hold its own handler, because the handler
71 	 * might be changed depending on the situation
72 	 * (e.g. dma handler -> pio handler).
73 	 */
74 	pkt->pipe	= pipe;
75 	pkt->buf	= buf;
76 	pkt->handler	= pipe->handler;
77 	pkt->length	= len;
78 	pkt->zero	= zero;
79 	pkt->actual	= 0;
80 	pkt->done	= done;
81 	pkt->sequence	= sequence;
82 
83 	usbhs_unlock(priv, flags);
84 	/********************  spin unlock ******************/
85 }
86 
87 static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
88 {
89 	list_del_init(&pkt->node);
90 }
91 
92 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
93 {
94 	return list_first_entry_or_null(&pipe->list, struct usbhs_pkt, node);
95 }
96 
97 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
98 				 struct usbhs_fifo *fifo);
99 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
100 					    struct usbhs_pkt *pkt);
101 #define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
102 #define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
103 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map);
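/*
 * usbhs_pkt_pop() cancels a queued packet: it disables the pipe,
 * terminates any in-flight DMA transfer and unmaps the buffer, clears
 * the pipe without touching the data sequence, and removes the packet
 * from the pipe's list. Passing a NULL pkt cancels whatever packet is
 * currently at the head of the pipe's queue.
 */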
104 struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
105 {
106 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
107 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
108 	unsigned long flags;
109 
110 	/********************  spin lock ********************/
111 	usbhs_lock(priv, flags);
112 
113 	usbhs_pipe_disable(pipe);
114 
115 	if (!pkt)
116 		pkt = __usbhsf_pkt_get(pipe);
117 
118 	if (pkt) {
119 		struct dma_chan *chan = NULL;
120 
121 		if (fifo)
122 			chan = usbhsf_dma_chan_get(fifo, pkt);
123 		if (chan) {
124 			dmaengine_terminate_all(chan);
125 			usbhsf_dma_unmap(pkt);
126 		}
127 
128 		usbhs_pipe_clear_without_sequence(pipe, 0, 0);
129 		usbhs_pipe_running(pipe, 0);
130 
131 		__usbhsf_pkt_del(pkt);
132 	}
133 
134 	if (fifo)
135 		usbhsf_fifo_unselect(pipe, fifo);
136 
137 	usbhs_unlock(priv, flags);
138 	/********************  spin unlock ******************/
139 
140 	return pkt;
141 }
142 
143 enum {
144 	USBHSF_PKT_PREPARE,
145 	USBHSF_PKT_TRY_RUN,
146 	USBHSF_PKT_DMA_DONE,
147 };
148 
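/*
 * usbhsf_pkt_handler() is the common dispatcher: under the driver lock it
 * takes the packet at the head of the pipe's queue and calls the matching
 * handler callback (prepare / try_run / dma_done). If the callback reports
 * completion through *is_done, the packet is removed from the queue, its
 * done() callback is invoked and the next packet on the pipe is started.
 */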
149 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
150 {
151 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
152 	struct usbhs_pkt *pkt;
153 	struct device *dev = usbhs_priv_to_dev(priv);
154 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
155 	unsigned long flags;
156 	int ret = 0;
157 	int is_done = 0;
158 
159 	/********************  spin lock ********************/
160 	usbhs_lock(priv, flags);
161 
162 	pkt = __usbhsf_pkt_get(pipe);
163 	if (!pkt) {
164 		ret = -EINVAL;
165 		goto __usbhs_pkt_handler_end;
166 	}
167 
168 	switch (type) {
169 	case USBHSF_PKT_PREPARE:
170 		func = pkt->handler->prepare;
171 		break;
172 	case USBHSF_PKT_TRY_RUN:
173 		func = pkt->handler->try_run;
174 		break;
175 	case USBHSF_PKT_DMA_DONE:
176 		func = pkt->handler->dma_done;
177 		break;
178 	default:
179 		dev_err(dev, "unknown pkt handler\n");
180 		goto __usbhs_pkt_handler_end;
181 	}
182 
183 	if (likely(func))
184 		ret = func(pkt, &is_done);
185 
186 	if (is_done)
187 		__usbhsf_pkt_del(pkt);
188 
189 __usbhs_pkt_handler_end:
190 	usbhs_unlock(priv, flags);
191 	/********************  spin unlock ******************/
192 
193 	if (is_done) {
194 		pkt->done(priv, pkt);
195 		usbhs_pkt_start(pipe);
196 	}
197 
198 	return ret;
199 }
200 
201 void usbhs_pkt_start(struct usbhs_pipe *pipe)
202 {
203 	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
204 }
205 
206 /*
207  *		irq enable/disable function
208  */
209 #define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_bempsts, e)
210 #define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, irq_brdysts, e)
211 #define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
212 	({								\
213 		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
214 		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
215 		u16 status = (1 << usbhs_pipe_number(pipe));		\
216 		if (!mod)						\
217 			return;						\
218 		if (enable)						\
219 			mod->status |= status;				\
220 		else							\
221 			mod->status &= ~status;				\
222 		usbhs_irq_callback_update(priv, mod);			\
223 	})
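/*
 * Note: "status" above is a macro argument (irq_bempsts or irq_brdysts),
 * so both the local bit-mask variable and "mod->status" expand to the
 * selected field. The macro sets or clears the per-pipe bit in that mask
 * and then calls usbhs_irq_callback_update() so the change takes effect.
 */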
224 
225 static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
226 {
227 	/*
228 	 * The DCP pipe can NOT use the "ready" interrupt for "send";
229 	 * it should use the "empty" interrupt.
230 	 * see
231 	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
232 	 *
233 	 * On the other hand, a normal pipe can use the "ready" interrupt for
234 	 * "send" even though it is single/double buffered.
235 	 */
236 	if (usbhs_pipe_is_dcp(pipe))
237 		usbhsf_irq_empty_ctrl(pipe, enable);
238 	else
239 		usbhsf_irq_ready_ctrl(pipe, enable);
240 }
241 
242 static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
243 {
244 	usbhsf_irq_ready_ctrl(pipe, enable);
245 }
246 
247 /*
248  *		FIFO ctrl
249  */
250 static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
251 				   struct usbhs_fifo *fifo)
252 {
253 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
254 
255 	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
256 }
257 
258 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
259 			       struct usbhs_fifo *fifo)
260 {
261 	/* The FIFO port is accessible */
262 	if (usbhs_read(priv, fifo->ctr) & FRDY)
263 		return 0;
264 
265 	return -EBUSY;
266 }
267 
268 static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
269 			      struct usbhs_fifo *fifo)
270 {
271 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
272 	int ret = 0;
273 
274 	if (!usbhs_pipe_is_dcp(pipe)) {
275 		/*
276 		 * This driver checks the pipe condition first to avoid -EBUSY
277 		 * from usbhsf_fifo_barrier() if the pipe is RX direction and
278 		 * empty.
279 		 */
280 		if (usbhs_pipe_is_dir_in(pipe))
281 			ret = usbhs_pipe_is_accessible(pipe);
282 		if (!ret)
283 			ret = usbhsf_fifo_barrier(priv, fifo);
284 	}
285 
286 	/*
287 	 * For a non-DCP pipe, this driver should set BCLR only when
288 	 * usbhsf_fifo_barrier() returns 0.
289 	 */
290 	if (!ret)
291 		usbhs_write(priv, fifo->ctr, BCLR);
292 }
293 
294 static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
295 			       struct usbhs_fifo *fifo)
296 {
297 	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
298 }
299 
300 static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
301 				 struct usbhs_fifo *fifo)
302 {
303 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
304 
305 	usbhs_pipe_select_fifo(pipe, NULL);
306 	usbhs_write(priv, fifo->sel, 0);
307 }
308 
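/*
 * Selecting a FIFO writes the pipe number (plus ISEL for the DCP write
 * direction) into the FIFOSEL register and then polls until the controller
 * reports the same CURPIPE/ISEL value back, since the switch does not take
 * effect immediately. It fails with -EBUSY if the pipe or the FIFO is
 * already in use, and with -EIO if the selection never sticks.
 */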
309 static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
310 			      struct usbhs_fifo *fifo,
311 			      int write)
312 {
313 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
314 	struct device *dev = usbhs_priv_to_dev(priv);
315 	int timeout = 1024;
316 	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
317 	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */
318 
319 	if (usbhs_pipe_is_busy(pipe) ||
320 	    usbhsf_fifo_is_busy(fifo))
321 		return -EBUSY;
322 
323 	if (usbhs_pipe_is_dcp(pipe)) {
324 		base |= (1 == write) << 5;	/* ISEL */
325 
326 		if (usbhs_mod_is_host(priv))
327 			usbhs_dcp_dir_for_host(pipe, write);
328 	}
329 
330 	/* "base" will be used below  */
331 	usbhs_write(priv, fifo->sel, base | MBW_32);
332 
333 	/* check ISEL and CURPIPE value */
334 	while (timeout--) {
335 		if (base == (mask & usbhs_read(priv, fifo->sel))) {
336 			usbhs_pipe_select_fifo(pipe, fifo);
337 			return 0;
338 		}
339 		udelay(10);
340 	}
341 
342 	dev_err(dev, "fifo select error\n");
343 
344 	return -EIO;
345 }
346 
347 /*
348  *		DCP status stage
349  */
350 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
351 {
352 	struct usbhs_pipe *pipe = pkt->pipe;
353 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
354 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
355 	struct device *dev = usbhs_priv_to_dev(priv);
356 	int ret;
357 
358 	usbhs_pipe_disable(pipe);
359 
360 	ret = usbhsf_fifo_select(pipe, fifo, 1);
361 	if (ret < 0) {
362 		dev_err(dev, "%s() failed\n", __func__);
363 		return ret;
364 	}
365 
366 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
367 
368 	usbhsf_fifo_clear(pipe, fifo);
369 	usbhsf_send_terminator(pipe, fifo);
370 
371 	usbhsf_fifo_unselect(pipe, fifo);
372 
373 	usbhsf_tx_irq_ctrl(pipe, 1);
374 	usbhs_pipe_enable(pipe);
375 
376 	return ret;
377 }
378 
379 static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
380 {
381 	struct usbhs_pipe *pipe = pkt->pipe;
382 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
383 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
384 	struct device *dev = usbhs_priv_to_dev(priv);
385 	int ret;
386 
387 	usbhs_pipe_disable(pipe);
388 
389 	ret = usbhsf_fifo_select(pipe, fifo, 0);
390 	if (ret < 0) {
391 		dev_err(dev, "%s() failed\n", __func__);
392 		return ret;
393 	}
394 
395 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
396 	usbhsf_fifo_clear(pipe, fifo);
397 
398 	usbhsf_fifo_unselect(pipe, fifo);
399 
400 	usbhsf_rx_irq_ctrl(pipe, 1);
401 	usbhs_pipe_enable(pipe);
402 
403 	return ret;
404 
405 }
406 
407 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
408 {
409 	struct usbhs_pipe *pipe = pkt->pipe;
410 
411 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
412 		usbhsf_tx_irq_ctrl(pipe, 0);
413 	else
414 		usbhsf_rx_irq_ctrl(pipe, 0);
415 
416 	pkt->actual = pkt->length;
417 	*is_done = 1;
418 
419 	return 0;
420 }
421 
422 const struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
423 	.prepare = usbhs_dcp_dir_switch_to_write,
424 	.try_run = usbhs_dcp_dir_switch_done,
425 };
426 
427 const struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
428 	.prepare = usbhs_dcp_dir_switch_to_read,
429 	.try_run = usbhs_dcp_dir_switch_done,
430 };
431 
432 /*
433  *		DCP data stage (push)
434  */
435 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
436 {
437 	struct usbhs_pipe *pipe = pkt->pipe;
438 
439 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
440 
441 	/*
442 	 * change handler to PIO push
443 	 */
444 	pkt->handler = &usbhs_fifo_pio_push_handler;
445 
446 	return pkt->handler->prepare(pkt, is_done);
447 }
448 
449 const struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
450 	.prepare = usbhsf_dcp_data_stage_try_push,
451 };
452 
453 /*
454  *		DCP data stage (pop)
455  */
456 static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
457 					     int *is_done)
458 {
459 	struct usbhs_pipe *pipe = pkt->pipe;
460 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
461 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
462 
463 	if (usbhs_pipe_is_busy(pipe))
464 		return 0;
465 
466 	/*
467 	 * Preparing a pop for DCP should
468 	 *  - change the DCP direction,
469 	 *  - clear the fifo,
470 	 *  - set the sequence to DATA1.
471 	 */
472 	usbhs_pipe_disable(pipe);
473 
474 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
475 
476 	usbhsf_fifo_select(pipe, fifo, 0);
477 	usbhsf_fifo_clear(pipe, fifo);
478 	usbhsf_fifo_unselect(pipe, fifo);
479 
480 	/*
481 	 * change handler to PIO pop
482 	 */
483 	pkt->handler = &usbhs_fifo_pio_pop_handler;
484 
485 	return pkt->handler->prepare(pkt, is_done);
486 }
487 
488 const struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
489 	.prepare = usbhsf_dcp_data_stage_prepare_pop,
490 };
491 
492 /*
493  *		PIO push handler
494  */
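/*
 * The PIO push path copies at most one max-packet per invocation: the
 * 32-bit aligned part of the buffer goes out with iowrite32_rep(), the
 * tail byte by byte (the byte-lane order depends on the cfifo_byte_addr
 * platform parameter). For example, with maxp = 512 and 509 bytes left,
 * 127 longword writes (508 bytes) are followed by one byte write, and the
 * resulting short packet is terminated by setting BVAL.
 */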
495 static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
496 {
497 	struct usbhs_pipe *pipe = pkt->pipe;
498 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
499 	struct device *dev = usbhs_priv_to_dev(priv);
500 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
501 	void __iomem *addr = priv->base + fifo->port;
502 	u8 *buf;
503 	int maxp = usbhs_pipe_get_maxpacket(pipe);
504 	int total_len;
505 	int i, ret, len;
506 	int is_short;
507 
508 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
509 	pkt->sequence = -1; /* -1 sequence will be ignored */
510 
511 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
512 
513 	ret = usbhsf_fifo_select(pipe, fifo, 1);
514 	if (ret < 0)
515 		return 0;
516 
517 	ret = usbhs_pipe_is_accessible(pipe);
518 	if (ret < 0) {
519 		/* inaccessible pipe is not an error */
520 		ret = 0;
521 		goto usbhs_fifo_write_busy;
522 	}
523 
524 	ret = usbhsf_fifo_barrier(priv, fifo);
525 	if (ret < 0)
526 		goto usbhs_fifo_write_busy;
527 
528 	buf		= pkt->buf    + pkt->actual;
529 	len		= pkt->length - pkt->actual;
530 	len		= min(len, maxp);
531 	total_len	= len;
532 	is_short	= total_len < maxp;
533 
534 	/*
535 	 * FIXME
536 	 *
537 	 * 32-bit access only
538 	 */
539 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
540 		iowrite32_rep(addr, buf, len / 4);
541 		len %= 4;
542 		buf += total_len - len;
543 	}
544 
545 	/* write the remaining bytes */
546 	if (usbhs_get_dparam(priv, cfifo_byte_addr)) {
547 		for (i = 0; i < len; i++)
548 			iowrite8(buf[i], addr + (i & 0x03));
549 	} else {
550 		for (i = 0; i < len; i++)
551 			iowrite8(buf[i], addr + (0x03 - (i & 0x03)));
552 	}
553 
554 	/*
555 	 * variable update
556 	 */
557 	pkt->actual += total_len;
558 
559 	if (pkt->actual < pkt->length)
560 		*is_done = 0;		/* there is remaining data */
561 	else if (is_short)
562 		*is_done = 1;		/* short packet */
563 	else
564 		*is_done = !pkt->zero;	/* send zero packet ? */
565 
566 	/*
567 	 * pipe/irq handling
568 	 */
569 	if (is_short)
570 		usbhsf_send_terminator(pipe, fifo);
571 
572 	usbhsf_tx_irq_ctrl(pipe, !*is_done);
573 	usbhs_pipe_running(pipe, !*is_done);
574 	usbhs_pipe_enable(pipe);
575 
576 	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
577 		usbhs_pipe_number(pipe),
578 		pkt->length, pkt->actual, *is_done, pkt->zero);
579 
580 	usbhsf_fifo_unselect(pipe, fifo);
581 
582 	return 0;
583 
584 usbhs_fifo_write_busy:
585 	usbhsf_fifo_unselect(pipe, fifo);
586 
587 	/*
588 	 * The pipe is busy.
589 	 * Retry from the interrupt handler.
590 	 */
591 	usbhsf_tx_irq_ctrl(pipe, 1);
592 	usbhs_pipe_running(pipe, 1);
593 
594 	return ret;
595 }
596 
597 static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done)
598 {
599 	if (usbhs_pipe_is_running(pkt->pipe))
600 		return 0;
601 
602 	return usbhsf_pio_try_push(pkt, is_done);
603 }
604 
605 const struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
606 	.prepare = usbhsf_pio_prepare_push,
607 	.try_run = usbhsf_pio_try_push,
608 };
609 
610 /*
611  *		PIO pop handler
612  */
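/*
 * The PIO pop path is interrupt driven: prepare enables the pipe and the
 * BRDY ("ready") interrupt, and try_run, called from that interrupt, reads
 * DTLN bytes out of the CFIFO (32-bit fast path plus a byte-wise tail).
 * The request finishes on a short packet or once pkt->length bytes have
 * been received; a zero-length packet only clears the FIFO and sets
 * pkt->zero.
 */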
613 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
614 {
615 	struct usbhs_pipe *pipe = pkt->pipe;
616 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
617 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
618 
619 	if (usbhs_pipe_is_busy(pipe))
620 		return 0;
621 
622 	if (usbhs_pipe_is_running(pipe))
623 		return 0;
624 
625 	/*
626 	 * enable the pipe to prepare for packet reception
627 	 */
628 	usbhs_pipe_data_sequence(pipe, pkt->sequence);
629 	pkt->sequence = -1; /* -1 sequence will be ignored */
630 
631 	if (usbhs_pipe_is_dcp(pipe))
632 		usbhsf_fifo_clear(pipe, fifo);
633 
634 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
635 	usbhs_pipe_enable(pipe);
636 	usbhs_pipe_running(pipe, 1);
637 	usbhsf_rx_irq_ctrl(pipe, 1);
638 
639 	return 0;
640 }
641 
642 static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
643 {
644 	struct usbhs_pipe *pipe = pkt->pipe;
645 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
646 	struct device *dev = usbhs_priv_to_dev(priv);
647 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
648 	void __iomem *addr = priv->base + fifo->port;
649 	u8 *buf;
650 	u32 data = 0;
651 	int maxp = usbhs_pipe_get_maxpacket(pipe);
652 	int rcv_len, len;
653 	int i, ret;
654 	int total_len = 0;
655 
656 	ret = usbhsf_fifo_select(pipe, fifo, 0);
657 	if (ret < 0)
658 		return 0;
659 
660 	ret = usbhsf_fifo_barrier(priv, fifo);
661 	if (ret < 0)
662 		goto usbhs_fifo_read_busy;
663 
664 	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
665 
666 	buf		= pkt->buf    + pkt->actual;
667 	len		= pkt->length - pkt->actual;
668 	len		= min(len, rcv_len);
669 	total_len	= len;
670 
671 	/*
672 	 * Update the actual length first here to decide whether to disable
673 	 * the pipe. If this pipe keeps BUF status and all data were popped,
674 	 * then the next interrupt/token will be issued again.
675 	 */
676 	pkt->actual += total_len;
677 
678 	if ((pkt->actual == pkt->length) ||	/* receive all data */
679 	    (total_len < maxp)) {		/* short packet */
680 		*is_done = 1;
681 		usbhsf_rx_irq_ctrl(pipe, 0);
682 		usbhs_pipe_running(pipe, 0);
683 		/*
684 		 * In function mode, since this controller can enter the
685 		 * Control Write status stage at this timing, this driver
686 		 * should not disable the pipe. If it did, this controller
687 		 * would not be able to complete the status stage.
688 		 */
689 		if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
690 			usbhs_pipe_disable(pipe);	/* disable pipe first */
691 	}
692 
693 	/*
694 	 * Buffer clear if Zero-Length packet
695 	 *
696 	 * see
697 	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
698 	 */
699 	if (0 == rcv_len) {
700 		pkt->zero = 1;
701 		usbhsf_fifo_clear(pipe, fifo);
702 		goto usbhs_fifo_read_end;
703 	}
704 
705 	/*
706 	 * FIXME
707 	 *
708 	 * 32-bit access only
709 	 */
710 	if (len >= 4 && !((unsigned long)buf & 0x03)) {
711 		ioread32_rep(addr, buf, len / 4);
712 		len %= 4;
713 		buf += total_len - len;
714 	}
715 
716 	/* read the remaining bytes */
717 	for (i = 0; i < len; i++) {
718 		if (!(i & 0x03))
719 			data = ioread32(addr);
720 
721 		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
722 	}
723 
724 usbhs_fifo_read_end:
725 	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
726 		usbhs_pipe_number(pipe),
727 		pkt->length, pkt->actual, *is_done, pkt->zero);
728 
729 usbhs_fifo_read_busy:
730 	usbhsf_fifo_unselect(pipe, fifo);
731 
732 	return ret;
733 }
734 
735 const struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
736 	.prepare = usbhsf_prepare_pop,
737 	.try_run = usbhsf_pio_try_pop,
738 };
739 
740 /*
741  *		DCP control stage handler
742  */
743 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
744 {
745 	usbhs_dcp_control_transfer_done(pkt->pipe);
746 
747 	*is_done = 1;
748 
749 	return 0;
750 }
751 
752 const struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
753 	.prepare = usbhsf_ctrl_stage_end,
754 	.try_run = usbhsf_ctrl_stage_end,
755 };
756 
757 /*
758  *		DMA fifo functions
759  */
760 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
761 					    struct usbhs_pkt *pkt)
762 {
763 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
764 		return fifo->tx_chan;
765 
766 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
767 		return fifo->rx_chan;
768 
769 	return NULL;
770 }
771 
772 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
773 					      struct usbhs_pkt *pkt)
774 {
775 	struct usbhs_fifo *fifo;
776 	int i;
777 
778 	usbhs_for_each_dfifo(priv, fifo, i) {
779 		if (usbhsf_dma_chan_get(fifo, pkt) &&
780 		    !usbhsf_fifo_is_busy(fifo))
781 			return fifo;
782 	}
783 
784 	return NULL;
785 }
786 
787 #define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
788 #define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
789 static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
790 			      struct usbhs_fifo *fifo,
791 			      u16 dreqe)
792 {
793 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
794 
795 	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
796 }
797 
798 static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
799 {
800 	struct usbhs_pipe *pipe = pkt->pipe;
801 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
802 	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);
803 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
804 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
805 
806 	return info->dma_map_ctrl(chan->device->dev, pkt, map);
807 }
808 
809 static void usbhsf_dma_complete(void *arg,
810 				const struct dmaengine_result *result);
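/*
 * usbhsf_dma_xfer_preparing() runs the generic dmaengine sequence on the
 * already mapped buffer: dmaengine_prep_slave_single(), submit, then
 * dma_async_issue_pending(), followed by setting DREQE so the FIFO starts
 * handshaking with the DMAC. usbhsf_dma_complete() is the completion
 * callback and re-enters the packet handler with USBHSF_PKT_DMA_DONE.
 * Callers hold the usbhs lock (see xfer_work()).
 */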
811 static void usbhsf_dma_xfer_preparing(struct usbhs_pkt *pkt)
812 {
813 	struct usbhs_pipe *pipe = pkt->pipe;
814 	struct usbhs_fifo *fifo;
815 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
816 	struct dma_async_tx_descriptor *desc;
817 	struct dma_chan *chan;
818 	struct device *dev = usbhs_priv_to_dev(priv);
819 	enum dma_transfer_direction dir;
820 	dma_cookie_t cookie;
821 
822 	fifo = usbhs_pipe_to_fifo(pipe);
823 	if (!fifo)
824 		return;
825 
826 	chan = usbhsf_dma_chan_get(fifo, pkt);
827 	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
828 
829 	desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual,
830 					pkt->trans, dir,
831 					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
832 	if (!desc)
833 		return;
834 
835 	desc->callback_result	= usbhsf_dma_complete;
836 	desc->callback_param	= pkt;
837 
838 	cookie = dmaengine_submit(desc);
839 	if (cookie < 0) {
840 		dev_err(dev, "Failed to submit dma descriptor\n");
841 		return;
842 	}
843 
844 	dev_dbg(dev, "  %s %d (%d/ %d)\n",
845 		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);
846 
847 	usbhs_pipe_running(pipe, 1);
848 	usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans);
849 	dma_async_issue_pending(chan);
850 	usbhsf_dma_start(pipe, fifo);
851 	usbhs_pipe_enable(pipe);
852 }
853 
854 static void xfer_work(struct work_struct *work)
855 {
856 	struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work);
857 	struct usbhs_pipe *pipe = pkt->pipe;
858 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
859 	unsigned long flags;
860 
861 	usbhs_lock(priv, flags);
862 	usbhsf_dma_xfer_preparing(pkt);
863 	usbhs_unlock(priv, flags);
864 }
865 
866 /*
867  *		DMA push handler
868  */
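/*
 * DMA is used for a push only when it is both worthwhile and possible:
 * the remaining length must be at least pio_dma_border, the pipe must not
 * be isochronous, the length must be a multiple of 8 when there is no
 * USB-DMAC, the buffer must be suitably aligned, and a free DnFIFO with a
 * TX channel must be available and mappable. Otherwise the packet falls
 * back to the PIO push handler. With a USB-DMAC the transfer is prepared
 * immediately (atomic safe); otherwise it is deferred to a workqueue via
 * xfer_work().
 */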
869 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
870 {
871 	struct usbhs_pipe *pipe = pkt->pipe;
872 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
873 	struct usbhs_fifo *fifo;
874 	int len = pkt->length - pkt->actual;
875 	int ret;
876 	uintptr_t align_mask;
877 
878 	if (usbhs_pipe_is_busy(pipe))
879 		return 0;
880 
881 	/* use PIO if packet is smaller than pio_dma_border or pipe is ISOC */
882 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
883 	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
884 		goto usbhsf_pio_prepare_push;
885 
886 	/* check the data length if this driver doesn't use USB-DMAC */
887 	if (!usbhs_get_dparam(priv, has_usb_dmac) && len & 0x7)
888 		goto usbhsf_pio_prepare_push;
889 
890 	/* check buffer alignment */
891 	align_mask = usbhs_get_dparam(priv, has_usb_dmac) ?
892 					USBHS_USB_DMAC_XFER_SIZE - 1 : 0x7;
893 	if ((uintptr_t)(pkt->buf + pkt->actual) & align_mask)
894 		goto usbhsf_pio_prepare_push;
895 
896 	/* return at this time if the pipe is running */
897 	if (usbhs_pipe_is_running(pipe))
898 		return 0;
899 
900 	/* get an enabled DMA fifo */
901 	fifo = usbhsf_get_dma_fifo(priv, pkt);
902 	if (!fifo)
903 		goto usbhsf_pio_prepare_push;
904 
905 	ret = usbhsf_fifo_select(pipe, fifo, 0);
906 	if (ret < 0)
907 		goto usbhsf_pio_prepare_push;
908 
909 	if (usbhsf_dma_map(pkt) < 0)
910 		goto usbhsf_pio_prepare_push_unselect;
911 
912 	pkt->trans = len;
913 
914 	usbhsf_tx_irq_ctrl(pipe, 0);
915 	/* FIXME: Workaround for USB-DMAC so the driver can be used in atomic context */
916 	if (usbhs_get_dparam(priv, has_usb_dmac)) {
917 		usbhsf_dma_xfer_preparing(pkt);
918 	} else {
919 		INIT_WORK(&pkt->work, xfer_work);
920 		schedule_work(&pkt->work);
921 	}
922 
923 	return 0;
924 
925 usbhsf_pio_prepare_push_unselect:
926 	usbhsf_fifo_unselect(pipe, fifo);
927 usbhsf_pio_prepare_push:
928 	/*
929 	 * change handler to PIO
930 	 */
931 	pkt->handler = &usbhs_fifo_pio_push_handler;
932 
933 	return pkt->handler->prepare(pkt, is_done);
934 }
935 
936 static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
937 {
938 	struct usbhs_pipe *pipe = pkt->pipe;
939 	int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe);
940 
941 	pkt->actual += pkt->trans;
942 
943 	if (pkt->actual < pkt->length)
944 		*is_done = 0;		/* there is remaining data */
945 	else if (is_short)
946 		*is_done = 1;		/* short packet */
947 	else
948 		*is_done = !pkt->zero;	/* send zero packet? */
949 
950 	usbhs_pipe_running(pipe, !*is_done);
951 
952 	usbhsf_dma_stop(pipe, pipe->fifo);
953 	usbhsf_dma_unmap(pkt);
954 	usbhsf_fifo_unselect(pipe, pipe->fifo);
955 
956 	if (!*is_done) {
957 		/* change handler to PIO */
958 		pkt->handler = &usbhs_fifo_pio_push_handler;
959 		return pkt->handler->try_run(pkt, is_done);
960 	}
961 
962 	return 0;
963 }
964 
965 const struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
966 	.prepare	= usbhsf_dma_prepare_push,
967 	.dma_done	= usbhsf_dma_push_done,
968 };
969 
970 /*
971  *		DMA pop handler
972  */
973 
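/*
 * Two strategies are used for RX DMA. With a USB-DMAC the whole request is
 * handed to the DMAC up front from prepare (BFRE is switched on for the
 * pipe), and dma_done computes the real received size afterwards. Without
 * it, prepare only arms the normal rx interrupt; try_run, called from that
 * interrupt, checks whether the data already in the FIFO is large and
 * aligned enough for DMA and otherwise falls back to PIO.
 */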
974 static int usbhsf_dma_prepare_pop_with_rx_irq(struct usbhs_pkt *pkt,
975 					      int *is_done)
976 {
977 	return usbhsf_prepare_pop(pkt, is_done);
978 }
979 
980 static int usbhsf_dma_prepare_pop_with_usb_dmac(struct usbhs_pkt *pkt,
981 						int *is_done)
982 {
983 	struct usbhs_pipe *pipe = pkt->pipe;
984 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
985 	struct usbhs_fifo *fifo;
986 	int ret;
987 
988 	if (usbhs_pipe_is_busy(pipe))
989 		return 0;
990 
991 	/* use PIO if packet is smaller than pio_dma_border or pipe is ISOC */
992 	if ((pkt->length < usbhs_get_dparam(priv, pio_dma_border)) ||
993 	    usbhs_pipe_type_is(pipe, USB_ENDPOINT_XFER_ISOC))
994 		goto usbhsf_pio_prepare_pop;
995 
996 	fifo = usbhsf_get_dma_fifo(priv, pkt);
997 	if (!fifo)
998 		goto usbhsf_pio_prepare_pop;
999 
1000 	if ((uintptr_t)pkt->buf & (USBHS_USB_DMAC_XFER_SIZE - 1))
1001 		goto usbhsf_pio_prepare_pop;
1002 
1003 	/* return at this time if the pipe is running */
1004 	if (usbhs_pipe_is_running(pipe))
1005 		return 0;
1006 
1007 	usbhs_pipe_config_change_bfre(pipe, 1);
1008 
1009 	ret = usbhsf_fifo_select(pipe, fifo, 0);
1010 	if (ret < 0)
1011 		goto usbhsf_pio_prepare_pop;
1012 
1013 	if (usbhsf_dma_map(pkt) < 0)
1014 		goto usbhsf_pio_prepare_pop_unselect;
1015 
1016 	/* DMA */
1017 
1018 	/*
1019 	 * usbhs_fifo_dma_pop_handler :: prepare enabled the irq
1020 	 * to get here, but it is no longer needed for DMA.
1021 	 * Disable it.
1022 	 */
1023 	usbhsf_rx_irq_ctrl(pipe, 0);
1024 
1025 	pkt->trans = pkt->length;
1026 
1027 	usbhsf_dma_xfer_preparing(pkt);
1028 
1029 	return 0;
1030 
1031 usbhsf_pio_prepare_pop_unselect:
1032 	usbhsf_fifo_unselect(pipe, fifo);
1033 usbhsf_pio_prepare_pop:
1034 
1035 	/*
1036 	 * change handler to PIO
1037 	 */
1038 	pkt->handler = &usbhs_fifo_pio_pop_handler;
1039 	usbhs_pipe_config_change_bfre(pipe, 0);
1040 
1041 	return pkt->handler->prepare(pkt, is_done);
1042 }
1043 
1044 static int usbhsf_dma_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
1045 {
1046 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1047 
1048 	if (usbhs_get_dparam(priv, has_usb_dmac))
1049 		return usbhsf_dma_prepare_pop_with_usb_dmac(pkt, is_done);
1050 	else
1051 		return usbhsf_dma_prepare_pop_with_rx_irq(pkt, is_done);
1052 }
1053 
1054 static int usbhsf_dma_try_pop_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
1055 {
1056 	struct usbhs_pipe *pipe = pkt->pipe;
1057 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1058 	struct usbhs_fifo *fifo;
1059 	int len, ret;
1060 
1061 	if (usbhs_pipe_is_busy(pipe))
1062 		return 0;
1063 
1064 	if (usbhs_pipe_is_dcp(pipe))
1065 		goto usbhsf_pio_prepare_pop;
1066 
1067 	/* get an enabled DMA fifo */
1068 	fifo = usbhsf_get_dma_fifo(priv, pkt);
1069 	if (!fifo)
1070 		goto usbhsf_pio_prepare_pop;
1071 
1072 	if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
1073 		goto usbhsf_pio_prepare_pop;
1074 
1075 	ret = usbhsf_fifo_select(pipe, fifo, 0);
1076 	if (ret < 0)
1077 		goto usbhsf_pio_prepare_pop;
1078 
1079 	/* use PIO if packet is less than pio_dma_border */
1080 	len = usbhsf_fifo_rcv_len(priv, fifo);
1081 	len = min(pkt->length - pkt->actual, len);
1082 	if (len & 0x7) /* 8byte alignment */
1083 		goto usbhsf_pio_prepare_pop_unselect;
1084 
1085 	if (len < usbhs_get_dparam(priv, pio_dma_border))
1086 		goto usbhsf_pio_prepare_pop_unselect;
1087 
1088 	ret = usbhsf_fifo_barrier(priv, fifo);
1089 	if (ret < 0)
1090 		goto usbhsf_pio_prepare_pop_unselect;
1091 
1092 	if (usbhsf_dma_map(pkt) < 0)
1093 		goto usbhsf_pio_prepare_pop_unselect;
1094 
1095 	/* DMA */
1096 
1097 	/*
1098 	 * usbhs_fifo_dma_pop_handler :: prepare enabled the irq
1099 	 * to get here, but it is no longer needed for DMA.
1100 	 * Disable it.
1101 	 */
1102 	usbhsf_rx_irq_ctrl(pipe, 0);
1103 
1104 	pkt->trans = len;
1105 
1106 	INIT_WORK(&pkt->work, xfer_work);
1107 	schedule_work(&pkt->work);
1108 
1109 	return 0;
1110 
1111 usbhsf_pio_prepare_pop_unselect:
1112 	usbhsf_fifo_unselect(pipe, fifo);
1113 usbhsf_pio_prepare_pop:
1114 
1115 	/*
1116 	 * change handler to PIO
1117 	 */
1118 	pkt->handler = &usbhs_fifo_pio_pop_handler;
1119 
1120 	return pkt->handler->try_run(pkt, is_done);
1121 }
1122 
1123 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
1124 {
1125 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1126 
1127 	BUG_ON(usbhs_get_dparam(priv, has_usb_dmac));
1128 
1129 	return usbhsf_dma_try_pop_with_rx_irq(pkt, is_done);
1130 }
1131 
1132 static int usbhsf_dma_pop_done_with_rx_irq(struct usbhs_pkt *pkt, int *is_done)
1133 {
1134 	struct usbhs_pipe *pipe = pkt->pipe;
1135 	int maxp = usbhs_pipe_get_maxpacket(pipe);
1136 
1137 	usbhsf_dma_stop(pipe, pipe->fifo);
1138 	usbhsf_dma_unmap(pkt);
1139 	usbhsf_fifo_unselect(pipe, pipe->fifo);
1140 
1141 	pkt->actual += pkt->trans;
1142 
1143 	if ((pkt->actual == pkt->length) ||	/* receive all data */
1144 	    (pkt->trans < maxp)) {		/* short packet */
1145 		*is_done = 1;
1146 		usbhs_pipe_running(pipe, 0);
1147 	} else {
1148 		/* re-enable */
1149 		usbhs_pipe_running(pipe, 0);
1150 		usbhsf_prepare_pop(pkt, is_done);
1151 	}
1152 
1153 	return 0;
1154 }
1155 
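/*
 * With the USB-DMAC the dmaengine residue alone is not byte accurate when
 * a transfer ends on a short packet. If DnFIFOCTR.DTLN is non-zero (data
 * is still sitting in the FIFO), the calculation below drops the last,
 * partially counted USBHS_USB_DMAC_XFER_SIZE unit, rounds down to a
 * max-packet boundary and then adds DTLN to recover the exact number of
 * received bytes.
 */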
1156 static size_t usbhs_dma_calc_received_size(struct usbhs_pkt *pkt,
1157 					   struct dma_chan *chan, int dtln)
1158 {
1159 	struct usbhs_pipe *pipe = pkt->pipe;
1160 	size_t received_size;
1161 	int maxp = usbhs_pipe_get_maxpacket(pipe);
1162 
1163 	received_size = pkt->length - pkt->dma_result->residue;
1164 
1165 	if (dtln) {
1166 		received_size -= USBHS_USB_DMAC_XFER_SIZE;
1167 		received_size &= ~(maxp - 1);
1168 		received_size += dtln;
1169 	}
1170 
1171 	return received_size;
1172 }
1173 
1174 static int usbhsf_dma_pop_done_with_usb_dmac(struct usbhs_pkt *pkt,
1175 					     int *is_done)
1176 {
1177 	struct usbhs_pipe *pipe = pkt->pipe;
1178 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1179 	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
1180 	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
1181 	int rcv_len;
1182 
1183 	/*
1184 	 * Since the driver disables rx_irq in DMA mode, the interrupt handler
1185 	 * cannot clear BRDYSTS. So, this function clears it here because the
1186 	 * driver may use PIO mode next time.
1187 	 */
1188 	usbhs_xxxsts_clear(priv, BRDYSTS, usbhs_pipe_number(pipe));
1189 
1190 	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);
1191 	usbhsf_fifo_clear(pipe, fifo);
1192 	pkt->actual = usbhs_dma_calc_received_size(pkt, chan, rcv_len);
1193 
1194 	usbhs_pipe_running(pipe, 0);
1195 	usbhsf_dma_stop(pipe, fifo);
1196 	usbhsf_dma_unmap(pkt);
1197 	usbhsf_fifo_unselect(pipe, pipe->fifo);
1198 
1199 	/* The driver can assume the rx transaction is always "done" */
1200 	*is_done = 1;
1201 
1202 	return 0;
1203 }
1204 
1205 static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
1206 {
1207 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
1208 
1209 	if (usbhs_get_dparam(priv, has_usb_dmac))
1210 		return usbhsf_dma_pop_done_with_usb_dmac(pkt, is_done);
1211 	else
1212 		return usbhsf_dma_pop_done_with_rx_irq(pkt, is_done);
1213 }
1214 
1215 const struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
1216 	.prepare	= usbhsf_dma_prepare_pop,
1217 	.try_run	= usbhsf_dma_try_pop,
1218 	.dma_done	= usbhsf_dma_pop_done
1219 };
1220 
1221 /*
1222  *		DMA setting
1223  */
1224 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
1225 {
1226 	struct sh_dmae_slave *slave = param;
1227 
1228 	/*
1229 	 * FIXME
1230 	 *
1231 	 * usbhs doesn't recognize id = 0 as a valid DMA channel
1232 	 */
1233 	if (0 == slave->shdma_slave.slave_id)
1234 		return false;
1235 
1236 	chan->private = slave;
1237 
1238 	return true;
1239 }
1240 
1241 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1242 {
1243 	if (fifo->tx_chan)
1244 		dma_release_channel(fifo->tx_chan);
1245 	if (fifo->rx_chan)
1246 		dma_release_channel(fifo->rx_chan);
1247 
1248 	fifo->tx_chan = NULL;
1249 	fifo->rx_chan = NULL;
1250 }
1251 
1252 static void usbhsf_dma_init_pdev(struct usbhs_fifo *fifo)
1253 {
1254 	dma_cap_mask_t mask;
1255 
1256 	dma_cap_zero(mask);
1257 	dma_cap_set(DMA_SLAVE, mask);
1258 	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1259 					    &fifo->tx_slave);
1260 
1261 	dma_cap_zero(mask);
1262 	dma_cap_set(DMA_SLAVE, mask);
1263 	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1264 					    &fifo->rx_slave);
1265 }
1266 
1267 static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
1268 			       int channel)
1269 {
1270 	char name[16];
1271 
1272 	/*
1273 	 * To avoid complex handling for DnFIFOs, the driver uses each
1274 	 * DnFIFO in a single direction, TX or RX (not bi-directional).
1275 	 * So, the driver uses odd channels for TX, even channels for RX.
1276 	 */
1277 	snprintf(name, sizeof(name), "ch%d", channel);
1278 	if (channel & 1) {
1279 		fifo->tx_chan = dma_request_chan(dev, name);
1280 		if (IS_ERR(fifo->tx_chan))
1281 			fifo->tx_chan = NULL;
1282 	} else {
1283 		fifo->rx_chan = dma_request_chan(dev, name);
1284 		if (IS_ERR(fifo->rx_chan))
1285 			fifo->rx_chan = NULL;
1286 	}
1287 }
1288 
1289 static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
1290 			    int channel)
1291 {
1292 	struct device *dev = usbhs_priv_to_dev(priv);
1293 
1294 	if (dev_of_node(dev))
1295 		usbhsf_dma_init_dt(dev, fifo, channel);
1296 	else
1297 		usbhsf_dma_init_pdev(fifo);
1298 
1299 	if (fifo->tx_chan || fifo->rx_chan)
1300 		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1301 			 fifo->name,
1302 			 fifo->tx_chan ? "[TX]" : "    ",
1303 			 fifo->rx_chan ? "[RX]" : "    ");
1304 }
1305 
1306 /*
1307  *		irq functions
1308  */
1309 static int usbhsf_irq_empty(struct usbhs_priv *priv,
1310 			    struct usbhs_irq_state *irq_state)
1311 {
1312 	struct usbhs_pipe *pipe;
1313 	struct device *dev = usbhs_priv_to_dev(priv);
1314 	int i, ret;
1315 
1316 	if (!irq_state->bempsts) {
1317 		dev_err(dev, "debug %s !!\n", __func__);
1318 		return -EIO;
1319 	}
1320 
1321 	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);
1322 
1323 	/*
1324 	 * search for the interrupted "pipe",
1325 	 * not the "uep".
1326 	 */
1327 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1328 		if (!(irq_state->bempsts & (1 << i)))
1329 			continue;
1330 
1331 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1332 		if (ret < 0)
1333 			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
1334 	}
1335 
1336 	return 0;
1337 }
1338 
1339 static int usbhsf_irq_ready(struct usbhs_priv *priv,
1340 			    struct usbhs_irq_state *irq_state)
1341 {
1342 	struct usbhs_pipe *pipe;
1343 	struct device *dev = usbhs_priv_to_dev(priv);
1344 	int i, ret;
1345 
1346 	if (!irq_state->brdysts) {
1347 		dev_err(dev, "debug %s !!\n", __func__);
1348 		return -EIO;
1349 	}
1350 
1351 	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);
1352 
1353 	/*
1354 	 * search for the interrupted "pipe",
1355 	 * not the "uep".
1356 	 */
1357 	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
1358 		if (!(irq_state->brdysts & (1 << i)))
1359 			continue;
1360 
1361 		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
1362 		if (ret < 0)
1363 			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
1364 	}
1365 
1366 	return 0;
1367 }
1368 
1369 static void usbhsf_dma_complete(void *arg,
1370 				const struct dmaengine_result *result)
1371 {
1372 	struct usbhs_pkt *pkt = arg;
1373 	struct usbhs_pipe *pipe = pkt->pipe;
1374 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1375 	struct device *dev = usbhs_priv_to_dev(priv);
1376 	int ret;
1377 
1378 	pkt->dma_result = result;
1379 	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1380 	if (ret < 0)
1381 		dev_err(dev, "dma_complete run_error %d : %d\n",
1382 			usbhs_pipe_number(pipe), ret);
1383 }
1384 
1385 void usbhs_fifo_clear_dcp(struct usbhs_pipe *pipe)
1386 {
1387 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1388 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
1389 
1390 	/* clear the DCP FIFO for transmission */
1391 	if (usbhsf_fifo_select(pipe, fifo, 1) < 0)
1392 		return;
1393 	usbhsf_fifo_clear(pipe, fifo);
1394 	usbhsf_fifo_unselect(pipe, fifo);
1395 
1396 	/* clear the DCP FIFO for reception */
1397 	if (usbhsf_fifo_select(pipe, fifo, 0) < 0)
1398 		return;
1399 	usbhsf_fifo_clear(pipe, fifo);
1400 	usbhsf_fifo_unselect(pipe, fifo);
1401 }
1402 
1403 /*
1404  *		fifo init
1405  */
1406 void usbhs_fifo_init(struct usbhs_priv *priv)
1407 {
1408 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1409 	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1410 	struct usbhs_fifo *dfifo;
1411 	int i;
1412 
1413 	mod->irq_empty		= usbhsf_irq_empty;
1414 	mod->irq_ready		= usbhsf_irq_ready;
1415 	mod->irq_bempsts	= 0;
1416 	mod->irq_brdysts	= 0;
1417 
1418 	cfifo->pipe	= NULL;
1419 	usbhs_for_each_dfifo(priv, dfifo, i)
1420 		dfifo->pipe	= NULL;
1421 }
1422 
1423 void usbhs_fifo_quit(struct usbhs_priv *priv)
1424 {
1425 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1426 
1427 	mod->irq_empty		= NULL;
1428 	mod->irq_ready		= NULL;
1429 	mod->irq_bempsts	= 0;
1430 	mod->irq_brdysts	= 0;
1431 }
1432 
1433 #define __USBHS_DFIFO_INIT(priv, fifo, channel, fifo_port)		\
1434 do {									\
1435 	fifo = usbhsf_get_dnfifo(priv, channel);			\
1436 	fifo->name	= "D"#channel"FIFO";				\
1437 	fifo->port	= fifo_port;					\
1438 	fifo->sel	= D##channel##FIFOSEL;				\
1439 	fifo->ctr	= D##channel##FIFOCTR;				\
1440 	fifo->tx_slave.shdma_slave.slave_id =				\
1441 			usbhs_get_dparam(priv, d##channel##_tx_id);	\
1442 	fifo->rx_slave.shdma_slave.slave_id =				\
1443 			usbhs_get_dparam(priv, d##channel##_rx_id);	\
1444 	usbhsf_dma_init(priv, fifo, channel);				\
1445 } while (0)
1446 
1447 #define USBHS_DFIFO_INIT(priv, fifo, channel)				\
1448 		__USBHS_DFIFO_INIT(priv, fifo, channel, D##channel##FIFO)
1449 #define USBHS_DFIFO_INIT_NO_PORT(priv, fifo, channel)			\
1450 		__USBHS_DFIFO_INIT(priv, fifo, channel, 0)
1451 
1452 int usbhs_fifo_probe(struct usbhs_priv *priv)
1453 {
1454 	struct usbhs_fifo *fifo;
1455 
1456 	/* CFIFO */
1457 	fifo = usbhsf_get_cfifo(priv);
1458 	fifo->name	= "CFIFO";
1459 	fifo->port	= CFIFO;
1460 	fifo->sel	= CFIFOSEL;
1461 	fifo->ctr	= CFIFOCTR;
1462 
1463 	/* DFIFO */
1464 	USBHS_DFIFO_INIT(priv, fifo, 0);
1465 	USBHS_DFIFO_INIT(priv, fifo, 1);
1466 	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 2);
1467 	USBHS_DFIFO_INIT_NO_PORT(priv, fifo, 3);
1468 
1469 	return 0;
1470 }
1471 
1472 void usbhs_fifo_remove(struct usbhs_priv *priv)
1473 {
1474 	struct usbhs_fifo *fifo;
1475 	int i;
1476 
1477 	usbhs_for_each_dfifo(priv, fifo, i)
1478 		usbhsf_dma_quit(priv, fifo);
1479 }
1480