xref: /openbmc/linux/drivers/usb/dwc2/hcd_ddma.c (revision f25c42b8d604fbca6d8d3eff2365a73bbef076d3)
15fd54aceSGreg Kroah-Hartman // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2197ba5f4SPaul Zimmerman /*
3197ba5f4SPaul Zimmerman  * hcd_ddma.c - DesignWare HS OTG Controller descriptor DMA routines
4197ba5f4SPaul Zimmerman  *
5197ba5f4SPaul Zimmerman  * Copyright (C) 2004-2013 Synopsys, Inc.
6197ba5f4SPaul Zimmerman  *
7197ba5f4SPaul Zimmerman  * Redistribution and use in source and binary forms, with or without
8197ba5f4SPaul Zimmerman  * modification, are permitted provided that the following conditions
9197ba5f4SPaul Zimmerman  * are met:
10197ba5f4SPaul Zimmerman  * 1. Redistributions of source code must retain the above copyright
11197ba5f4SPaul Zimmerman  *    notice, this list of conditions, and the following disclaimer,
12197ba5f4SPaul Zimmerman  *    without modification.
13197ba5f4SPaul Zimmerman  * 2. Redistributions in binary form must reproduce the above copyright
14197ba5f4SPaul Zimmerman  *    notice, this list of conditions and the following disclaimer in the
15197ba5f4SPaul Zimmerman  *    documentation and/or other materials provided with the distribution.
16197ba5f4SPaul Zimmerman  * 3. The names of the above-listed copyright holders may not be used
17197ba5f4SPaul Zimmerman  *    to endorse or promote products derived from this software without
18197ba5f4SPaul Zimmerman  *    specific prior written permission.
19197ba5f4SPaul Zimmerman  *
20197ba5f4SPaul Zimmerman  * ALTERNATIVELY, this software may be distributed under the terms of the
21197ba5f4SPaul Zimmerman  * GNU General Public License ("GPL") as published by the Free Software
22197ba5f4SPaul Zimmerman  * Foundation; either version 2 of the License, or (at your option) any
23197ba5f4SPaul Zimmerman  * later version.
24197ba5f4SPaul Zimmerman  *
25197ba5f4SPaul Zimmerman  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
26197ba5f4SPaul Zimmerman  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
27197ba5f4SPaul Zimmerman  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28197ba5f4SPaul Zimmerman  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
29197ba5f4SPaul Zimmerman  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
30197ba5f4SPaul Zimmerman  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
31197ba5f4SPaul Zimmerman  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
32197ba5f4SPaul Zimmerman  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
33197ba5f4SPaul Zimmerman  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
34197ba5f4SPaul Zimmerman  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
35197ba5f4SPaul Zimmerman  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36197ba5f4SPaul Zimmerman  */
37197ba5f4SPaul Zimmerman 
38197ba5f4SPaul Zimmerman /*
39197ba5f4SPaul Zimmerman  * This file contains the Descriptor DMA implementation for Host mode
40197ba5f4SPaul Zimmerman  */
41197ba5f4SPaul Zimmerman #include <linux/kernel.h>
42197ba5f4SPaul Zimmerman #include <linux/module.h>
43197ba5f4SPaul Zimmerman #include <linux/spinlock.h>
44197ba5f4SPaul Zimmerman #include <linux/interrupt.h>
45197ba5f4SPaul Zimmerman #include <linux/dma-mapping.h>
46197ba5f4SPaul Zimmerman #include <linux/io.h>
47197ba5f4SPaul Zimmerman #include <linux/slab.h>
48197ba5f4SPaul Zimmerman #include <linux/usb.h>
49197ba5f4SPaul Zimmerman 
50197ba5f4SPaul Zimmerman #include <linux/usb/hcd.h>
51197ba5f4SPaul Zimmerman #include <linux/usb/ch11.h>
52197ba5f4SPaul Zimmerman 
53197ba5f4SPaul Zimmerman #include "core.h"
54197ba5f4SPaul Zimmerman #include "hcd.h"
55197ba5f4SPaul Zimmerman 
56197ba5f4SPaul Zimmerman static u16 dwc2_frame_list_idx(u16 frame)
57197ba5f4SPaul Zimmerman {
58197ba5f4SPaul Zimmerman 	return frame & (FRLISTEN_64_SIZE - 1);
59197ba5f4SPaul Zimmerman }
60197ba5f4SPaul Zimmerman 
61197ba5f4SPaul Zimmerman static u16 dwc2_desclist_idx_inc(u16 idx, u16 inc, u8 speed)
62197ba5f4SPaul Zimmerman {
63197ba5f4SPaul Zimmerman 	return (idx + inc) &
64197ba5f4SPaul Zimmerman 		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
65197ba5f4SPaul Zimmerman 		  MAX_DMA_DESC_NUM_GENERIC) - 1);
66197ba5f4SPaul Zimmerman }
67197ba5f4SPaul Zimmerman 
68197ba5f4SPaul Zimmerman static u16 dwc2_desclist_idx_dec(u16 idx, u16 inc, u8 speed)
69197ba5f4SPaul Zimmerman {
70197ba5f4SPaul Zimmerman 	return (idx - inc) &
71197ba5f4SPaul Zimmerman 		((speed == USB_SPEED_HIGH ? MAX_DMA_DESC_NUM_HS_ISOC :
72197ba5f4SPaul Zimmerman 		  MAX_DMA_DESC_NUM_GENERIC) - 1);
73197ba5f4SPaul Zimmerman }
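/*
 * Both helpers above rely on the descriptor list lengths being powers of
 * two, so the AND with (size - 1) is a cheap modulo that also handles
 * wrap-around. For example, assuming a 64-entry generic list,
 * dwc2_desclist_idx_inc(63, 2, USB_SPEED_FULL) wraps around to 1.
 */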
74197ba5f4SPaul Zimmerman 
75197ba5f4SPaul Zimmerman static u16 dwc2_max_desc_num(struct dwc2_qh *qh)
76197ba5f4SPaul Zimmerman {
77197ba5f4SPaul Zimmerman 	return (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
78197ba5f4SPaul Zimmerman 		qh->dev_speed == USB_SPEED_HIGH) ?
79197ba5f4SPaul Zimmerman 		MAX_DMA_DESC_NUM_HS_ISOC : MAX_DMA_DESC_NUM_GENERIC;
80197ba5f4SPaul Zimmerman }
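/*
 * Only high-speed isochronous endpoints use the larger per-microframe
 * descriptor list (MAX_DMA_DESC_NUM_HS_ISOC); all other endpoint types
 * use the generic list size.
 */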
81197ba5f4SPaul Zimmerman 
82197ba5f4SPaul Zimmerman static u16 dwc2_frame_incr_val(struct dwc2_qh *qh)
83197ba5f4SPaul Zimmerman {
84197ba5f4SPaul Zimmerman 	return qh->dev_speed == USB_SPEED_HIGH ?
85ced9eee1SDouglas Anderson 	       (qh->host_interval + 8 - 1) / 8 : qh->host_interval;
86197ba5f4SPaul Zimmerman }
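/*
 * For high speed, qh->host_interval is expressed in microframes, so the
 * rounded-up division by 8 converts it into frame list entries. For
 * example, an interval of 16 microframes gives (16 + 7) / 8 = 2 entries
 * between activations, while anything up to 8 microframes gives 1.
 */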
87197ba5f4SPaul Zimmerman 
88197ba5f4SPaul Zimmerman static int dwc2_desc_list_alloc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
89197ba5f4SPaul Zimmerman 				gfp_t flags)
90197ba5f4SPaul Zimmerman {
913b5fcc9aSGregory Herrero 	struct kmem_cache *desc_cache;
923b5fcc9aSGregory Herrero 
93ab283202SJohn Youn 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
94ab283202SJohn Youn 	    qh->dev_speed == USB_SPEED_HIGH)
953b5fcc9aSGregory Herrero 		desc_cache = hsotg->desc_hsisoc_cache;
963b5fcc9aSGregory Herrero 	else
973b5fcc9aSGregory Herrero 		desc_cache = hsotg->desc_gen_cache;
983b5fcc9aSGregory Herrero 
99ec703251SVahram Aharonyan 	qh->desc_list_sz = sizeof(struct dwc2_dma_desc) *
10095105a99SGregory Herrero 						dwc2_max_desc_num(qh);
101197ba5f4SPaul Zimmerman 
1023b5fcc9aSGregory Herrero 	qh->desc_list = kmem_cache_zalloc(desc_cache, flags | GFP_DMA);
103197ba5f4SPaul Zimmerman 	if (!qh->desc_list)
104197ba5f4SPaul Zimmerman 		return -ENOMEM;
105197ba5f4SPaul Zimmerman 
10695105a99SGregory Herrero 	qh->desc_list_dma = dma_map_single(hsotg->dev, qh->desc_list,
10795105a99SGregory Herrero 					   qh->desc_list_sz,
10895105a99SGregory Herrero 					   DMA_TO_DEVICE);
109197ba5f4SPaul Zimmerman 
1109da51974SJohn Youn 	qh->n_bytes = kcalloc(dwc2_max_desc_num(qh), sizeof(u32), flags);
111197ba5f4SPaul Zimmerman 	if (!qh->n_bytes) {
11295105a99SGregory Herrero 		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
11395105a99SGregory Herrero 				 qh->desc_list_sz,
11495105a99SGregory Herrero 				 DMA_FROM_DEVICE);
1159bbe91a1SAmitoj Kaur Chawla 		kmem_cache_free(desc_cache, qh->desc_list);
116197ba5f4SPaul Zimmerman 		qh->desc_list = NULL;
117197ba5f4SPaul Zimmerman 		return -ENOMEM;
118197ba5f4SPaul Zimmerman 	}
119197ba5f4SPaul Zimmerman 
120197ba5f4SPaul Zimmerman 	return 0;
121197ba5f4SPaul Zimmerman }
122197ba5f4SPaul Zimmerman 
123197ba5f4SPaul Zimmerman static void dwc2_desc_list_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
124197ba5f4SPaul Zimmerman {
1253b5fcc9aSGregory Herrero 	struct kmem_cache *desc_cache;
1263b5fcc9aSGregory Herrero 
127ab283202SJohn Youn 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC &&
128ab283202SJohn Youn 	    qh->dev_speed == USB_SPEED_HIGH)
1293b5fcc9aSGregory Herrero 		desc_cache = hsotg->desc_hsisoc_cache;
1303b5fcc9aSGregory Herrero 	else
1313b5fcc9aSGregory Herrero 		desc_cache = hsotg->desc_gen_cache;
1323b5fcc9aSGregory Herrero 
133197ba5f4SPaul Zimmerman 	if (qh->desc_list) {
13495105a99SGregory Herrero 		dma_unmap_single(hsotg->dev, qh->desc_list_dma,
13595105a99SGregory Herrero 				 qh->desc_list_sz, DMA_FROM_DEVICE);
1363b5fcc9aSGregory Herrero 		kmem_cache_free(desc_cache, qh->desc_list);
137197ba5f4SPaul Zimmerman 		qh->desc_list = NULL;
138197ba5f4SPaul Zimmerman 	}
139197ba5f4SPaul Zimmerman 
140197ba5f4SPaul Zimmerman 	kfree(qh->n_bytes);
141197ba5f4SPaul Zimmerman 	qh->n_bytes = NULL;
142197ba5f4SPaul Zimmerman }
143197ba5f4SPaul Zimmerman 
144197ba5f4SPaul Zimmerman static int dwc2_frame_list_alloc(struct dwc2_hsotg *hsotg, gfp_t mem_flags)
145197ba5f4SPaul Zimmerman {
146197ba5f4SPaul Zimmerman 	if (hsotg->frame_list)
147197ba5f4SPaul Zimmerman 		return 0;
148197ba5f4SPaul Zimmerman 
14995105a99SGregory Herrero 	hsotg->frame_list_sz = 4 * FRLISTEN_64_SIZE;
15095105a99SGregory Herrero 	hsotg->frame_list = kzalloc(hsotg->frame_list_sz, GFP_ATOMIC | GFP_DMA);
151197ba5f4SPaul Zimmerman 	if (!hsotg->frame_list)
152197ba5f4SPaul Zimmerman 		return -ENOMEM;
153197ba5f4SPaul Zimmerman 
15495105a99SGregory Herrero 	hsotg->frame_list_dma = dma_map_single(hsotg->dev, hsotg->frame_list,
15595105a99SGregory Herrero 					       hsotg->frame_list_sz,
15695105a99SGregory Herrero 					       DMA_TO_DEVICE);
15795105a99SGregory Herrero 
158197ba5f4SPaul Zimmerman 	return 0;
159197ba5f4SPaul Zimmerman }
160197ba5f4SPaul Zimmerman 
161197ba5f4SPaul Zimmerman static void dwc2_frame_list_free(struct dwc2_hsotg *hsotg)
162197ba5f4SPaul Zimmerman {
163197ba5f4SPaul Zimmerman 	unsigned long flags;
164197ba5f4SPaul Zimmerman 
165197ba5f4SPaul Zimmerman 	spin_lock_irqsave(&hsotg->lock, flags);
166197ba5f4SPaul Zimmerman 
167197ba5f4SPaul Zimmerman 	if (!hsotg->frame_list) {
168197ba5f4SPaul Zimmerman 		spin_unlock_irqrestore(&hsotg->lock, flags);
169197ba5f4SPaul Zimmerman 		return;
170197ba5f4SPaul Zimmerman 	}
171197ba5f4SPaul Zimmerman 
17295105a99SGregory Herrero 	dma_unmap_single(hsotg->dev, hsotg->frame_list_dma,
17395105a99SGregory Herrero 			 hsotg->frame_list_sz, DMA_FROM_DEVICE);
17495105a99SGregory Herrero 
17595105a99SGregory Herrero 	kfree(hsotg->frame_list);
176197ba5f4SPaul Zimmerman 	hsotg->frame_list = NULL;
177197ba5f4SPaul Zimmerman 
178197ba5f4SPaul Zimmerman 	spin_unlock_irqrestore(&hsotg->lock, flags);
179197ba5f4SPaul Zimmerman }
180197ba5f4SPaul Zimmerman 
181197ba5f4SPaul Zimmerman static void dwc2_per_sched_enable(struct dwc2_hsotg *hsotg, u32 fr_list_en)
182197ba5f4SPaul Zimmerman {
183197ba5f4SPaul Zimmerman 	u32 hcfg;
184197ba5f4SPaul Zimmerman 	unsigned long flags;
185197ba5f4SPaul Zimmerman 
186197ba5f4SPaul Zimmerman 	spin_lock_irqsave(&hsotg->lock, flags);
187197ba5f4SPaul Zimmerman 
188*f25c42b8SGevorg Sahakyan 	hcfg = dwc2_readl(hsotg, HCFG);
189197ba5f4SPaul Zimmerman 	if (hcfg & HCFG_PERSCHEDENA) {
190197ba5f4SPaul Zimmerman 		/* already enabled */
191197ba5f4SPaul Zimmerman 		spin_unlock_irqrestore(&hsotg->lock, flags);
192197ba5f4SPaul Zimmerman 		return;
193197ba5f4SPaul Zimmerman 	}
194197ba5f4SPaul Zimmerman 
195*f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, hsotg->frame_list_dma, HFLBADDR);
196197ba5f4SPaul Zimmerman 
197197ba5f4SPaul Zimmerman 	hcfg &= ~HCFG_FRLISTEN_MASK;
198197ba5f4SPaul Zimmerman 	hcfg |= fr_list_en | HCFG_PERSCHEDENA;
199197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "Enabling Periodic schedule\n");
200*f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, hcfg, HCFG);
201197ba5f4SPaul Zimmerman 
202197ba5f4SPaul Zimmerman 	spin_unlock_irqrestore(&hsotg->lock, flags);
203197ba5f4SPaul Zimmerman }
204197ba5f4SPaul Zimmerman 
205197ba5f4SPaul Zimmerman static void dwc2_per_sched_disable(struct dwc2_hsotg *hsotg)
206197ba5f4SPaul Zimmerman {
207197ba5f4SPaul Zimmerman 	u32 hcfg;
208197ba5f4SPaul Zimmerman 	unsigned long flags;
209197ba5f4SPaul Zimmerman 
210197ba5f4SPaul Zimmerman 	spin_lock_irqsave(&hsotg->lock, flags);
211197ba5f4SPaul Zimmerman 
212*f25c42b8SGevorg Sahakyan 	hcfg = dwc2_readl(hsotg, HCFG);
213197ba5f4SPaul Zimmerman 	if (!(hcfg & HCFG_PERSCHEDENA)) {
214197ba5f4SPaul Zimmerman 		/* already disabled */
215197ba5f4SPaul Zimmerman 		spin_unlock_irqrestore(&hsotg->lock, flags);
216197ba5f4SPaul Zimmerman 		return;
217197ba5f4SPaul Zimmerman 	}
218197ba5f4SPaul Zimmerman 
219197ba5f4SPaul Zimmerman 	hcfg &= ~HCFG_PERSCHEDENA;
220197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "Disabling Periodic schedule\n");
221*f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, hcfg, HCFG);
222197ba5f4SPaul Zimmerman 
223197ba5f4SPaul Zimmerman 	spin_unlock_irqrestore(&hsotg->lock, flags);
224197ba5f4SPaul Zimmerman }
225197ba5f4SPaul Zimmerman 
226197ba5f4SPaul Zimmerman /*
227197ba5f4SPaul Zimmerman  * Activates/Deactivates FrameList entries for the channel based on endpoint
228197ba5f4SPaul Zimmerman  * servicing period
229197ba5f4SPaul Zimmerman  */
230197ba5f4SPaul Zimmerman static void dwc2_update_frame_list(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
231197ba5f4SPaul Zimmerman 				   int enable)
232197ba5f4SPaul Zimmerman {
233197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan;
234197ba5f4SPaul Zimmerman 	u16 i, j, inc;
235197ba5f4SPaul Zimmerman 
236197ba5f4SPaul Zimmerman 	if (!hsotg) {
237197ba5f4SPaul Zimmerman 		pr_err("hsotg = %p\n", hsotg);
238197ba5f4SPaul Zimmerman 		return;
239197ba5f4SPaul Zimmerman 	}
240197ba5f4SPaul Zimmerman 
241197ba5f4SPaul Zimmerman 	if (!qh->channel) {
242197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "qh->channel = %p\n", qh->channel);
243197ba5f4SPaul Zimmerman 		return;
244197ba5f4SPaul Zimmerman 	}
245197ba5f4SPaul Zimmerman 
246197ba5f4SPaul Zimmerman 	if (!hsotg->frame_list) {
247197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "hsotg->frame_list = %p\n",
248197ba5f4SPaul Zimmerman 			hsotg->frame_list);
249197ba5f4SPaul Zimmerman 		return;
250197ba5f4SPaul Zimmerman 	}
251197ba5f4SPaul Zimmerman 
252197ba5f4SPaul Zimmerman 	chan = qh->channel;
253197ba5f4SPaul Zimmerman 	inc = dwc2_frame_incr_val(qh);
254197ba5f4SPaul Zimmerman 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
255ced9eee1SDouglas Anderson 		i = dwc2_frame_list_idx(qh->next_active_frame);
256197ba5f4SPaul Zimmerman 	else
257197ba5f4SPaul Zimmerman 		i = 0;
258197ba5f4SPaul Zimmerman 
259197ba5f4SPaul Zimmerman 	j = i;
260197ba5f4SPaul Zimmerman 	do {
261197ba5f4SPaul Zimmerman 		if (enable)
262197ba5f4SPaul Zimmerman 			hsotg->frame_list[j] |= 1 << chan->hc_num;
263197ba5f4SPaul Zimmerman 		else
264197ba5f4SPaul Zimmerman 			hsotg->frame_list[j] &= ~(1 << chan->hc_num);
265197ba5f4SPaul Zimmerman 		j = (j + inc) & (FRLISTEN_64_SIZE - 1);
266197ba5f4SPaul Zimmerman 	} while (j != i);
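	/*
	 * For example, with inc = 8 and hc_num = 2, starting at entry 4 the
	 * loop above sets (or clears) bit 2 in frame list entries 4, 12, 20,
	 * ..., 60 and stops once it wraps back around to entry 4.
	 */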
267197ba5f4SPaul Zimmerman 
26895105a99SGregory Herrero 	/*
26995105a99SGregory Herrero 	 * Sync frame list since controller will access it if periodic
27095105a99SGregory Herrero 	 * channel is currently enabled.
27195105a99SGregory Herrero 	 */
27295105a99SGregory Herrero 	dma_sync_single_for_device(hsotg->dev,
27395105a99SGregory Herrero 				   hsotg->frame_list_dma,
27495105a99SGregory Herrero 				   hsotg->frame_list_sz,
27595105a99SGregory Herrero 				   DMA_TO_DEVICE);
27695105a99SGregory Herrero 
277197ba5f4SPaul Zimmerman 	if (!enable)
278197ba5f4SPaul Zimmerman 		return;
279197ba5f4SPaul Zimmerman 
280197ba5f4SPaul Zimmerman 	chan->schinfo = 0;
281ced9eee1SDouglas Anderson 	if (chan->speed == USB_SPEED_HIGH && qh->host_interval) {
282197ba5f4SPaul Zimmerman 		j = 1;
283197ba5f4SPaul Zimmerman 		/* TODO - check this */
284ced9eee1SDouglas Anderson 		inc = (8 + qh->host_interval - 1) / qh->host_interval;
285197ba5f4SPaul Zimmerman 		for (i = 0; i < inc; i++) {
286197ba5f4SPaul Zimmerman 			chan->schinfo |= j;
287ced9eee1SDouglas Anderson 			j = j << qh->host_interval;
288197ba5f4SPaul Zimmerman 		}
289197ba5f4SPaul Zimmerman 	} else {
290197ba5f4SPaul Zimmerman 		chan->schinfo = 0xff;
291197ba5f4SPaul Zimmerman 	}
292197ba5f4SPaul Zimmerman }
293197ba5f4SPaul Zimmerman 
294197ba5f4SPaul Zimmerman static void dwc2_release_channel_ddma(struct dwc2_hsotg *hsotg,
295197ba5f4SPaul Zimmerman 				      struct dwc2_qh *qh)
296197ba5f4SPaul Zimmerman {
297197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan = qh->channel;
298197ba5f4SPaul Zimmerman 
299197ba5f4SPaul Zimmerman 	if (dwc2_qh_is_non_per(qh)) {
30095832c00SJohn Youn 		if (hsotg->params.uframe_sched)
301197ba5f4SPaul Zimmerman 			hsotg->available_host_channels++;
302197ba5f4SPaul Zimmerman 		else
303197ba5f4SPaul Zimmerman 			hsotg->non_periodic_channels--;
304197ba5f4SPaul Zimmerman 	} else {
305197ba5f4SPaul Zimmerman 		dwc2_update_frame_list(hsotg, qh, 0);
3063f808bdaSGregory Herrero 		hsotg->available_host_channels++;
307197ba5f4SPaul Zimmerman 	}
308197ba5f4SPaul Zimmerman 
309197ba5f4SPaul Zimmerman 	/*
310197ba5f4SPaul Zimmerman 	 * The condition prevents a double cleanup attempt in case of a
311197ba5f4SPaul Zimmerman 	 * device disconnect. See channel cleanup in dwc2_hcd_disconnect().
312197ba5f4SPaul Zimmerman 	 */
313197ba5f4SPaul Zimmerman 	if (chan->qh) {
314197ba5f4SPaul Zimmerman 		if (!list_empty(&chan->hc_list_entry))
315197ba5f4SPaul Zimmerman 			list_del(&chan->hc_list_entry);
316197ba5f4SPaul Zimmerman 		dwc2_hc_cleanup(hsotg, chan);
317197ba5f4SPaul Zimmerman 		list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
318197ba5f4SPaul Zimmerman 		chan->qh = NULL;
319197ba5f4SPaul Zimmerman 	}
320197ba5f4SPaul Zimmerman 
321197ba5f4SPaul Zimmerman 	qh->channel = NULL;
322197ba5f4SPaul Zimmerman 	qh->ntd = 0;
323197ba5f4SPaul Zimmerman 
324197ba5f4SPaul Zimmerman 	if (qh->desc_list)
325ec703251SVahram Aharonyan 		memset(qh->desc_list, 0, sizeof(struct dwc2_dma_desc) *
326197ba5f4SPaul Zimmerman 		       dwc2_max_desc_num(qh));
327197ba5f4SPaul Zimmerman }
328197ba5f4SPaul Zimmerman 
329197ba5f4SPaul Zimmerman /**
330197ba5f4SPaul Zimmerman  * dwc2_hcd_qh_init_ddma() - Initializes a QH structure's Descriptor DMA
331197ba5f4SPaul Zimmerman  * related members
332197ba5f4SPaul Zimmerman  *
333197ba5f4SPaul Zimmerman  * @hsotg: The HCD state structure for the DWC OTG controller
334197ba5f4SPaul Zimmerman  * @qh:    The QH to init
3356fb914d7SGrigor Tovmasyan  * @mem_flags: Indicates the type of memory allocation
336197ba5f4SPaul Zimmerman  *
337197ba5f4SPaul Zimmerman  * Return: 0 if successful, negative error code otherwise
338197ba5f4SPaul Zimmerman  *
339197ba5f4SPaul Zimmerman  * Allocates memory for the descriptor list. For the first periodic QH,
340197ba5f4SPaul Zimmerman  * allocates memory for the FrameList and enables periodic scheduling.
341197ba5f4SPaul Zimmerman  */
342197ba5f4SPaul Zimmerman int dwc2_hcd_qh_init_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
343197ba5f4SPaul Zimmerman 			  gfp_t mem_flags)
344197ba5f4SPaul Zimmerman {
345197ba5f4SPaul Zimmerman 	int retval;
346197ba5f4SPaul Zimmerman 
347197ba5f4SPaul Zimmerman 	if (qh->do_split) {
348197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev,
349197ba5f4SPaul Zimmerman 			"SPLIT Transfers are not supported in Descriptor DMA mode.\n");
350197ba5f4SPaul Zimmerman 		retval = -EINVAL;
351197ba5f4SPaul Zimmerman 		goto err0;
352197ba5f4SPaul Zimmerman 	}
353197ba5f4SPaul Zimmerman 
354197ba5f4SPaul Zimmerman 	retval = dwc2_desc_list_alloc(hsotg, qh, mem_flags);
355197ba5f4SPaul Zimmerman 	if (retval)
356197ba5f4SPaul Zimmerman 		goto err0;
357197ba5f4SPaul Zimmerman 
358197ba5f4SPaul Zimmerman 	if (qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
359197ba5f4SPaul Zimmerman 	    qh->ep_type == USB_ENDPOINT_XFER_INT) {
360197ba5f4SPaul Zimmerman 		if (!hsotg->frame_list) {
361197ba5f4SPaul Zimmerman 			retval = dwc2_frame_list_alloc(hsotg, mem_flags);
362197ba5f4SPaul Zimmerman 			if (retval)
363197ba5f4SPaul Zimmerman 				goto err1;
364197ba5f4SPaul Zimmerman 			/* Enable periodic schedule on first periodic QH */
365197ba5f4SPaul Zimmerman 			dwc2_per_sched_enable(hsotg, HCFG_FRLISTEN_64);
366197ba5f4SPaul Zimmerman 		}
367197ba5f4SPaul Zimmerman 	}
368197ba5f4SPaul Zimmerman 
369197ba5f4SPaul Zimmerman 	qh->ntd = 0;
370197ba5f4SPaul Zimmerman 	return 0;
371197ba5f4SPaul Zimmerman 
372197ba5f4SPaul Zimmerman err1:
373197ba5f4SPaul Zimmerman 	dwc2_desc_list_free(hsotg, qh);
374197ba5f4SPaul Zimmerman err0:
375197ba5f4SPaul Zimmerman 	return retval;
376197ba5f4SPaul Zimmerman }
377197ba5f4SPaul Zimmerman 
378197ba5f4SPaul Zimmerman /**
379197ba5f4SPaul Zimmerman  * dwc2_hcd_qh_free_ddma() - Frees a QH structure's Descriptor DMA related
380197ba5f4SPaul Zimmerman  * members
381197ba5f4SPaul Zimmerman  *
382197ba5f4SPaul Zimmerman  * @hsotg: The HCD state structure for the DWC OTG controller
383197ba5f4SPaul Zimmerman  * @qh:    The QH to free
384197ba5f4SPaul Zimmerman  *
385197ba5f4SPaul Zimmerman  * Frees descriptor list memory associated with the QH. If the QH is the last
386197ba5f4SPaul Zimmerman  * periodic one, also frees the FrameList memory and disables periodic scheduling.
387197ba5f4SPaul Zimmerman  */
388197ba5f4SPaul Zimmerman void dwc2_hcd_qh_free_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
389197ba5f4SPaul Zimmerman {
3902b046bc5SGregory Herrero 	unsigned long flags;
3912b046bc5SGregory Herrero 
392197ba5f4SPaul Zimmerman 	dwc2_desc_list_free(hsotg, qh);
393197ba5f4SPaul Zimmerman 
394197ba5f4SPaul Zimmerman 	/*
395197ba5f4SPaul Zimmerman 	 * In some cases the channel is still assigned here. This has been
396197ba5f4SPaul Zimmerman 	 * seen on isochronous URB dequeue: the channel is halted but no
397197ba5f4SPaul Zimmerman 	 * subsequent ChHalted interrupt arrives to release it, so when the
398197ba5f4SPaul Zimmerman 	 * endpoint disable routine gets here the channel is still assigned
399197ba5f4SPaul Zimmerman 	 * and must be released explicitly.
400197ba5f4SPaul Zimmerman 	 */
4012b046bc5SGregory Herrero 	spin_lock_irqsave(&hsotg->lock, flags);
402197ba5f4SPaul Zimmerman 	if (qh->channel)
403197ba5f4SPaul Zimmerman 		dwc2_release_channel_ddma(hsotg, qh);
4042b046bc5SGregory Herrero 	spin_unlock_irqrestore(&hsotg->lock, flags);
405197ba5f4SPaul Zimmerman 
406197ba5f4SPaul Zimmerman 	if ((qh->ep_type == USB_ENDPOINT_XFER_ISOC ||
407197ba5f4SPaul Zimmerman 	     qh->ep_type == USB_ENDPOINT_XFER_INT) &&
40895832c00SJohn Youn 	    (hsotg->params.uframe_sched ||
409197ba5f4SPaul Zimmerman 	     !hsotg->periodic_channels) && hsotg->frame_list) {
410197ba5f4SPaul Zimmerman 		dwc2_per_sched_disable(hsotg);
411197ba5f4SPaul Zimmerman 		dwc2_frame_list_free(hsotg);
412197ba5f4SPaul Zimmerman 	}
413197ba5f4SPaul Zimmerman }
414197ba5f4SPaul Zimmerman 
415197ba5f4SPaul Zimmerman static u8 dwc2_frame_to_desc_idx(struct dwc2_qh *qh, u16 frame_idx)
416197ba5f4SPaul Zimmerman {
417197ba5f4SPaul Zimmerman 	if (qh->dev_speed == USB_SPEED_HIGH)
418197ba5f4SPaul Zimmerman 		/* Descriptor set (8 descriptors) index which is 8-aligned */
419197ba5f4SPaul Zimmerman 		return (frame_idx & ((MAX_DMA_DESC_NUM_HS_ISOC / 8) - 1)) * 8;
420197ba5f4SPaul Zimmerman 	else
421197ba5f4SPaul Zimmerman 		return frame_idx & (MAX_DMA_DESC_NUM_GENERIC - 1);
422197ba5f4SPaul Zimmerman }
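/*
 * For high speed each frame owns a set of 8 descriptors (one per
 * microframe), so the returned index is always 8-aligned; for full speed
 * the frame index maps directly onto a single descriptor slot.
 */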
423197ba5f4SPaul Zimmerman 
424197ba5f4SPaul Zimmerman /*
425197ba5f4SPaul Zimmerman  * Determine starting frame for Isochronous transfer.
426197ba5f4SPaul Zimmerman  * A few frames are skipped to prevent a race condition with the HC.
427197ba5f4SPaul Zimmerman  */
428197ba5f4SPaul Zimmerman static u16 dwc2_calc_starting_frame(struct dwc2_hsotg *hsotg,
429197ba5f4SPaul Zimmerman 				    struct dwc2_qh *qh, u16 *skip_frames)
430197ba5f4SPaul Zimmerman {
431197ba5f4SPaul Zimmerman 	u16 frame;
432197ba5f4SPaul Zimmerman 
433197ba5f4SPaul Zimmerman 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
434197ba5f4SPaul Zimmerman 
435ced9eee1SDouglas Anderson 	/*
436ced9eee1SDouglas Anderson 	 * next_active_frame is always frame number (not uFrame) both in FS
437ced9eee1SDouglas Anderson 	 * and HS!
438ced9eee1SDouglas Anderson 	 */
439197ba5f4SPaul Zimmerman 
440197ba5f4SPaul Zimmerman 	/*
441197ba5f4SPaul Zimmerman 	 * skip_frames is used to limit the number of activated descriptors,
442197ba5f4SPaul Zimmerman 	 * to avoid the situation where the HC services the last activated
443197ba5f4SPaul Zimmerman 	 * descriptor first.
444197ba5f4SPaul Zimmerman 	 * Example for FS:
445197ba5f4SPaul Zimmerman 	 * The current frame is 1, the scheduled frame is 3. Since the HC
446197ba5f4SPaul Zimmerman 	 * always fetches the descriptor corresponding to curr_frame+1, the
447197ba5f4SPaul Zimmerman 	 * descriptor corresponding to frame 2 will be fetched. If the number
448197ba5f4SPaul Zimmerman 	 * of descriptors is max=64 (or greater) the list will be fully
449197ba5f4SPaul Zimmerman 	 * programmed with Active descriptors and it is possible (though rare)
450197ba5f4SPaul Zimmerman 	 * that the latest descriptor (considering rollback) corresponding to
451197ba5f4SPaul Zimmerman 	 * frame 2 will be serviced first. The HS case is more probable
452197ba5f4SPaul Zimmerman 	 * because, in fact, up to 11 uframes (16 in the code) may be skipped.
453197ba5f4SPaul Zimmerman 	 */
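	/*
	 * Rough illustration for HS (assuming dwc2_micro_frame_num() returns
	 * the low three bits and dwc2_full_frame_num() clears them): at
	 * microframe 6 of the current frame, 16 microframes are skipped and
	 * the result is rounded down to a frame boundary, so the transfer
	 * starts two full frames ahead; at microframe 3, only 8 microframes
	 * are skipped and it starts on the next frame.
	 */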
454197ba5f4SPaul Zimmerman 	if (qh->dev_speed == USB_SPEED_HIGH) {
455197ba5f4SPaul Zimmerman 		/*
456197ba5f4SPaul Zimmerman 		 * Also consider the uframe counter, to start the transfer as
457197ba5f4SPaul Zimmerman 		 * soon as possible. If half of the frame has elapsed, skip 2
458197ba5f4SPaul Zimmerman 		 * frames, otherwise just 1 frame. The starting descriptor
459197ba5f4SPaul Zimmerman 		 * index must be 8-aligned, so if the current frame is nearly
460197ba5f4SPaul Zimmerman 		 * complete the next one is skipped as well.
461197ba5f4SPaul Zimmerman 		 */
462197ba5f4SPaul Zimmerman 		if (dwc2_micro_frame_num(hsotg->frame_number) >= 5) {
463197ba5f4SPaul Zimmerman 			*skip_frames = 2 * 8;
464197ba5f4SPaul Zimmerman 			frame = dwc2_frame_num_inc(hsotg->frame_number,
465197ba5f4SPaul Zimmerman 						   *skip_frames);
466197ba5f4SPaul Zimmerman 		} else {
467197ba5f4SPaul Zimmerman 			*skip_frames = 1 * 8;
468197ba5f4SPaul Zimmerman 			frame = dwc2_frame_num_inc(hsotg->frame_number,
469197ba5f4SPaul Zimmerman 						   *skip_frames);
470197ba5f4SPaul Zimmerman 		}
471197ba5f4SPaul Zimmerman 
472197ba5f4SPaul Zimmerman 		frame = dwc2_full_frame_num(frame);
473197ba5f4SPaul Zimmerman 	} else {
474197ba5f4SPaul Zimmerman 		/*
475197ba5f4SPaul Zimmerman 		 * Two frames are skipped for FS - the current and the next.
476197ba5f4SPaul Zimmerman 		 * But for descriptor programming, 1 frame (descriptor) is
477197ba5f4SPaul Zimmerman 		 * enough, see example above.
478197ba5f4SPaul Zimmerman 		 */
479197ba5f4SPaul Zimmerman 		*skip_frames = 1;
480197ba5f4SPaul Zimmerman 		frame = dwc2_frame_num_inc(hsotg->frame_number, 2);
481197ba5f4SPaul Zimmerman 	}
482197ba5f4SPaul Zimmerman 
483197ba5f4SPaul Zimmerman 	return frame;
484197ba5f4SPaul Zimmerman }
485197ba5f4SPaul Zimmerman 
486197ba5f4SPaul Zimmerman /*
487197ba5f4SPaul Zimmerman  * Calculate initial descriptor index for isochronous transfer based on
488197ba5f4SPaul Zimmerman  * scheduled frame
489197ba5f4SPaul Zimmerman  */
490197ba5f4SPaul Zimmerman static u16 dwc2_recalc_initial_desc_idx(struct dwc2_hsotg *hsotg,
491197ba5f4SPaul Zimmerman 					struct dwc2_qh *qh)
492197ba5f4SPaul Zimmerman {
493197ba5f4SPaul Zimmerman 	u16 frame, fr_idx, fr_idx_tmp, skip_frames;
494197ba5f4SPaul Zimmerman 
495197ba5f4SPaul Zimmerman 	/*
496197ba5f4SPaul Zimmerman 	 * With the current ISOC processing algorithm the channel is released
497197ba5f4SPaul Zimmerman 	 * when there are no more QTDs in the list (qh->ntd == 0). Thus this
498197ba5f4SPaul Zimmerman 	 * function is called only when qh->ntd == 0 and qh->channel == NULL.
499197ba5f4SPaul Zimmerman 	 *
500197ba5f4SPaul Zimmerman 	 * So the qh->channel != NULL branch is not used; it is kept because
501197ba5f4SPaul Zimmerman 	 * it would be required for another possible approach: do not disable
502197ba5f4SPaul Zimmerman 	 * and release the channel when the ISOC session completes, just move
503197ba5f4SPaul Zimmerman 	 * the QH to the inactive schedule until a new QTD arrives. On a new
504197ba5f4SPaul Zimmerman 	 * QTD, the QH is moved back to the 'ready' schedule, and the starting
505197ba5f4SPaul Zimmerman 	 * frame and therefore the starting desc_index are recalculated. In
506197ba5f4SPaul Zimmerman 	 * that case the channel is released only on ep_disable.
507197ba5f4SPaul Zimmerman 	 */
508197ba5f4SPaul Zimmerman 
509197ba5f4SPaul Zimmerman 	/*
510197ba5f4SPaul Zimmerman 	 * Calculate starting descriptor index. For INTERRUPT endpoint it is
511197ba5f4SPaul Zimmerman 	 * always 0.
512197ba5f4SPaul Zimmerman 	 */
513197ba5f4SPaul Zimmerman 	if (qh->channel) {
514197ba5f4SPaul Zimmerman 		frame = dwc2_calc_starting_frame(hsotg, qh, &skip_frames);
515197ba5f4SPaul Zimmerman 		/*
516197ba5f4SPaul Zimmerman 		 * Calculate the initial descriptor index based on the current
517197ba5f4SPaul Zimmerman 		 * FrameList bitmap and the servicing period
518197ba5f4SPaul Zimmerman 		 */
519197ba5f4SPaul Zimmerman 		fr_idx_tmp = dwc2_frame_list_idx(frame);
520197ba5f4SPaul Zimmerman 		fr_idx = (FRLISTEN_64_SIZE +
521ced9eee1SDouglas Anderson 			  dwc2_frame_list_idx(qh->next_active_frame) -
522ced9eee1SDouglas Anderson 			  fr_idx_tmp) % dwc2_frame_incr_val(qh);
523197ba5f4SPaul Zimmerman 		fr_idx = (fr_idx + fr_idx_tmp) % FRLISTEN_64_SIZE;
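		/*
		 * For example, if next_active_frame maps to list entry 10,
		 * the servicing period is 8 and the newly calculated start
		 * frame maps to entry 13, the two statements above give
		 * (64 + 10 - 13) % 8 = 5 and then (5 + 13) % 64 = 18: the
		 * first entry at or after 13 that keeps the QH's phase.
		 */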
524197ba5f4SPaul Zimmerman 	} else {
525ced9eee1SDouglas Anderson 		qh->next_active_frame = dwc2_calc_starting_frame(hsotg, qh,
526197ba5f4SPaul Zimmerman 							   &skip_frames);
527ced9eee1SDouglas Anderson 		fr_idx = dwc2_frame_list_idx(qh->next_active_frame);
528197ba5f4SPaul Zimmerman 	}
529197ba5f4SPaul Zimmerman 
530197ba5f4SPaul Zimmerman 	qh->td_first = qh->td_last = dwc2_frame_to_desc_idx(qh, fr_idx);
531197ba5f4SPaul Zimmerman 
532197ba5f4SPaul Zimmerman 	return skip_frames;
533197ba5f4SPaul Zimmerman }
534197ba5f4SPaul Zimmerman 
535197ba5f4SPaul Zimmerman #define ISOC_URB_GIVEBACK_ASAP
536197ba5f4SPaul Zimmerman 
537197ba5f4SPaul Zimmerman #define MAX_ISOC_XFER_SIZE_FS	1023
538197ba5f4SPaul Zimmerman #define MAX_ISOC_XFER_SIZE_HS	3072
539197ba5f4SPaul Zimmerman #define DESCNUM_THRESHOLD	4
540197ba5f4SPaul Zimmerman 
541197ba5f4SPaul Zimmerman static void dwc2_fill_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
542197ba5f4SPaul Zimmerman 					 struct dwc2_qtd *qtd,
543197ba5f4SPaul Zimmerman 					 struct dwc2_qh *qh, u32 max_xfer_size,
544197ba5f4SPaul Zimmerman 					 u16 idx)
545197ba5f4SPaul Zimmerman {
546ec703251SVahram Aharonyan 	struct dwc2_dma_desc *dma_desc = &qh->desc_list[idx];
547197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
548197ba5f4SPaul Zimmerman 
549197ba5f4SPaul Zimmerman 	memset(dma_desc, 0, sizeof(*dma_desc));
550197ba5f4SPaul Zimmerman 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
551197ba5f4SPaul Zimmerman 
552197ba5f4SPaul Zimmerman 	if (frame_desc->length > max_xfer_size)
553197ba5f4SPaul Zimmerman 		qh->n_bytes[idx] = max_xfer_size;
554197ba5f4SPaul Zimmerman 	else
555197ba5f4SPaul Zimmerman 		qh->n_bytes[idx] = frame_desc->length;
556197ba5f4SPaul Zimmerman 
557197ba5f4SPaul Zimmerman 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
558197ba5f4SPaul Zimmerman 	dma_desc->status = qh->n_bytes[idx] << HOST_DMA_ISOC_NBYTES_SHIFT &
559197ba5f4SPaul Zimmerman 			   HOST_DMA_ISOC_NBYTES_MASK;
560197ba5f4SPaul Zimmerman 
561dde4c1bfSGregory Herrero 	/* Set active bit */
562dde4c1bfSGregory Herrero 	dma_desc->status |= HOST_DMA_A;
563dde4c1bfSGregory Herrero 
5643ac38d26SGregory Herrero 	qh->ntd++;
5653ac38d26SGregory Herrero 	qtd->isoc_frame_index_last++;
5663ac38d26SGregory Herrero 
567197ba5f4SPaul Zimmerman #ifdef ISOC_URB_GIVEBACK_ASAP
568197ba5f4SPaul Zimmerman 	/* Set IOC for each descriptor corresponding to last frame of URB */
569197ba5f4SPaul Zimmerman 	if (qtd->isoc_frame_index_last == qtd->urb->packet_count)
570197ba5f4SPaul Zimmerman 		dma_desc->status |= HOST_DMA_IOC;
571197ba5f4SPaul Zimmerman #endif
572197ba5f4SPaul Zimmerman 
57395105a99SGregory Herrero 	dma_sync_single_for_device(hsotg->dev,
57495105a99SGregory Herrero 				   qh->desc_list_dma +
575ec703251SVahram Aharonyan 			(idx * sizeof(struct dwc2_dma_desc)),
576ec703251SVahram Aharonyan 			sizeof(struct dwc2_dma_desc),
57795105a99SGregory Herrero 			DMA_TO_DEVICE);
578197ba5f4SPaul Zimmerman }
579197ba5f4SPaul Zimmerman 
580197ba5f4SPaul Zimmerman static void dwc2_init_isoc_dma_desc(struct dwc2_hsotg *hsotg,
581197ba5f4SPaul Zimmerman 				    struct dwc2_qh *qh, u16 skip_frames)
582197ba5f4SPaul Zimmerman {
583197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd;
584197ba5f4SPaul Zimmerman 	u32 max_xfer_size;
585c17b337cSGregory Herrero 	u16 idx, inc, n_desc = 0, ntd_max = 0;
586c17b337cSGregory Herrero 	u16 cur_idx;
587c17b337cSGregory Herrero 	u16 next_idx;
588197ba5f4SPaul Zimmerman 
589197ba5f4SPaul Zimmerman 	idx = qh->td_last;
590ced9eee1SDouglas Anderson 	inc = qh->host_interval;
591c17b337cSGregory Herrero 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
592c17b337cSGregory Herrero 	cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
593c17b337cSGregory Herrero 	next_idx = dwc2_desclist_idx_inc(qh->td_last, inc, qh->dev_speed);
594c17b337cSGregory Herrero 
595c17b337cSGregory Herrero 	/*
596c17b337cSGregory Herrero 	 * Ensure the current frame number didn't overstep the last scheduled
597c17b337cSGregory Herrero 	 * descriptor. If it did, the only way to recover is to move
598c17b337cSGregory Herrero 	 * qh->td_last to the current frame number + 1, so that the next
599c17b337cSGregory Herrero 	 * isoc descriptor is scheduled on frame number + 1 and not on a
600c17b337cSGregory Herrero 	 * past frame.
601c17b337cSGregory Herrero 	 */
602c17b337cSGregory Herrero 	if (dwc2_frame_idx_num_gt(cur_idx, next_idx) || (cur_idx == next_idx)) {
603c17b337cSGregory Herrero 		if (inc < 32) {
604c17b337cSGregory Herrero 			dev_vdbg(hsotg->dev,
605c17b337cSGregory Herrero 				 "current frame number overstep last descriptor\n");
606c17b337cSGregory Herrero 			qh->td_last = dwc2_desclist_idx_inc(cur_idx, inc,
607c17b337cSGregory Herrero 							    qh->dev_speed);
608c17b337cSGregory Herrero 			idx = qh->td_last;
609c17b337cSGregory Herrero 		}
610c17b337cSGregory Herrero 	}
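	/*
	 * For a full-speed endpoint, for example, if the current frame maps
	 * to list entry 20 but the next descriptor was scheduled for entry
	 * 18 with an interval of 4, td_last is moved to entry 24 so that
	 * new descriptors land on future frames again.
	 */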
611197ba5f4SPaul Zimmerman 
612ced9eee1SDouglas Anderson 	if (qh->host_interval) {
613ced9eee1SDouglas Anderson 		ntd_max = (dwc2_max_desc_num(qh) + qh->host_interval - 1) /
614ced9eee1SDouglas Anderson 				qh->host_interval;
615197ba5f4SPaul Zimmerman 		if (skip_frames && !qh->channel)
616ced9eee1SDouglas Anderson 			ntd_max -= skip_frames / qh->host_interval;
617197ba5f4SPaul Zimmerman 	}
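	/*
	 * ntd_max is how many transfer descriptors fit in the list given the
	 * servicing period: one descriptor is used per service interval, so,
	 * for example, an interval of 8 microframes on a high-speed ISOC
	 * endpoint leaves only every eighth slot of the descriptor list
	 * usable.
	 */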
618197ba5f4SPaul Zimmerman 
619197ba5f4SPaul Zimmerman 	max_xfer_size = qh->dev_speed == USB_SPEED_HIGH ?
620197ba5f4SPaul Zimmerman 			MAX_ISOC_XFER_SIZE_HS : MAX_ISOC_XFER_SIZE_FS;
621197ba5f4SPaul Zimmerman 
622197ba5f4SPaul Zimmerman 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
623c17b337cSGregory Herrero 		if (qtd->in_process &&
624c17b337cSGregory Herrero 		    qtd->isoc_frame_index_last ==
625c17b337cSGregory Herrero 		    qtd->urb->packet_count)
626c17b337cSGregory Herrero 			continue;
627c17b337cSGregory Herrero 
628c17b337cSGregory Herrero 		qtd->isoc_td_first = idx;
629197ba5f4SPaul Zimmerman 		while (qh->ntd < ntd_max && qtd->isoc_frame_index_last <
630197ba5f4SPaul Zimmerman 						qtd->urb->packet_count) {
631197ba5f4SPaul Zimmerman 			dwc2_fill_host_isoc_dma_desc(hsotg, qtd, qh,
632197ba5f4SPaul Zimmerman 						     max_xfer_size, idx);
633197ba5f4SPaul Zimmerman 			idx = dwc2_desclist_idx_inc(idx, inc, qh->dev_speed);
634197ba5f4SPaul Zimmerman 			n_desc++;
635197ba5f4SPaul Zimmerman 		}
636c17b337cSGregory Herrero 		qtd->isoc_td_last = idx;
637197ba5f4SPaul Zimmerman 		qtd->in_process = 1;
638197ba5f4SPaul Zimmerman 	}
639197ba5f4SPaul Zimmerman 
640197ba5f4SPaul Zimmerman 	qh->td_last = idx;
641197ba5f4SPaul Zimmerman 
642197ba5f4SPaul Zimmerman #ifdef ISOC_URB_GIVEBACK_ASAP
643197ba5f4SPaul Zimmerman 	/* Set IOC for last descriptor if descriptor list is full */
644197ba5f4SPaul Zimmerman 	if (qh->ntd == ntd_max) {
645197ba5f4SPaul Zimmerman 		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
646197ba5f4SPaul Zimmerman 		qh->desc_list[idx].status |= HOST_DMA_IOC;
64795105a99SGregory Herrero 		dma_sync_single_for_device(hsotg->dev,
64895105a99SGregory Herrero 					   qh->desc_list_dma + (idx *
649ec703251SVahram Aharonyan 					   sizeof(struct dwc2_dma_desc)),
650ec703251SVahram Aharonyan 					   sizeof(struct dwc2_dma_desc),
65195105a99SGregory Herrero 					   DMA_TO_DEVICE);
652197ba5f4SPaul Zimmerman 	}
653197ba5f4SPaul Zimmerman #else
654197ba5f4SPaul Zimmerman 	/*
655197ba5f4SPaul Zimmerman 	 * Set IOC bit only for one descriptor. Always try to be ahead of HW
656197ba5f4SPaul Zimmerman 	 * processing, i.e. on IOC generation the driver activates the next
657197ba5f4SPaul Zimmerman 	 * descriptor, but the core continues to process descriptors following
658197ba5f4SPaul Zimmerman 	 * the one with IOC set.
659197ba5f4SPaul Zimmerman 	 */
660197ba5f4SPaul Zimmerman 
661197ba5f4SPaul Zimmerman 	if (n_desc > DESCNUM_THRESHOLD)
662197ba5f4SPaul Zimmerman 		/*
663197ba5f4SPaul Zimmerman 		 * Move IOC "up". This is required even if there is only one
664197ba5f4SPaul Zimmerman 		 * QTD in the list, because more QTDs might be queued later
665197ba5f4SPaul Zimmerman 		 * while only one was queued during activation.
666197ba5f4SPaul Zimmerman 		 * Actually more than one QTD may be in the list if this
667197ba5f4SPaul Zimmerman 		 * function is called from XferCompletion - QTDs were queued
668197ba5f4SPaul Zimmerman 		 * during HW processing of the previous descriptor chunk.
669197ba5f4SPaul Zimmerman 		 */
670197ba5f4SPaul Zimmerman 		idx = dwc2_desclist_idx_dec(idx, inc * ((qh->ntd + 1) / 2),
671197ba5f4SPaul Zimmerman 					    qh->dev_speed);
672197ba5f4SPaul Zimmerman 	else
673197ba5f4SPaul Zimmerman 		/*
674197ba5f4SPaul Zimmerman 		 * Set the IOC for the latest descriptor if either number of
675197ba5f4SPaul Zimmerman 		 * descriptors is not greater than threshold or no more new
676197ba5f4SPaul Zimmerman 		 * descriptors activated
677197ba5f4SPaul Zimmerman 		 */
678197ba5f4SPaul Zimmerman 		idx = dwc2_desclist_idx_dec(qh->td_last, inc, qh->dev_speed);
679197ba5f4SPaul Zimmerman 
680197ba5f4SPaul Zimmerman 	qh->desc_list[idx].status |= HOST_DMA_IOC;
68195105a99SGregory Herrero 	dma_sync_single_for_device(hsotg->dev,
68295105a99SGregory Herrero 				   qh->desc_list_dma +
683ec703251SVahram Aharonyan 				   (idx * sizeof(struct dwc2_dma_desc)),
684ec703251SVahram Aharonyan 				   sizeof(struct dwc2_dma_desc),
68595105a99SGregory Herrero 				   DMA_TO_DEVICE);
686197ba5f4SPaul Zimmerman #endif
687197ba5f4SPaul Zimmerman }
688197ba5f4SPaul Zimmerman 
689197ba5f4SPaul Zimmerman static void dwc2_fill_host_dma_desc(struct dwc2_hsotg *hsotg,
690197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan,
691197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd, struct dwc2_qh *qh,
692197ba5f4SPaul Zimmerman 				    int n_desc)
693197ba5f4SPaul Zimmerman {
694ec703251SVahram Aharonyan 	struct dwc2_dma_desc *dma_desc = &qh->desc_list[n_desc];
695197ba5f4SPaul Zimmerman 	int len = chan->xfer_len;
696197ba5f4SPaul Zimmerman 
6973a1ec351SVahram Aharonyan 	if (len > HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1))
6983a1ec351SVahram Aharonyan 		len = HOST_DMA_NBYTES_LIMIT - (chan->max_packet - 1);
699197ba5f4SPaul Zimmerman 
700197ba5f4SPaul Zimmerman 	if (chan->ep_is_in) {
701197ba5f4SPaul Zimmerman 		int num_packets;
702197ba5f4SPaul Zimmerman 
703197ba5f4SPaul Zimmerman 		if (len > 0 && chan->max_packet)
704197ba5f4SPaul Zimmerman 			num_packets = (len + chan->max_packet - 1)
705197ba5f4SPaul Zimmerman 					/ chan->max_packet;
706197ba5f4SPaul Zimmerman 		else
707197ba5f4SPaul Zimmerman 			/* Need 1 packet for transfer length of 0 */
708197ba5f4SPaul Zimmerman 			num_packets = 1;
709197ba5f4SPaul Zimmerman 
710197ba5f4SPaul Zimmerman 		/* Always program an integral # of packets for IN transfers */
711197ba5f4SPaul Zimmerman 		len = num_packets * chan->max_packet;
712197ba5f4SPaul Zimmerman 	}
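	/*
	 * For example, an IN transfer of 700 bytes with a 512-byte max
	 * packet is rounded up to 2 packets and programmed as 1024 bytes;
	 * the code below then zeroes xfer_len because the rounded length
	 * covers the whole remaining transfer.
	 */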
713197ba5f4SPaul Zimmerman 
714197ba5f4SPaul Zimmerman 	dma_desc->status = len << HOST_DMA_NBYTES_SHIFT & HOST_DMA_NBYTES_MASK;
715197ba5f4SPaul Zimmerman 	qh->n_bytes[n_desc] = len;
716197ba5f4SPaul Zimmerman 
717197ba5f4SPaul Zimmerman 	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL &&
718197ba5f4SPaul Zimmerman 	    qtd->control_phase == DWC2_CONTROL_SETUP)
719197ba5f4SPaul Zimmerman 		dma_desc->status |= HOST_DMA_SUP;
720197ba5f4SPaul Zimmerman 
721197ba5f4SPaul Zimmerman 	dma_desc->buf = (u32)chan->xfer_dma;
722197ba5f4SPaul Zimmerman 
72395105a99SGregory Herrero 	dma_sync_single_for_device(hsotg->dev,
72495105a99SGregory Herrero 				   qh->desc_list_dma +
725ec703251SVahram Aharonyan 				   (n_desc * sizeof(struct dwc2_dma_desc)),
726ec703251SVahram Aharonyan 				   sizeof(struct dwc2_dma_desc),
72795105a99SGregory Herrero 				   DMA_TO_DEVICE);
72895105a99SGregory Herrero 
729197ba5f4SPaul Zimmerman 	/*
730197ba5f4SPaul Zimmerman 	 * Last (or only) descriptor of IN transfer with actual size less
731197ba5f4SPaul Zimmerman 	 * than MaxPacket
732197ba5f4SPaul Zimmerman 	 */
733197ba5f4SPaul Zimmerman 	if (len > chan->xfer_len) {
734197ba5f4SPaul Zimmerman 		chan->xfer_len = 0;
735197ba5f4SPaul Zimmerman 	} else {
736197ba5f4SPaul Zimmerman 		chan->xfer_dma += len;
737197ba5f4SPaul Zimmerman 		chan->xfer_len -= len;
738197ba5f4SPaul Zimmerman 	}
739197ba5f4SPaul Zimmerman }
740197ba5f4SPaul Zimmerman 
741197ba5f4SPaul Zimmerman static void dwc2_init_non_isoc_dma_desc(struct dwc2_hsotg *hsotg,
742197ba5f4SPaul Zimmerman 					struct dwc2_qh *qh)
743197ba5f4SPaul Zimmerman {
744197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd;
745197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan = qh->channel;
746197ba5f4SPaul Zimmerman 	int n_desc = 0;
747197ba5f4SPaul Zimmerman 
748197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "%s(): qh=%p dma=%08lx len=%d\n", __func__, qh,
749197ba5f4SPaul Zimmerman 		 (unsigned long)chan->xfer_dma, chan->xfer_len);
750197ba5f4SPaul Zimmerman 
751197ba5f4SPaul Zimmerman 	/*
752197ba5f4SPaul Zimmerman 	 * Start with chan->xfer_dma initialized in assign_and_init_hc(), then
753197ba5f4SPaul Zimmerman 	 * if the SG transfer consists of multiple URBs, this pointer is
754197ba5f4SPaul Zimmerman 	 * re-assigned to the buffer of the currently processed QTD. For a
755197ba5f4SPaul Zimmerman 	 * non-SG request there is always one QTD active.
756197ba5f4SPaul Zimmerman 	 */
757197ba5f4SPaul Zimmerman 
758197ba5f4SPaul Zimmerman 	list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry) {
759197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "qtd=%p\n", qtd);
760197ba5f4SPaul Zimmerman 
761197ba5f4SPaul Zimmerman 		if (n_desc) {
762197ba5f4SPaul Zimmerman 			/* SG request - more than 1 QTD */
763197ba5f4SPaul Zimmerman 			chan->xfer_dma = qtd->urb->dma +
764197ba5f4SPaul Zimmerman 					qtd->urb->actual_length;
765197ba5f4SPaul Zimmerman 			chan->xfer_len = qtd->urb->length -
766197ba5f4SPaul Zimmerman 					qtd->urb->actual_length;
767197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "buf=%08lx len=%d\n",
768197ba5f4SPaul Zimmerman 				 (unsigned long)chan->xfer_dma, chan->xfer_len);
769197ba5f4SPaul Zimmerman 		}
770197ba5f4SPaul Zimmerman 
771197ba5f4SPaul Zimmerman 		qtd->n_desc = 0;
772197ba5f4SPaul Zimmerman 		do {
773197ba5f4SPaul Zimmerman 			if (n_desc > 1) {
774197ba5f4SPaul Zimmerman 				qh->desc_list[n_desc - 1].status |= HOST_DMA_A;
775197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
776197ba5f4SPaul Zimmerman 					 "set A bit in desc %d (%p)\n",
777197ba5f4SPaul Zimmerman 					 n_desc - 1,
778197ba5f4SPaul Zimmerman 					 &qh->desc_list[n_desc - 1]);
77995105a99SGregory Herrero 				dma_sync_single_for_device(hsotg->dev,
78095105a99SGregory Herrero 							   qh->desc_list_dma +
78195105a99SGregory Herrero 					((n_desc - 1) *
782ec703251SVahram Aharonyan 					sizeof(struct dwc2_dma_desc)),
783ec703251SVahram Aharonyan 					sizeof(struct dwc2_dma_desc),
78495105a99SGregory Herrero 					DMA_TO_DEVICE);
785197ba5f4SPaul Zimmerman 			}
786197ba5f4SPaul Zimmerman 			dwc2_fill_host_dma_desc(hsotg, chan, qtd, qh, n_desc);
787197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
788197ba5f4SPaul Zimmerman 				 "desc %d (%p) buf=%08x status=%08x\n",
789197ba5f4SPaul Zimmerman 				 n_desc, &qh->desc_list[n_desc],
790197ba5f4SPaul Zimmerman 				 qh->desc_list[n_desc].buf,
791197ba5f4SPaul Zimmerman 				 qh->desc_list[n_desc].status);
792197ba5f4SPaul Zimmerman 			qtd->n_desc++;
793197ba5f4SPaul Zimmerman 			n_desc++;
794197ba5f4SPaul Zimmerman 		} while (chan->xfer_len > 0 &&
795197ba5f4SPaul Zimmerman 			 n_desc != MAX_DMA_DESC_NUM_GENERIC);
796197ba5f4SPaul Zimmerman 
797197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "n_desc=%d\n", n_desc);
798197ba5f4SPaul Zimmerman 		qtd->in_process = 1;
799197ba5f4SPaul Zimmerman 		if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL)
800197ba5f4SPaul Zimmerman 			break;
801197ba5f4SPaul Zimmerman 		if (n_desc == MAX_DMA_DESC_NUM_GENERIC)
802197ba5f4SPaul Zimmerman 			break;
803197ba5f4SPaul Zimmerman 	}
804197ba5f4SPaul Zimmerman 
805197ba5f4SPaul Zimmerman 	if (n_desc) {
806197ba5f4SPaul Zimmerman 		qh->desc_list[n_desc - 1].status |=
807197ba5f4SPaul Zimmerman 				HOST_DMA_IOC | HOST_DMA_EOL | HOST_DMA_A;
808197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "set IOC/EOL/A bits in desc %d (%p)\n",
809197ba5f4SPaul Zimmerman 			 n_desc - 1, &qh->desc_list[n_desc - 1]);
81095105a99SGregory Herrero 		dma_sync_single_for_device(hsotg->dev,
81195105a99SGregory Herrero 					   qh->desc_list_dma + (n_desc - 1) *
812ec703251SVahram Aharonyan 					   sizeof(struct dwc2_dma_desc),
813ec703251SVahram Aharonyan 					   sizeof(struct dwc2_dma_desc),
81495105a99SGregory Herrero 					   DMA_TO_DEVICE);
815197ba5f4SPaul Zimmerman 		if (n_desc > 1) {
816197ba5f4SPaul Zimmerman 			qh->desc_list[0].status |= HOST_DMA_A;
817197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "set A bit in desc 0 (%p)\n",
818197ba5f4SPaul Zimmerman 				 &qh->desc_list[0]);
81995105a99SGregory Herrero 			dma_sync_single_for_device(hsotg->dev,
82095105a99SGregory Herrero 						   qh->desc_list_dma,
821ec703251SVahram Aharonyan 					sizeof(struct dwc2_dma_desc),
82295105a99SGregory Herrero 					DMA_TO_DEVICE);
823197ba5f4SPaul Zimmerman 		}
824197ba5f4SPaul Zimmerman 		chan->ntd = n_desc;
825197ba5f4SPaul Zimmerman 	}
826197ba5f4SPaul Zimmerman }
827197ba5f4SPaul Zimmerman 
828197ba5f4SPaul Zimmerman /**
829197ba5f4SPaul Zimmerman  * dwc2_hcd_start_xfer_ddma() - Starts a transfer in Descriptor DMA mode
830197ba5f4SPaul Zimmerman  *
831197ba5f4SPaul Zimmerman  * @hsotg: The HCD state structure for the DWC OTG controller
832197ba5f4SPaul Zimmerman  * @qh:    The QH to init
833197ba5f4SPaul Zimmerman  *
834197ba5f4SPaul Zimmerman  * Return: 0 if successful, negative error code otherwise
835197ba5f4SPaul Zimmerman  *
836197ba5f4SPaul Zimmerman  * For Control and Bulk endpoints, initializes descriptor list and starts the
837197ba5f4SPaul Zimmerman  * transfer. For Interrupt and Isochronous endpoints, initializes descriptor
838197ba5f4SPaul Zimmerman  * list then updates FrameList, marking appropriate entries as active.
839197ba5f4SPaul Zimmerman  *
840197ba5f4SPaul Zimmerman  * For Isochronous endpoints the starting descriptor index is calculated based
841197ba5f4SPaul Zimmerman  * on the scheduled frame, but only on the first transfer descriptor within a
842197ba5f4SPaul Zimmerman  * session. Then the transfer is started via enabling the channel.
843197ba5f4SPaul Zimmerman  *
844197ba5f4SPaul Zimmerman  * For Isochronous endpoints the channel is not halted on XferComplete
845197ba5f4SPaul Zimmerman  * interrupt, so it remains assigned to the endpoint (QH) until the session is done.
846197ba5f4SPaul Zimmerman  */
847197ba5f4SPaul Zimmerman void dwc2_hcd_start_xfer_ddma(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
848197ba5f4SPaul Zimmerman {
849197ba5f4SPaul Zimmerman 	/* Channel is already assigned */
850197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan = qh->channel;
851197ba5f4SPaul Zimmerman 	u16 skip_frames = 0;
852197ba5f4SPaul Zimmerman 
853197ba5f4SPaul Zimmerman 	switch (chan->ep_type) {
854197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
855197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
856197ba5f4SPaul Zimmerman 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
857197ba5f4SPaul Zimmerman 		dwc2_hc_start_transfer_ddma(hsotg, chan);
858197ba5f4SPaul Zimmerman 		break;
859197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
860197ba5f4SPaul Zimmerman 		dwc2_init_non_isoc_dma_desc(hsotg, qh);
861197ba5f4SPaul Zimmerman 		dwc2_update_frame_list(hsotg, qh, 1);
862197ba5f4SPaul Zimmerman 		dwc2_hc_start_transfer_ddma(hsotg, chan);
863197ba5f4SPaul Zimmerman 		break;
864197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
865197ba5f4SPaul Zimmerman 		if (!qh->ntd)
866197ba5f4SPaul Zimmerman 			skip_frames = dwc2_recalc_initial_desc_idx(hsotg, qh);
867197ba5f4SPaul Zimmerman 		dwc2_init_isoc_dma_desc(hsotg, qh, skip_frames);
868197ba5f4SPaul Zimmerman 
869197ba5f4SPaul Zimmerman 		if (!chan->xfer_started) {
870197ba5f4SPaul Zimmerman 			dwc2_update_frame_list(hsotg, qh, 1);
871197ba5f4SPaul Zimmerman 
872197ba5f4SPaul Zimmerman 			/*
873197ba5f4SPaul Zimmerman 			 * Always set ntd to the maximum instead of the actual
874197ba5f4SPaul Zimmerman 			 * size. Otherwise ntd would have to be changed while
875197ba5f4SPaul Zimmerman 			 * the channel is enabled, which is not recommended.
876197ba5f4SPaul Zimmerman 			 */
877197ba5f4SPaul Zimmerman 			chan->ntd = dwc2_max_desc_num(qh);
878197ba5f4SPaul Zimmerman 
879197ba5f4SPaul Zimmerman 			/* Enable channel only once for ISOC */
880197ba5f4SPaul Zimmerman 			dwc2_hc_start_transfer_ddma(hsotg, chan);
881197ba5f4SPaul Zimmerman 		}
882197ba5f4SPaul Zimmerman 
883197ba5f4SPaul Zimmerman 		break;
884197ba5f4SPaul Zimmerman 	default:
885197ba5f4SPaul Zimmerman 		break;
886197ba5f4SPaul Zimmerman 	}
887197ba5f4SPaul Zimmerman }
888197ba5f4SPaul Zimmerman 
889197ba5f4SPaul Zimmerman #define DWC2_CMPL_DONE		1
890197ba5f4SPaul Zimmerman #define DWC2_CMPL_STOP		2
891197ba5f4SPaul Zimmerman 
892197ba5f4SPaul Zimmerman static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
893197ba5f4SPaul Zimmerman 					struct dwc2_host_chan *chan,
894197ba5f4SPaul Zimmerman 					struct dwc2_qtd *qtd,
895197ba5f4SPaul Zimmerman 					struct dwc2_qh *qh, u16 idx)
896197ba5f4SPaul Zimmerman {
897ec703251SVahram Aharonyan 	struct dwc2_dma_desc *dma_desc;
898197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
899197ba5f4SPaul Zimmerman 	u16 remain = 0;
900197ba5f4SPaul Zimmerman 	int rc = 0;
901197ba5f4SPaul Zimmerman 
902197ba5f4SPaul Zimmerman 	if (!qtd->urb)
903197ba5f4SPaul Zimmerman 		return -EINVAL;
904197ba5f4SPaul Zimmerman 
90595105a99SGregory Herrero 	dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
906ec703251SVahram Aharonyan 				sizeof(struct dwc2_dma_desc)),
907ec703251SVahram Aharonyan 				sizeof(struct dwc2_dma_desc),
90895105a99SGregory Herrero 				DMA_FROM_DEVICE);
90995105a99SGregory Herrero 
91095105a99SGregory Herrero 	dma_desc = &qh->desc_list[idx];
91195105a99SGregory Herrero 
912197ba5f4SPaul Zimmerman 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index_last];
913197ba5f4SPaul Zimmerman 	dma_desc->buf = (u32)(qtd->urb->dma + frame_desc->offset);
914197ba5f4SPaul Zimmerman 	if (chan->ep_is_in)
915197ba5f4SPaul Zimmerman 		remain = (dma_desc->status & HOST_DMA_ISOC_NBYTES_MASK) >>
916197ba5f4SPaul Zimmerman 			 HOST_DMA_ISOC_NBYTES_SHIFT;
917197ba5f4SPaul Zimmerman 
918197ba5f4SPaul Zimmerman 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
919197ba5f4SPaul Zimmerman 		/*
920197ba5f4SPaul Zimmerman 		 * XactError, or unable to complete all the transactions
921197ba5f4SPaul Zimmerman 		 * in the scheduled micro-frame/frame, both indicated by
922197ba5f4SPaul Zimmerman 		 * HOST_DMA_STS_PKTERR
923197ba5f4SPaul Zimmerman 		 */
924197ba5f4SPaul Zimmerman 		qtd->urb->error_count++;
925197ba5f4SPaul Zimmerman 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
926197ba5f4SPaul Zimmerman 		frame_desc->status = -EPROTO;
927197ba5f4SPaul Zimmerman 	} else {
928197ba5f4SPaul Zimmerman 		/* Success */
929197ba5f4SPaul Zimmerman 		frame_desc->actual_length = qh->n_bytes[idx] - remain;
930197ba5f4SPaul Zimmerman 		frame_desc->status = 0;
931197ba5f4SPaul Zimmerman 	}
932197ba5f4SPaul Zimmerman 
933197ba5f4SPaul Zimmerman 	if (++qtd->isoc_frame_index == qtd->urb->packet_count) {
934197ba5f4SPaul Zimmerman 		/*
935197ba5f4SPaul Zimmerman 		 * urb->status is not used for isoc transfers here. The
936197ba5f4SPaul Zimmerman 		 * individual frame_desc statuses are used instead.
937197ba5f4SPaul Zimmerman 		 */
938197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, 0);
939197ba5f4SPaul Zimmerman 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
940197ba5f4SPaul Zimmerman 
941197ba5f4SPaul Zimmerman 		/*
942197ba5f4SPaul Zimmerman 		 * This check is necessary because urb_dequeue can be called
943197ba5f4SPaul Zimmerman 		 * from urb complete callback (sound driver for example). All
944197ba5f4SPaul Zimmerman 		 * from the urb complete callback (a sound driver, for example). All
945197ba5f4SPaul Zimmerman 		 * processing.
946197ba5f4SPaul Zimmerman 		 */
947197ba5f4SPaul Zimmerman 		if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE)
948197ba5f4SPaul Zimmerman 			return -1;
949197ba5f4SPaul Zimmerman 		rc = DWC2_CMPL_DONE;
950197ba5f4SPaul Zimmerman 	}
951197ba5f4SPaul Zimmerman 
952197ba5f4SPaul Zimmerman 	qh->ntd--;
953197ba5f4SPaul Zimmerman 
954197ba5f4SPaul Zimmerman 	/* Stop if IOC requested descriptor reached */
955197ba5f4SPaul Zimmerman 	if (dma_desc->status & HOST_DMA_IOC)
956197ba5f4SPaul Zimmerman 		rc = DWC2_CMPL_STOP;
957197ba5f4SPaul Zimmerman 
958197ba5f4SPaul Zimmerman 	return rc;
959197ba5f4SPaul Zimmerman }
960197ba5f4SPaul Zimmerman 
961197ba5f4SPaul Zimmerman static void dwc2_complete_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
962197ba5f4SPaul Zimmerman 					 struct dwc2_host_chan *chan,
963197ba5f4SPaul Zimmerman 					 enum dwc2_halt_status halt_status)
964197ba5f4SPaul Zimmerman {
965197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
966197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd, *qtd_tmp;
967197ba5f4SPaul Zimmerman 	struct dwc2_qh *qh;
968197ba5f4SPaul Zimmerman 	u16 idx;
969197ba5f4SPaul Zimmerman 	int rc;
970197ba5f4SPaul Zimmerman 
971197ba5f4SPaul Zimmerman 	qh = chan->qh;
972197ba5f4SPaul Zimmerman 	idx = qh->td_first;
973197ba5f4SPaul Zimmerman 
974197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
975197ba5f4SPaul Zimmerman 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
976197ba5f4SPaul Zimmerman 			qtd->in_process = 0;
977197ba5f4SPaul Zimmerman 		return;
978197ba5f4SPaul Zimmerman 	}
979197ba5f4SPaul Zimmerman 
980197ba5f4SPaul Zimmerman 	if (halt_status == DWC2_HC_XFER_AHB_ERR ||
981197ba5f4SPaul Zimmerman 	    halt_status == DWC2_HC_XFER_BABBLE_ERR) {
982197ba5f4SPaul Zimmerman 		/*
983197ba5f4SPaul Zimmerman 		 * The channel is halted in these error cases, which are
984197ba5f4SPaul Zimmerman 		 * considered serious issues.
985197ba5f4SPaul Zimmerman 		 * Complete all URBs, marking all frames as failed, irrespective
986197ba5f4SPaul Zimmerman 		 * of whether some of the descriptors (frames) succeeded or not.
987197ba5f4SPaul Zimmerman 		 * Pass the error code to the completion routine as well, to
988197ba5f4SPaul Zimmerman 		 * update urb->status; some class drivers might use it to stop
989197ba5f4SPaul Zimmerman 		 * queuing transfer requests.
990197ba5f4SPaul Zimmerman 		 */
991197ba5f4SPaul Zimmerman 		int err = halt_status == DWC2_HC_XFER_AHB_ERR ?
992197ba5f4SPaul Zimmerman 			  -EIO : -EOVERFLOW;
993197ba5f4SPaul Zimmerman 
994197ba5f4SPaul Zimmerman 		list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list,
995197ba5f4SPaul Zimmerman 					 qtd_list_entry) {
996197ba5f4SPaul Zimmerman 			if (qtd->urb) {
997197ba5f4SPaul Zimmerman 				for (idx = 0; idx < qtd->urb->packet_count;
998197ba5f4SPaul Zimmerman 				     idx++) {
999197ba5f4SPaul Zimmerman 					frame_desc = &qtd->urb->iso_descs[idx];
1000197ba5f4SPaul Zimmerman 					frame_desc->status = err;
1001197ba5f4SPaul Zimmerman 				}
1002197ba5f4SPaul Zimmerman 
1003197ba5f4SPaul Zimmerman 				dwc2_host_complete(hsotg, qtd, err);
1004197ba5f4SPaul Zimmerman 			}
1005197ba5f4SPaul Zimmerman 
1006197ba5f4SPaul Zimmerman 			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
1007197ba5f4SPaul Zimmerman 		}
1008197ba5f4SPaul Zimmerman 
1009197ba5f4SPaul Zimmerman 		return;
1010197ba5f4SPaul Zimmerman 	}
1011197ba5f4SPaul Zimmerman 
1012197ba5f4SPaul Zimmerman 	list_for_each_entry_safe(qtd, qtd_tmp, &qh->qtd_list, qtd_list_entry) {
1013197ba5f4SPaul Zimmerman 		if (!qtd->in_process)
1014197ba5f4SPaul Zimmerman 			break;
1015762d3a1aSGregory Herrero 
1016762d3a1aSGregory Herrero 		/*
1017762d3a1aSGregory Herrero 		 * Ensure idx corresponds to the descriptor where the first URB
1018762d3a1aSGregory Herrero 		 * of this qtd was added. In fact, during isoc desc init, dwc2
1019762d3a1aSGregory Herrero 		 * may skip an index if the current frame number is already past it.
1020762d3a1aSGregory Herrero 		 */
1021762d3a1aSGregory Herrero 		if (idx != qtd->isoc_td_first) {
1022762d3a1aSGregory Herrero 			dev_vdbg(hsotg->dev,
1023762d3a1aSGregory Herrero 				 "try to complete %d instead of %d\n",
1024762d3a1aSGregory Herrero 				 idx, qtd->isoc_td_first);
1025762d3a1aSGregory Herrero 			idx = qtd->isoc_td_first;
1026762d3a1aSGregory Herrero 		}
1027762d3a1aSGregory Herrero 
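		/*
		 * Complete the descriptors used by this qtd one at a time,
		 * advancing idx by the descriptor stride for this interval
		 * and speed, until the qtd is done, the scan must stop, or
		 * idx wraps back around to qh->td_first.
		 */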
1028197ba5f4SPaul Zimmerman 		do {
1029762d3a1aSGregory Herrero 			struct dwc2_qtd *qtd_next;
1030762d3a1aSGregory Herrero 			u16 cur_idx;
1031762d3a1aSGregory Herrero 
1032197ba5f4SPaul Zimmerman 			rc = dwc2_cmpl_host_isoc_dma_desc(hsotg, chan, qtd, qh,
1033197ba5f4SPaul Zimmerman 							  idx);
1034197ba5f4SPaul Zimmerman 			if (rc < 0)
1035197ba5f4SPaul Zimmerman 				return;
1036ced9eee1SDouglas Anderson 			idx = dwc2_desclist_idx_inc(idx, qh->host_interval,
1037197ba5f4SPaul Zimmerman 						    chan->speed);
1038762d3a1aSGregory Herrero 			if (!rc)
1039762d3a1aSGregory Herrero 				continue;
1040762d3a1aSGregory Herrero 
1041197ba5f4SPaul Zimmerman 			if (rc == DWC2_CMPL_DONE)
1042197ba5f4SPaul Zimmerman 				break;
1043762d3a1aSGregory Herrero 
1044762d3a1aSGregory Herrero 			/* rc == DWC2_CMPL_STOP */
1045762d3a1aSGregory Herrero 
1046ced9eee1SDouglas Anderson 			if (qh->host_interval >= 32)
1047762d3a1aSGregory Herrero 				goto stop_scan;
1048762d3a1aSGregory Herrero 
1049762d3a1aSGregory Herrero 			qh->td_first = idx;
1050762d3a1aSGregory Herrero 			cur_idx = dwc2_frame_list_idx(hsotg->frame_number);
1051762d3a1aSGregory Herrero 			qtd_next = list_first_entry(&qh->qtd_list,
1052762d3a1aSGregory Herrero 						    struct dwc2_qtd,
1053762d3a1aSGregory Herrero 						    qtd_list_entry);
1054762d3a1aSGregory Herrero 			if (dwc2_frame_idx_num_gt(cur_idx,
1055762d3a1aSGregory Herrero 						  qtd_next->isoc_td_last))
1056762d3a1aSGregory Herrero 				break;
1057762d3a1aSGregory Herrero 
1058762d3a1aSGregory Herrero 			goto stop_scan;
1059762d3a1aSGregory Herrero 
1060197ba5f4SPaul Zimmerman 		} while (idx != qh->td_first);
1061197ba5f4SPaul Zimmerman 	}
1062197ba5f4SPaul Zimmerman 
1063197ba5f4SPaul Zimmerman stop_scan:
1064197ba5f4SPaul Zimmerman 	qh->td_first = idx;
1065197ba5f4SPaul Zimmerman }
1066197ba5f4SPaul Zimmerman 
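/**
 * dwc2_update_non_isoc_urb_state_ddma() - Updates the URB state from one
 * completed non-isochronous DMA descriptor
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the descriptor belongs to
 * @qtd:         QTD of the URB being processed
 * @dma_desc:    Descriptor to evaluate
 * @halt_status: Reason the channel is being halted
 * @n_bytes:     Number of bytes originally programmed into the descriptor
 * @xfer_done:   Set to 1 when the URB transfer is complete
 *
 * Translates descriptor and halt error status into urb->status and adds the
 * number of bytes actually transferred to urb->actual_length.
 *
 * Return: 1 if the descriptor reported a failure, 0 otherwise
 */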
1067197ba5f4SPaul Zimmerman static int dwc2_update_non_isoc_urb_state_ddma(struct dwc2_hsotg *hsotg,
1068197ba5f4SPaul Zimmerman 					       struct dwc2_host_chan *chan,
1069197ba5f4SPaul Zimmerman 					struct dwc2_qtd *qtd,
1070ec703251SVahram Aharonyan 					struct dwc2_dma_desc *dma_desc,
1071197ba5f4SPaul Zimmerman 					enum dwc2_halt_status halt_status,
1072197ba5f4SPaul Zimmerman 					u32 n_bytes, int *xfer_done)
1073197ba5f4SPaul Zimmerman {
1074197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
1075197ba5f4SPaul Zimmerman 	u16 remain = 0;
1076197ba5f4SPaul Zimmerman 
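	/*
	 * For IN transfers the controller writes the count of bytes it did
	 * not transfer back into the descriptor's NBYTES field, so the bytes
	 * actually received are the programmed size minus this remainder.
	 */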
1077197ba5f4SPaul Zimmerman 	if (chan->ep_is_in)
1078197ba5f4SPaul Zimmerman 		remain = (dma_desc->status & HOST_DMA_NBYTES_MASK) >>
1079197ba5f4SPaul Zimmerman 			 HOST_DMA_NBYTES_SHIFT;
1080197ba5f4SPaul Zimmerman 
1081197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "remain=%d dwc2_urb=%p\n", remain, urb);
1082197ba5f4SPaul Zimmerman 
1083197ba5f4SPaul Zimmerman 	if (halt_status == DWC2_HC_XFER_AHB_ERR) {
1084197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "EIO\n");
1085197ba5f4SPaul Zimmerman 		urb->status = -EIO;
1086197ba5f4SPaul Zimmerman 		return 1;
1087197ba5f4SPaul Zimmerman 	}
1088197ba5f4SPaul Zimmerman 
1089197ba5f4SPaul Zimmerman 	if ((dma_desc->status & HOST_DMA_STS_MASK) == HOST_DMA_STS_PKTERR) {
1090197ba5f4SPaul Zimmerman 		switch (halt_status) {
1091197ba5f4SPaul Zimmerman 		case DWC2_HC_XFER_STALL:
1092197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "Stall\n");
1093197ba5f4SPaul Zimmerman 			urb->status = -EPIPE;
1094197ba5f4SPaul Zimmerman 			break;
1095197ba5f4SPaul Zimmerman 		case DWC2_HC_XFER_BABBLE_ERR:
1096197ba5f4SPaul Zimmerman 			dev_err(hsotg->dev, "Babble\n");
1097197ba5f4SPaul Zimmerman 			urb->status = -EOVERFLOW;
1098197ba5f4SPaul Zimmerman 			break;
1099197ba5f4SPaul Zimmerman 		case DWC2_HC_XFER_XACT_ERR:
1100197ba5f4SPaul Zimmerman 			dev_err(hsotg->dev, "XactErr\n");
1101197ba5f4SPaul Zimmerman 			urb->status = -EPROTO;
1102197ba5f4SPaul Zimmerman 			break;
1103197ba5f4SPaul Zimmerman 		default:
1104197ba5f4SPaul Zimmerman 			dev_err(hsotg->dev,
1105197ba5f4SPaul Zimmerman 				"%s: Unhandled descriptor error status (%d)\n",
1106197ba5f4SPaul Zimmerman 				__func__, halt_status);
1107197ba5f4SPaul Zimmerman 			break;
1108197ba5f4SPaul Zimmerman 		}
1109197ba5f4SPaul Zimmerman 		return 1;
1110197ba5f4SPaul Zimmerman 	}
1111197ba5f4SPaul Zimmerman 
1112197ba5f4SPaul Zimmerman 	if (dma_desc->status & HOST_DMA_A) {
1113197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
1114197ba5f4SPaul Zimmerman 			 "Active descriptor encountered on channel %d\n",
1115197ba5f4SPaul Zimmerman 			 chan->hc_num);
1116197ba5f4SPaul Zimmerman 		return 0;
1117197ba5f4SPaul Zimmerman 	}
1118197ba5f4SPaul Zimmerman 
1119197ba5f4SPaul Zimmerman 	if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1120197ba5f4SPaul Zimmerman 		if (qtd->control_phase == DWC2_CONTROL_DATA) {
1121197ba5f4SPaul Zimmerman 			urb->actual_length += n_bytes - remain;
1122197ba5f4SPaul Zimmerman 			if (remain || urb->actual_length >= urb->length) {
1123197ba5f4SPaul Zimmerman 				/*
1124197ba5f4SPaul Zimmerman 				 * For the Control Data stage do not set
1125197ba5f4SPaul Zimmerman 				 * urb->status to 0, to prevent the URB callback.
1126197ba5f4SPaul Zimmerman 				 * Set it when the Status phase is done. See below.
1127197ba5f4SPaul Zimmerman 				 */
1128197ba5f4SPaul Zimmerman 				*xfer_done = 1;
1129197ba5f4SPaul Zimmerman 			}
1130197ba5f4SPaul Zimmerman 		} else if (qtd->control_phase == DWC2_CONTROL_STATUS) {
1131197ba5f4SPaul Zimmerman 			urb->status = 0;
1132197ba5f4SPaul Zimmerman 			*xfer_done = 1;
1133197ba5f4SPaul Zimmerman 		}
1134197ba5f4SPaul Zimmerman 		/* No handling for SETUP stage */
1135197ba5f4SPaul Zimmerman 	} else {
1136197ba5f4SPaul Zimmerman 		/* BULK and INTR */
1137197ba5f4SPaul Zimmerman 		urb->actual_length += n_bytes - remain;
1138197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "length=%d actual=%d\n", urb->length,
1139197ba5f4SPaul Zimmerman 			 urb->actual_length);
1140197ba5f4SPaul Zimmerman 		if (remain || urb->actual_length >= urb->length) {
1141197ba5f4SPaul Zimmerman 			urb->status = 0;
1142197ba5f4SPaul Zimmerman 			*xfer_done = 1;
1143197ba5f4SPaul Zimmerman 		}
1144197ba5f4SPaul Zimmerman 	}
1145197ba5f4SPaul Zimmerman 
1146197ba5f4SPaul Zimmerman 	return 0;
1147197ba5f4SPaul Zimmerman }
1148197ba5f4SPaul Zimmerman 
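/**
 * dwc2_process_non_isoc_desc() - Processes one descriptor of a
 * non-isochronous transfer
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @qtd:         QTD of the URB being processed
 * @desc_num:    Index of the descriptor in the QH descriptor list
 * @halt_status: Reason the channel is being halted
 * @xfer_done:   Set to 1 when the URB transfer is complete
 *
 * Syncs the descriptor for the CPU, updates the URB state and, if the URB
 * failed or completed, gives it back and frees the QTD. For control
 * transfers the control phase is advanced as the Setup, Data and Status
 * stages complete.
 *
 * Return: non-zero if the caller should stop scanning (missing URB or a
 * failed descriptor), 0 otherwise
 */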
1149197ba5f4SPaul Zimmerman static int dwc2_process_non_isoc_desc(struct dwc2_hsotg *hsotg,
1150197ba5f4SPaul Zimmerman 				      struct dwc2_host_chan *chan,
1151197ba5f4SPaul Zimmerman 				      int chnum, struct dwc2_qtd *qtd,
1152197ba5f4SPaul Zimmerman 				      int desc_num,
1153197ba5f4SPaul Zimmerman 				      enum dwc2_halt_status halt_status,
1154197ba5f4SPaul Zimmerman 				      int *xfer_done)
1155197ba5f4SPaul Zimmerman {
1156197ba5f4SPaul Zimmerman 	struct dwc2_qh *qh = chan->qh;
1157197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
1158ec703251SVahram Aharonyan 	struct dwc2_dma_desc *dma_desc;
1159197ba5f4SPaul Zimmerman 	u32 n_bytes;
1160197ba5f4SPaul Zimmerman 	int failed;
1161197ba5f4SPaul Zimmerman 
1162197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
1163197ba5f4SPaul Zimmerman 
1164197ba5f4SPaul Zimmerman 	if (!urb)
1165197ba5f4SPaul Zimmerman 		return -EINVAL;
1166197ba5f4SPaul Zimmerman 
116795105a99SGregory Herrero 	dma_sync_single_for_cpu(hsotg->dev,
116895105a99SGregory Herrero 				qh->desc_list_dma + (desc_num *
1169ec703251SVahram Aharonyan 				sizeof(struct dwc2_dma_desc)),
1170ec703251SVahram Aharonyan 				sizeof(struct dwc2_dma_desc),
117195105a99SGregory Herrero 				DMA_FROM_DEVICE);
117295105a99SGregory Herrero 
1173197ba5f4SPaul Zimmerman 	dma_desc = &qh->desc_list[desc_num];
1174197ba5f4SPaul Zimmerman 	n_bytes = qh->n_bytes[desc_num];
1175197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev,
1176197ba5f4SPaul Zimmerman 		 "qtd=%p dwc2_urb=%p desc_num=%d desc=%p n_bytes=%d\n",
1177197ba5f4SPaul Zimmerman 		 qtd, urb, desc_num, dma_desc, n_bytes);
1178197ba5f4SPaul Zimmerman 	failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
1179197ba5f4SPaul Zimmerman 						     halt_status, n_bytes,
1180197ba5f4SPaul Zimmerman 						     xfer_done);
11813142a16bSVardan Mikayelyan 	if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
1182197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, urb->status);
1183197ba5f4SPaul Zimmerman 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
11843142a16bSVardan Mikayelyan 		dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
11853142a16bSVardan Mikayelyan 			 failed, *xfer_done);
1186197ba5f4SPaul Zimmerman 		return failed;
1187197ba5f4SPaul Zimmerman 	}
1188197ba5f4SPaul Zimmerman 
1189197ba5f4SPaul Zimmerman 	if (qh->ep_type == USB_ENDPOINT_XFER_CONTROL) {
1190197ba5f4SPaul Zimmerman 		switch (qtd->control_phase) {
1191197ba5f4SPaul Zimmerman 		case DWC2_CONTROL_SETUP:
1192197ba5f4SPaul Zimmerman 			if (urb->length > 0)
1193197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_DATA;
1194197ba5f4SPaul Zimmerman 			else
1195197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_STATUS;
1196197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
1197197ba5f4SPaul Zimmerman 				 "  Control setup transaction done\n");
1198197ba5f4SPaul Zimmerman 			break;
1199197ba5f4SPaul Zimmerman 		case DWC2_CONTROL_DATA:
1200197ba5f4SPaul Zimmerman 			if (*xfer_done) {
1201197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_STATUS;
1202197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1203197ba5f4SPaul Zimmerman 					 "  Control data transfer done\n");
1204197ba5f4SPaul Zimmerman 			} else if (desc_num + 1 == qtd->n_desc) {
1205197ba5f4SPaul Zimmerman 				/*
1206197ba5f4SPaul Zimmerman 				 * Last descriptor for Control data stage which
1207197ba5f4SPaul Zimmerman 				 * is not completed yet
1208197ba5f4SPaul Zimmerman 				 */
1209197ba5f4SPaul Zimmerman 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1210197ba5f4SPaul Zimmerman 							  qtd);
1211197ba5f4SPaul Zimmerman 			}
1212197ba5f4SPaul Zimmerman 			break;
1213197ba5f4SPaul Zimmerman 		default:
1214197ba5f4SPaul Zimmerman 			break;
1215197ba5f4SPaul Zimmerman 		}
1216197ba5f4SPaul Zimmerman 	}
1217197ba5f4SPaul Zimmerman 
1218197ba5f4SPaul Zimmerman 	return 0;
1219197ba5f4SPaul Zimmerman }
1220197ba5f4SPaul Zimmerman 
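/**
 * dwc2_complete_non_isoc_xfer_ddma() - Completes a non-isochronous transfer
 * on a halted host channel
 *
 * @hsotg:       The HCD state structure for the DWC OTG controller
 * @chan:        Host channel the transfer is completed on
 * @chnum:       Index of Host channel registers
 * @halt_status: Reason the channel is being halted
 *
 * On URB dequeue only the in_process flags of the queued QTDs are cleared.
 * Otherwise every descriptor of every queued QTD is processed until one of
 * them reports a failure. For bulk and interrupt endpoints the data toggle
 * is then reset (on a stall) or saved, and the PING state is entered when
 * the last transaction of a completed transfer was answered with a NYET.
 */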
1221197ba5f4SPaul Zimmerman static void dwc2_complete_non_isoc_xfer_ddma(struct dwc2_hsotg *hsotg,
1222197ba5f4SPaul Zimmerman 					     struct dwc2_host_chan *chan,
1223197ba5f4SPaul Zimmerman 					     int chnum,
1224197ba5f4SPaul Zimmerman 					     enum dwc2_halt_status halt_status)
1225197ba5f4SPaul Zimmerman {
1226197ba5f4SPaul Zimmerman 	struct list_head *qtd_item, *qtd_tmp;
1227197ba5f4SPaul Zimmerman 	struct dwc2_qh *qh = chan->qh;
1228197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd = NULL;
1229197ba5f4SPaul Zimmerman 	int xfer_done;
1230197ba5f4SPaul Zimmerman 	int desc_num = 0;
1231197ba5f4SPaul Zimmerman 
1232197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1233197ba5f4SPaul Zimmerman 		list_for_each_entry(qtd, &qh->qtd_list, qtd_list_entry)
1234197ba5f4SPaul Zimmerman 			qtd->in_process = 0;
1235197ba5f4SPaul Zimmerman 		return;
1236197ba5f4SPaul Zimmerman 	}
1237197ba5f4SPaul Zimmerman 
1238197ba5f4SPaul Zimmerman 	list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
1239197ba5f4SPaul Zimmerman 		int i;
12403142a16bSVardan Mikayelyan 		int qtd_desc_count;
1241197ba5f4SPaul Zimmerman 
1242197ba5f4SPaul Zimmerman 		qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
1243197ba5f4SPaul Zimmerman 		xfer_done = 0;
12443142a16bSVardan Mikayelyan 		qtd_desc_count = qtd->n_desc;
1245197ba5f4SPaul Zimmerman 
12463142a16bSVardan Mikayelyan 		for (i = 0; i < qtd_desc_count; i++) {
1247197ba5f4SPaul Zimmerman 			if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
1248197ba5f4SPaul Zimmerman 						       desc_num, halt_status,
12491fc65989SJohn Youn 						       &xfer_done)) {
12501fc65989SJohn Youn 				qtd = NULL;
12513142a16bSVardan Mikayelyan 				goto stop_scan;
12521fc65989SJohn Youn 			}
12533142a16bSVardan Mikayelyan 
1254197ba5f4SPaul Zimmerman 			desc_num++;
1255197ba5f4SPaul Zimmerman 		}
1256197ba5f4SPaul Zimmerman 	}
1257197ba5f4SPaul Zimmerman 
12583142a16bSVardan Mikayelyan stop_scan:
1259197ba5f4SPaul Zimmerman 	if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
1260197ba5f4SPaul Zimmerman 		/*
1261197ba5f4SPaul Zimmerman 		 * Resetting the data toggle for bulk and interrupt endpoints
1262197ba5f4SPaul Zimmerman 		 * in case of stall. See handle_hc_stall_intr().
1263197ba5f4SPaul Zimmerman 		 */
1264197ba5f4SPaul Zimmerman 		if (halt_status == DWC2_HC_XFER_STALL)
1265197ba5f4SPaul Zimmerman 			qh->data_toggle = DWC2_HC_PID_DATA0;
126662943b7dSTang, Jianqiang 		else
12671fc65989SJohn Youn 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, NULL);
1268197ba5f4SPaul Zimmerman 	}
1269197ba5f4SPaul Zimmerman 
1270197ba5f4SPaul Zimmerman 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
1271197ba5f4SPaul Zimmerman 		if (chan->hcint & HCINTMSK_NYET) {
1272197ba5f4SPaul Zimmerman 			/*
1273197ba5f4SPaul Zimmerman 			 * Got a NYET on the last transaction of the transfer.
1274197ba5f4SPaul Zimmerman 			 * It means that the endpoint should be in the PING
1275197ba5f4SPaul Zimmerman 			 * state at the beginning of the next transfer.
1276197ba5f4SPaul Zimmerman 			 */
1277197ba5f4SPaul Zimmerman 			qh->ping_state = 1;
1278197ba5f4SPaul Zimmerman 		}
1279197ba5f4SPaul Zimmerman 	}
1280197ba5f4SPaul Zimmerman }
1281197ba5f4SPaul Zimmerman 
1282197ba5f4SPaul Zimmerman /**
1283197ba5f4SPaul Zimmerman  * dwc2_hcd_complete_xfer_ddma() - Scans the descriptor list, updates the
1284197ba5f4SPaul Zimmerman  * URB's status and calls the completion routine for the URB if it is done.
1285197ba5f4SPaul Zimmerman  * Called from interrupt handlers.
1286197ba5f4SPaul Zimmerman  *
1287197ba5f4SPaul Zimmerman  * @hsotg:       The HCD state structure for the DWC OTG controller
1288197ba5f4SPaul Zimmerman  * @chan:        Host channel the transfer is completed on
1289197ba5f4SPaul Zimmerman  * @chnum:       Index of Host channel registers
1290197ba5f4SPaul Zimmerman  * @halt_status: Reason the channel is being halted or just XferComplete
1291197ba5f4SPaul Zimmerman  *               for isochronous transfers
1292197ba5f4SPaul Zimmerman  *
1293197ba5f4SPaul Zimmerman  * Releases the channel to be used by other transfers.
1294197ba5f4SPaul Zimmerman  * In the case of an isochronous endpoint, the channel is not halted until the
1295197ba5f4SPaul Zimmerman  * end of the session, i.e. until the QTD list is empty.
1296197ba5f4SPaul Zimmerman  * If a periodic channel is released, the FrameList is updated accordingly.
1297197ba5f4SPaul Zimmerman  * Calls transaction selection routines to activate pending transfers.
1298197ba5f4SPaul Zimmerman  */
1299197ba5f4SPaul Zimmerman void dwc2_hcd_complete_xfer_ddma(struct dwc2_hsotg *hsotg,
1300197ba5f4SPaul Zimmerman 				 struct dwc2_host_chan *chan, int chnum,
1301197ba5f4SPaul Zimmerman 				 enum dwc2_halt_status halt_status)
1302197ba5f4SPaul Zimmerman {
1303197ba5f4SPaul Zimmerman 	struct dwc2_qh *qh = chan->qh;
1304197ba5f4SPaul Zimmerman 	int continue_isoc_xfer = 0;
1305197ba5f4SPaul Zimmerman 	enum dwc2_transaction_type tr_type;
1306197ba5f4SPaul Zimmerman 
1307197ba5f4SPaul Zimmerman 	if (chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1308197ba5f4SPaul Zimmerman 		dwc2_complete_isoc_xfer_ddma(hsotg, chan, halt_status);
1309197ba5f4SPaul Zimmerman 
1310197ba5f4SPaul Zimmerman 		/* Release the channel if halted or session completed */
1311197ba5f4SPaul Zimmerman 		if (halt_status != DWC2_HC_XFER_COMPLETE ||
1312197ba5f4SPaul Zimmerman 		    list_empty(&qh->qtd_list)) {
1313c503b381SGregory Herrero 			struct dwc2_qtd *qtd, *qtd_tmp;
1314c503b381SGregory Herrero 
1315c503b381SGregory Herrero 			/*
1316c503b381SGregory Herrero 			 * Kill all remaining QTDs since the channel has
1317c503b381SGregory Herrero 			 * been halted.
1318c503b381SGregory Herrero 			 */
1319c503b381SGregory Herrero 			list_for_each_entry_safe(qtd, qtd_tmp,
1320c503b381SGregory Herrero 						 &qh->qtd_list,
1321c503b381SGregory Herrero 						 qtd_list_entry) {
1322c503b381SGregory Herrero 				dwc2_host_complete(hsotg, qtd,
1323c503b381SGregory Herrero 						   -ECONNRESET);
1324c503b381SGregory Herrero 				dwc2_hcd_qtd_unlink_and_free(hsotg,
1325c503b381SGregory Herrero 							     qtd, qh);
1326c503b381SGregory Herrero 			}
1327c503b381SGregory Herrero 
1328197ba5f4SPaul Zimmerman 			/* Halt the channel if session completed */
1329197ba5f4SPaul Zimmerman 			if (halt_status == DWC2_HC_XFER_COMPLETE)
1330197ba5f4SPaul Zimmerman 				dwc2_hc_halt(hsotg, chan, halt_status);
1331197ba5f4SPaul Zimmerman 			dwc2_release_channel_ddma(hsotg, qh);
1332197ba5f4SPaul Zimmerman 			dwc2_hcd_qh_unlink(hsotg, qh);
1333197ba5f4SPaul Zimmerman 		} else {
1334197ba5f4SPaul Zimmerman 			/* Keep in assigned schedule to continue transfer */
133594ef7aeeSDouglas Anderson 			list_move_tail(&qh->qh_list_entry,
1336197ba5f4SPaul Zimmerman 				       &hsotg->periodic_sched_assigned);
1337c503b381SGregory Herrero 			/*
1338c503b381SGregory Herrero 			 * If the channel has been halted during URB giveback,
1339c503b381SGregory Herrero 			 * prevent any new scheduling.
1340c503b381SGregory Herrero 			 */
1341c503b381SGregory Herrero 			if (!chan->halt_status)
1342197ba5f4SPaul Zimmerman 				continue_isoc_xfer = 1;
1343197ba5f4SPaul Zimmerman 		}
1344197ba5f4SPaul Zimmerman 		/*
1345197ba5f4SPaul Zimmerman 		 * Todo: Consider the case when period exceeds FrameList size.
1346197ba5f4SPaul Zimmerman 		 * Frame Rollover interrupt should be used.
1347197ba5f4SPaul Zimmerman 		 */
1348197ba5f4SPaul Zimmerman 	} else {
1349197ba5f4SPaul Zimmerman 		/*
1350197ba5f4SPaul Zimmerman 		 * Scan descriptor list to complete the URB(s), then release
1351197ba5f4SPaul Zimmerman 		 * the channel
1352197ba5f4SPaul Zimmerman 		 */
1353197ba5f4SPaul Zimmerman 		dwc2_complete_non_isoc_xfer_ddma(hsotg, chan, chnum,
1354197ba5f4SPaul Zimmerman 						 halt_status);
1355197ba5f4SPaul Zimmerman 		dwc2_release_channel_ddma(hsotg, qh);
1356197ba5f4SPaul Zimmerman 		dwc2_hcd_qh_unlink(hsotg, qh);
1357197ba5f4SPaul Zimmerman 
1358197ba5f4SPaul Zimmerman 		if (!list_empty(&qh->qtd_list)) {
1359197ba5f4SPaul Zimmerman 			/*
1360197ba5f4SPaul Zimmerman 			 * Add back to inactive non-periodic schedule on normal
1361197ba5f4SPaul Zimmerman 			 * completion
1362197ba5f4SPaul Zimmerman 			 */
1363197ba5f4SPaul Zimmerman 			dwc2_hcd_qh_add(hsotg, qh);
1364197ba5f4SPaul Zimmerman 		}
1365197ba5f4SPaul Zimmerman 	}
1366197ba5f4SPaul Zimmerman 
1367197ba5f4SPaul Zimmerman 	tr_type = dwc2_hcd_select_transactions(hsotg);
1368197ba5f4SPaul Zimmerman 	if (tr_type != DWC2_TRANSACTION_NONE || continue_isoc_xfer) {
1369197ba5f4SPaul Zimmerman 		if (continue_isoc_xfer) {
1370197ba5f4SPaul Zimmerman 			if (tr_type == DWC2_TRANSACTION_NONE)
1371197ba5f4SPaul Zimmerman 				tr_type = DWC2_TRANSACTION_PERIODIC;
1372197ba5f4SPaul Zimmerman 			else if (tr_type == DWC2_TRANSACTION_NON_PERIODIC)
1373197ba5f4SPaul Zimmerman 				tr_type = DWC2_TRANSACTION_ALL;
1374197ba5f4SPaul Zimmerman 		}
1375197ba5f4SPaul Zimmerman 		dwc2_hcd_queue_transactions(hsotg, tr_type);
1376197ba5f4SPaul Zimmerman 	}
1377197ba5f4SPaul Zimmerman }
1378