xref: /openbmc/linux/drivers/usb/dwc2/hcd_intr.c (revision 151d0cbd)
/*
 * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the interrupt handlers for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

/* This function is for debug only */
static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
{
#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
	u16 curr_frame_number = hsotg->frame_number;

	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
		if (((hsotg->last_frame_num + 1) & HFNUM_MAX_FRNUM) !=
		    curr_frame_number) {
			hsotg->frame_num_array[hsotg->frame_num_idx] =
					curr_frame_number;
			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
					hsotg->last_frame_num;
			hsotg->frame_num_idx++;
		}
	} else if (!hsotg->dumped_frame_num_array) {
		int i;

		dev_info(hsotg->dev, "Frame     Last Frame\n");
		dev_info(hsotg->dev, "-----     ----------\n");
		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
				 hsotg->frame_num_array[i],
				 hsotg->last_frame_num_array[i]);
		}
		hsotg->dumped_frame_num_array = 1;
	}
	hsotg->last_frame_num = curr_frame_number;
#endif
}
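
/*
 * Worked example: the check above records a pair whenever the 14-bit
 * frame counter did not advance by exactly one. With last_frame_num =
 * 0x0100 and a current frame_number of 0x0102, (0x0100 + 1) &
 * HFNUM_MAX_FRNUM == 0x0101 != 0x0102, so the missed SOF is logged. The
 * wrap from 0x3FFF to 0x0000 is covered by the mask and is not treated
 * as a miss.
 */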

static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
				    struct dwc2_host_chan *chan,
				    struct dwc2_qtd *qtd)
{
	struct urb *usb_urb;

	if (!chan->qh)
		return;

	if (chan->qh->dev_speed == USB_SPEED_HIGH)
		return;

	if (!qtd->urb)
		return;

	usb_urb = qtd->urb->priv;
	if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
		return;

	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
		chan->qh->tt_buffer_dirty = 1;
		if (usb_hub_clear_tt_buffer(usb_urb))
			/* Clear failed; let's hope things work anyway */
			chan->qh->tt_buffer_dirty = 0;
	}
}

/*
 * Handles the start-of-frame interrupt in host mode. Non-periodic
 * transactions may be queued to the DWC_otg controller for the current
 * (micro)frame. Periodic transactions may be queued to the controller
 * for the next (micro)frame.
 */
static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
{
	struct list_head *qh_entry;
	struct dwc2_qh *qh;
	enum dwc2_transaction_type tr_type;

#ifdef DEBUG_SOF
	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
#endif

	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);

	dwc2_track_missed_sofs(hsotg);

	/* Determine whether any periodic QHs should be executed */
	qh_entry = hsotg->periodic_sched_inactive.next;
	while (qh_entry != &hsotg->periodic_sched_inactive) {
		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
		qh_entry = qh_entry->next;
		if (dwc2_frame_num_le(qh->sched_frame, hsotg->frame_number))
			/*
			 * Move QH to the ready list to be executed next
			 * (micro)frame
			 */
			list_move(&qh->qh_list_entry,
				  &hsotg->periodic_sched_ready);
	}
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);

	/* Clear interrupt */
	writel(GINTSTS_SOF, hsotg->regs + GINTSTS);
}
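
/*
 * Worked example: dwc2_frame_num_le() (a helper in hcd.h) compares
 * (micro)frame numbers modulo the 14-bit counter, roughly
 * ((frame2 - frame1) & HFNUM_MAX_FRNUM) <= HFNUM_MAX_FRNUM / 2. A QH
 * with sched_frame = 0x3FFE is therefore still considered due after the
 * counter wraps to frame_number = 0x0001, since
 * (0x0001 - 0x3FFE) & 0x3FFF == 0x0003, and it is moved to
 * periodic_sched_ready above.
 */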

/*
 * Handles the Rx FIFO Level Interrupt, which indicates that there is
 * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
 * memory if the DWC_otg controller is operating in Slave mode.
 */
static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
{
	u32 grxsts, chnum, bcnt, dpid, pktsts;
	struct dwc2_host_chan *chan;

	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");

	grxsts = readl(hsotg->regs + GRXSTSP);
	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
	chan = hsotg->hc_ptr_array[chnum];
	if (!chan) {
		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
		return;
	}

	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
	dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;

	/* Packet Status */
	if (dbg_perio()) {
		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
			 chan->data_pid_start);
		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
	}

	switch (pktsts) {
	case GRXSTS_PKTSTS_HCHIN:
		/* Read the data into the host buffer */
		if (bcnt > 0) {
			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);

			/* Update the HC fields for the next packet received */
			chan->xfer_count += bcnt;
			chan->xfer_buf += bcnt;
		}
		break;
	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
	case GRXSTS_PKTSTS_DATATOGGLEERR:
	case GRXSTS_PKTSTS_HCHHALTED:
		/* Handled in interrupt, just ignore data */
		break;
	default:
		dev_err(hsotg->dev,
			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
		break;
	}
}
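
/*
 * Worked example, assuming the usual GRXSTSP host-mode layout from hw.h
 * (chnum in bits 3:0, byte count in bits 14:4, data PID in bits 16:15,
 * packet status in bits 20:17): a popped value of 0x00040402 decodes as
 * chnum = 2, bcnt = 64, dpid = DATA0 and pktsts = GRXSTS_PKTSTS_HCHIN,
 * i.e. 64 bytes of IN data for channel 2 that the code above reads from
 * the FIFO into chan->xfer_buf.
 */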

/*
 * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
 * data packets may be written to the FIFO for OUT transfers. More requests
 * may be written to the non-periodic request queue for IN transfers. This
 * interrupt is enabled only in Slave mode.
 */
static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
}

/*
 * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
 * packets may be written to the FIFO for OUT transfers. More requests may be
 * written to the periodic request queue for IN transfers. This interrupt is
 * enabled only in Slave mode.
 */
static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
{
	if (dbg_perio())
		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
}

static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
			      u32 *hprt0_modify)
{
	struct dwc2_core_params *params = hsotg->core_params;
	int do_reset = 0;
	u32 usbcfg;
	u32 prtspd;
	u32 hcfg;
	u32 fslspclksel;
	u32 hfir;

	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);

	/* Recalculate HFIR.FrInterval every time the port is enabled */
	hfir = readl(hsotg->regs + HFIR);
	hfir &= ~HFIR_FRINT_MASK;
	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
		HFIR_FRINT_MASK;
	writel(hfir, hsotg->regs + HFIR);

	/* Check if we need to adjust the PHY clock speed for low power */
	if (!params->host_support_fs_ls_low_power) {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
		return;
	}

	usbcfg = readl(hsotg->regs + GUSBCFG);
	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;

	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
		/* Low power */
		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
			/* Set PHY low power clock select for FS/LS devices */
			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}

		hcfg = readl(hsotg->regs + HCFG);
		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
			      HCFG_FSLSPCLKSEL_SHIFT;

		if (prtspd == HPRT0_SPD_LOW_SPEED &&
		    params->host_ls_low_power_phy_clk ==
		    DWC2_HOST_LS_LOW_POWER_PHY_CLK_PARAM_6MHZ) {
			/* 6 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 6 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		} else {
			/* 48 MHZ */
			dev_vdbg(hsotg->dev,
				 "FS_PHY programming HCFG to 48 MHz\n");
			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
				writel(hcfg, hsotg->regs + HCFG);
				do_reset = 1;
			}
		}
	} else {
		/* Not low power */
		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
			writel(usbcfg, hsotg->regs + GUSBCFG);
			do_reset = 1;
		}
	}

	if (do_reset) {
		*hprt0_modify |= HPRT0_RST;
		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
				   msecs_to_jiffies(60));
	} else {
		/* Port has been enabled, set the reset change flag */
		hsotg->flags.b.port_reset_change = 1;
	}
}

/*
 * There are multiple conditions that can cause a port interrupt. This function
 * determines which interrupt conditions have occurred and handles them
 * appropriately.
 */
static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
{
	u32 hprt0;
	u32 hprt0_modify;

	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");

	hprt0 = readl(hsotg->regs + HPRT0);
	hprt0_modify = hprt0;

	/*
	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
	 * GINTSTS
	 */
	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
			  HPRT0_OVRCURRCHG);

	/*
	 * Port Connect Detected
	 * Set flag and clear if detected
	 */
	if (hprt0 & HPRT0_CONNDET) {
		dev_vdbg(hsotg->dev,
			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
			 hprt0);
		hsotg->flags.b.port_connect_status_change = 1;
		hsotg->flags.b.port_connect_status = 1;
		hprt0_modify |= HPRT0_CONNDET;

		/*
		 * The Hub driver asserts a reset when it sees port connect
		 * status change flag
		 */
	}

	/*
	 * Port Enable Changed
	 * Clear if detected - Set internal flag if disabled
	 */
	if (hprt0 & HPRT0_ENACHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
			 hprt0, !!(hprt0 & HPRT0_ENA));
		hprt0_modify |= HPRT0_ENACHG;
		if (hprt0 & HPRT0_ENA)
			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
		else
			hsotg->flags.b.port_enable_change = 1;
	}

	/* Overcurrent Change Interrupt */
	if (hprt0 & HPRT0_OVRCURRCHG) {
		dev_vdbg(hsotg->dev,
			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
			 hprt0);
		hsotg->flags.b.port_over_current_change = 1;
		hprt0_modify |= HPRT0_OVRCURRCHG;
	}

	/* Clear Port Interrupts */
	writel(hprt0_modify, hsotg->regs + HPRT0);
}

/*
 * Gets the actual length of a transfer after the transfer halts. halt_status
 * holds the reason for the halt.
 *
 * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
 * is set to 1 upon return if less than the requested number of bytes were
 * transferred. short_read may also be NULL on entry, in which case it remains
 * unchanged.
 */
static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd,
				       enum dwc2_halt_status halt_status,
				       int *short_read)
{
	u32 hctsiz, count, length;

	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));

	if (halt_status == DWC2_HC_XFER_COMPLETE) {
		if (chan->ep_is_in) {
			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
				TSIZ_XFERSIZE_SHIFT;
			length = chan->xfer_len - count;
			if (short_read != NULL)
				*short_read = (count != 0);
		} else if (chan->qh->do_split) {
			length = qtd->ssplit_out_xfer_count;
		} else {
			length = chan->xfer_len;
		}
	} else {
		/*
		 * Must use the hctsiz.pktcnt field to determine how much data
		 * has been transferred. This field reflects the number of
		 * packets that have been transferred via the USB. This is
		 * always an integral number of packets if the transfer was
		 * halted before its normal completion. (Can't use the
		 * hctsiz.xfersize field because that reflects the number of
		 * bytes transferred via the AHB, not the USB).
		 */
		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
		length = (chan->start_pkt_count - count) * chan->max_packet;
	}

	return length;
}
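
/*
 * Worked example: for an OUT transfer halted early with start_pkt_count
 * = 8, max_packet = 512 and hctsiz.pktcnt = 3, the function reports
 * (8 - 3) * 512 = 2560 bytes. For a completed IN transfer with xfer_len
 * = 512 and 64 bytes left in hctsiz.xfersize, it reports 512 - 64 = 448
 * bytes and sets *short_read to 1.
 */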

/**
 * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
 * Complete interrupt on the host channel. Updates the actual_length field
 * of the URB based on the number of bytes transferred via the host channel.
 * Sets the URB status if the data transfer is finished.
 *
 * Return: 1 if the data transfer specified by the URB is completely finished,
 * 0 otherwise
 */
static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan, int chnum,
				 struct dwc2_hcd_urb *urb,
				 struct dwc2_qtd *qtd)
{
	u32 hctsiz;
	int xfer_done = 0;
	int short_read = 0;
	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
						      DWC2_HC_XFER_COMPLETE,
						      &short_read);

	if (urb->actual_length + xfer_length > urb->length) {
		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
		xfer_length = urb->length - urb->actual_length;
	}

	/* Non DWORD-aligned buffer case handling */
	if (chan->align_buf && xfer_length && chan->ep_is_in) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
					DMA_FROM_DEVICE);
		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
		       xfer_length);
		dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
					   DMA_FROM_DEVICE);
	}

	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
		 urb->actual_length, xfer_length);
	urb->actual_length += xfer_length;

	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
	    (urb->flags & URB_SEND_ZERO_PACKET) &&
	    urb->actual_length >= urb->length &&
	    !(urb->length % chan->max_packet)) {
		xfer_done = 0;
	} else if (short_read || urb->actual_length >= urb->length) {
		xfer_done = 1;
		urb->status = 0;
	}

	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
		 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
		 xfer_done);

	return xfer_done;
}
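
/*
 * Worked example: the zero-length-packet check above keeps a bulk OUT
 * URB open when URB_SEND_ZERO_PACKET is set and the transfer ends
 * exactly on a max_packet boundary. A 1024-byte URB with max_packet =
 * 512 reaches actual_length == length with 1024 % 512 == 0, so
 * xfer_done stays 0 and a trailing zero-length packet goes out before
 * the URB is completed.
 */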

/*
 * Save the starting data toggle for the next transfer. The data toggle is
 * saved in the QH for non-control transfers and it's saved in the QTD for
 * control transfers.
 */
void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
			       struct dwc2_host_chan *chan, int chnum,
			       struct dwc2_qtd *qtd)
{
	u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;

	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
		else
			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
	} else {
		if (pid == TSIZ_SC_MC_PID_DATA0)
			qtd->data_toggle = DWC2_HC_PID_DATA0;
		else
			qtd->data_toggle = DWC2_HC_PID_DATA1;
	}
}

/**
 * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
 * the transfer is stopped for any reason. The fields of the current entry in
 * the frame descriptor array are set based on the transfer state and the input
 * halt_status. Completes the Isochronous URB if all the URB frames have been
 * completed.
 *
 * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
 * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
 */
static enum dwc2_halt_status dwc2_update_isoc_urb_state(
		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
		int chnum, struct dwc2_qtd *qtd,
		enum dwc2_halt_status halt_status)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	struct dwc2_hcd_urb *urb = qtd->urb;

	if (!urb)
		return DWC2_HC_XFER_NO_HALT_STATUS;

	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];

	switch (halt_status) {
	case DWC2_HC_XFER_COMPLETE:
		frame_desc->status = 0;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			dma_sync_single_for_cpu(hsotg->dev, urb->dma,
						urb->length, DMA_FROM_DEVICE);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			dma_sync_single_for_device(hsotg->dev, urb->dma,
						   urb->length,
						   DMA_FROM_DEVICE);
		}
		break;
	case DWC2_HC_XFER_FRAME_OVERRUN:
		urb->error_count++;
		if (chan->ep_is_in)
			frame_desc->status = -ENOSR;
		else
			frame_desc->status = -ECOMM;
		frame_desc->actual_length = 0;
		break;
	case DWC2_HC_XFER_BABBLE_ERR:
		urb->error_count++;
		frame_desc->status = -EOVERFLOW;
		/* Don't need to update actual_length in this case */
		break;
	case DWC2_HC_XFER_XACT_ERR:
		urb->error_count++;
		frame_desc->status = -EPROTO;
		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
					chan, chnum, qtd, halt_status, NULL);

		/* Non DWORD-aligned buffer case handling */
		if (chan->align_buf && frame_desc->actual_length &&
		    chan->ep_is_in) {
			dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n",
				 __func__);
			dma_sync_single_for_cpu(hsotg->dev, urb->dma,
						urb->length, DMA_FROM_DEVICE);
			memcpy(urb->buf + frame_desc->offset +
			       qtd->isoc_split_offset, chan->qh->dw_align_buf,
			       frame_desc->actual_length);
			dma_sync_single_for_device(hsotg->dev, urb->dma,
						   urb->length,
						   DMA_FROM_DEVICE);
		}

		/* Skip whole frame */
		if (chan->qh->do_split &&
		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			qtd->complete_split = 0;
			qtd->isoc_split_offset = 0;
		}

		break;
	default:
		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
			halt_status);
		break;
	}

	if (++qtd->isoc_frame_index == urb->packet_count) {
		/*
		 * urb->status is not used for isoc transfers. The individual
		 * frame_desc statuses are used instead.
		 */
		dwc2_host_complete(hsotg, qtd, 0);
		halt_status = DWC2_HC_XFER_URB_COMPLETE;
	} else {
		halt_status = DWC2_HC_XFER_COMPLETE;
	}

	return halt_status;
}

/*
 * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
 * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
 * still linked to the QH, the QH is added to the end of the inactive
 * non-periodic schedule. For periodic QHs, removes the QH from the periodic
 * schedule if no more QTDs are linked to the QH.
 */
static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			       int free_qtd)
{
	int continue_split = 0;
	struct dwc2_qtd *qtd;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
			 hsotg, qh, free_qtd);

	if (list_empty(&qh->qtd_list)) {
		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
		goto no_qtd;
	}

	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);

	if (qtd->complete_split)
		continue_split = 1;
	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
		continue_split = 1;

	if (free_qtd) {
		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
		continue_split = 0;
	}

no_qtd:
	if (qh->channel)
		qh->channel->align_buf = 0;
	qh->channel = NULL;
	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
}

/**
 * dwc2_release_channel() - Releases a host channel for use by other transfers
 *
 * @hsotg:       The HCD state structure
 * @chan:        The host channel to release
 * @qtd:         The QTD associated with the host channel. This QTD may be
 *               freed if the transfer is complete or an error has occurred.
 * @halt_status: Reason the channel is being released. This status
 *               determines the actions taken by this function.
 *
 * Also attempts to select and queue more transactions since at least one host
 * channel is available.
 */
static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
				 struct dwc2_host_chan *chan,
				 struct dwc2_qtd *qtd,
				 enum dwc2_halt_status halt_status)
{
	enum dwc2_transaction_type tr_type;
	u32 haintmsk;
	int free_qtd = 0;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
			 __func__, chan->hc_num, halt_status);

	switch (halt_status) {
	case DWC2_HC_XFER_URB_COMPLETE:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_AHB_ERR:
	case DWC2_HC_XFER_STALL:
	case DWC2_HC_XFER_BABBLE_ERR:
		free_qtd = 1;
		break;
	case DWC2_HC_XFER_XACT_ERR:
		if (qtd && qtd->error_count >= 3) {
			dev_vdbg(hsotg->dev,
				 "  Complete URB with transaction error\n");
			free_qtd = 1;
			dwc2_host_complete(hsotg, qtd, -EPROTO);
		}
		break;
	case DWC2_HC_XFER_URB_DEQUEUE:
		/*
		 * The QTD has already been removed and the QH has been
		 * deactivated. Don't want to do anything except release the
		 * host channel and try to queue more transfers.
		 */
		goto cleanup;
	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
		free_qtd = 1;
		dwc2_host_complete(hsotg, qtd, -EIO);
		break;
	case DWC2_HC_XFER_NO_HALT_STATUS:
	default:
		break;
	}

	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);

cleanup:
	/*
	 * Release the host channel for use by other transfers. The cleanup
	 * function clears the channel interrupt enables and conditions, so
	 * there's no need to clear the Channel Halted interrupt separately.
	 */
	if (!list_empty(&chan->hc_list_entry))
		list_del(&chan->hc_list_entry);
	dwc2_hc_cleanup(hsotg, chan);
	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);

	if (hsotg->core_params->uframe_sched > 0) {
		hsotg->available_host_channels++;
	} else {
		switch (chan->ep_type) {
		case USB_ENDPOINT_XFER_CONTROL:
		case USB_ENDPOINT_XFER_BULK:
			hsotg->non_periodic_channels--;
			break;
		default:
			/*
			 * Don't release reservations for periodic channels
			 * here. That's done when a periodic transfer is
			 * descheduled (i.e. when the QH is removed from the
			 * periodic schedule).
			 */
			break;
		}
	}

	haintmsk = readl(hsotg->regs + HAINTMSK);
	haintmsk &= ~(1 << chan->hc_num);
	writel(haintmsk, hsotg->regs + HAINTMSK);

	/* Try to queue more transfers now that there's a free channel */
	tr_type = dwc2_hcd_select_transactions(hsotg);
	if (tr_type != DWC2_TRANSACTION_NONE)
		dwc2_hcd_queue_transactions(hsotg, tr_type);
}

/*
 * Halts a host channel. If the channel cannot be halted immediately because
 * the request queue is full, this function ensures that the FIFO empty
 * interrupt for the appropriate queue is enabled so that the halt request can
 * be queued when there is space in the request queue.
 *
 * This function may also be called in DMA mode. In that case, the channel is
 * simply released since the core always halts the channel automatically in
 * DMA mode.
 */
static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
			      enum dwc2_halt_status halt_status)
{
	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (hsotg->core_params->dma_enable > 0) {
		if (dbg_hc(chan))
			dev_vdbg(hsotg->dev, "DMA enabled\n");
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
		return;
	}

	/* Slave mode processing */
	dwc2_hc_halt(hsotg, chan, halt_status);

	if (chan->halt_on_queue) {
		u32 gintmsk;

		dev_vdbg(hsotg->dev, "Halt on queue\n");
		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
			dev_vdbg(hsotg->dev, "control/bulk\n");
			/*
			 * Make sure the Non-periodic Tx FIFO empty interrupt
			 * is enabled so that the non-periodic schedule will
			 * be processed
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_NPTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		} else {
			dev_vdbg(hsotg->dev, "isoc/intr\n");
			/*
			 * Move the QH from the periodic queued schedule to
			 * the periodic assigned schedule. This allows the
			 * halt to be queued when the periodic schedule is
			 * processed.
			 */
			list_move(&chan->qh->qh_list_entry,
				  &hsotg->periodic_sched_assigned);

			/*
			 * Make sure the Periodic Tx FIFO Empty interrupt is
			 * enabled so that the periodic schedule will be
			 * processed
			 */
			gintmsk = readl(hsotg->regs + GINTMSK);
			gintmsk |= GINTSTS_PTXFEMP;
			writel(gintmsk, hsotg->regs + GINTMSK);
		}
	}
}

/*
 * Performs common cleanup for non-periodic transfers after a Transfer
 * Complete interrupt. This function should be called after any endpoint type
 * specific handling is finished to release the host channel.
 */
static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
					    struct dwc2_host_chan *chan,
					    int chnum, struct dwc2_qtd *qtd,
					    enum dwc2_halt_status halt_status)
{
	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	qtd->error_count = 0;

	if (chan->hcint & HCINTMSK_NYET) {
		/*
		 * Got a NYET on the last transaction of the transfer. This
		 * means that the endpoint should be in the PING state at the
		 * beginning of the next transfer.
		 */
		dev_vdbg(hsotg->dev, "got NYET\n");
		chan->qh->ping_state = 1;
	}

	/*
	 * Always halt and release the host channel to make it available for
	 * more transfers. There may still be more phases for a control
	 * transfer or more data packets for a bulk transfer at this point,
	 * but the host channel is still halted. A channel will be reassigned
	 * to the transfer when the non-periodic schedule is processed after
	 * the channel is released. This allows transactions to be queued
	 * properly via dwc2_hcd_queue_transactions, which also enables the
	 * Tx FIFO Empty interrupt if necessary.
	 */
	if (chan->ep_is_in) {
		/*
		 * IN transfers in Slave mode require an explicit disable to
		 * halt the channel. (In DMA mode, this call simply releases
		 * the channel.)
		 */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
	} else {
		/*
		 * The channel is automatically disabled by the core for OUT
		 * transfers in Slave mode
		 */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	}
}

/*
 * Performs common cleanup for periodic transfers after a Transfer Complete
 * interrupt. This function should be called after any endpoint type specific
 * handling is finished to release the host channel.
 */
static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
					struct dwc2_host_chan *chan, int chnum,
					struct dwc2_qtd *qtd,
					enum dwc2_halt_status halt_status)
{
	u32 hctsiz = readl(hsotg->regs + HCTSIZ(chnum));

	qtd->error_count = 0;

	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
		/* Core halts channel in these cases */
		dwc2_release_channel(hsotg, chan, qtd, halt_status);
	else
		/* Flush any outstanding requests from the Tx queue */
		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
}

static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
				       struct dwc2_host_chan *chan, int chnum,
				       struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_iso_packet_desc *frame_desc;
	u32 len;

	if (!qtd->urb)
		return 0;

	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
					  DWC2_HC_XFER_COMPLETE, NULL);
	if (!len) {
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
		return 0;
	}

	frame_desc->actual_length += len;

	if (chan->align_buf) {
		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
		dma_sync_single_for_cpu(hsotg->dev, qtd->urb->dma,
					qtd->urb->length, DMA_FROM_DEVICE);
		memcpy(qtd->urb->buf + frame_desc->offset +
		       qtd->isoc_split_offset, chan->qh->dw_align_buf, len);
		dma_sync_single_for_device(hsotg->dev, qtd->urb->dma,
					   qtd->urb->length, DMA_FROM_DEVICE);
	}

	qtd->isoc_split_offset += len;

	if (frame_desc->actual_length >= frame_desc->length) {
		frame_desc->status = 0;
		qtd->isoc_frame_index++;
		qtd->complete_split = 0;
		qtd->isoc_split_offset = 0;
	}

	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
		dwc2_host_complete(hsotg, qtd, 0);
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_URB_COMPLETE);
	} else {
		dwc2_release_channel(hsotg, chan, qtd,
				     DWC2_HC_XFER_NO_HALT_STATUS);
	}

	return 1;	/* Indicates that channel released */
}

/*
 * Handles a host channel Transfer Complete interrupt. This handler may be
 * called in either DMA mode or Slave mode.
 */
static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
				  struct dwc2_host_chan *chan, int chnum,
				  struct dwc2_qtd *qtd)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
	int pipe_type;
	int urb_xfer_done;

	if (dbg_hc(chan))
		dev_vdbg(hsotg->dev,
			 "--Host Channel %d Interrupt: Transfer Complete--\n",
			 chnum);

	if (!urb)
		goto handle_xfercomp_done;

	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
			/* Do not disable the interrupt, just clear it */
			return;
		goto handle_xfercomp_done;
	}

	/* Handle xfer complete on CSPLIT */
	if (chan->qh->do_split) {
		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
		    hsotg->core_params->dma_enable > 0) {
			if (qtd->complete_split &&
			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
							qtd))
				goto handle_xfercomp_done;
		} else {
			qtd->complete_split = 0;
		}
	}

	/* Update the QTD and URB states */
	switch (pipe_type) {
	case USB_ENDPOINT_XFER_CONTROL:
		switch (qtd->control_phase) {
		case DWC2_CONTROL_SETUP:
			if (urb->length > 0)
				qtd->control_phase = DWC2_CONTROL_DATA;
			else
				qtd->control_phase = DWC2_CONTROL_STATUS;
			dev_vdbg(hsotg->dev,
				 "  Control setup transaction done\n");
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_DATA:
			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
							      chnum, urb, qtd);
			if (urb_xfer_done) {
				qtd->control_phase = DWC2_CONTROL_STATUS;
				dev_vdbg(hsotg->dev,
					 "  Control data transfer done\n");
			} else {
				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
							  qtd);
			}
			halt_status = DWC2_HC_XFER_COMPLETE;
			break;
		case DWC2_CONTROL_STATUS:
			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
			if (urb->status == -EINPROGRESS)
				urb->status = 0;
			dwc2_host_complete(hsotg, qtd, urb->status);
			halt_status = DWC2_HC_XFER_URB_COMPLETE;
			break;
		}

1048197ba5f4SPaul Zimmerman 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1049197ba5f4SPaul Zimmerman 						halt_status);
1050197ba5f4SPaul Zimmerman 		break;
1051197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1052197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1053197ba5f4SPaul Zimmerman 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1054197ba5f4SPaul Zimmerman 						      qtd);
1055197ba5f4SPaul Zimmerman 		if (urb_xfer_done) {
1056197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, urb->status);
1057197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1058197ba5f4SPaul Zimmerman 		} else {
1059197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1060197ba5f4SPaul Zimmerman 		}
1061197ba5f4SPaul Zimmerman 
1062197ba5f4SPaul Zimmerman 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1063197ba5f4SPaul Zimmerman 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1064197ba5f4SPaul Zimmerman 						halt_status);
1065197ba5f4SPaul Zimmerman 		break;
1066197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1067197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1068197ba5f4SPaul Zimmerman 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1069197ba5f4SPaul Zimmerman 						      qtd);
1070197ba5f4SPaul Zimmerman 
1071197ba5f4SPaul Zimmerman 		/*
1072197ba5f4SPaul Zimmerman 		 * Interrupt URB is done on the first transfer complete
1073197ba5f4SPaul Zimmerman 		 * interrupt
1074197ba5f4SPaul Zimmerman 		 */
1075197ba5f4SPaul Zimmerman 		if (urb_xfer_done) {
1076197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, urb->status);
1077197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1078197ba5f4SPaul Zimmerman 		} else {
1079197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1080197ba5f4SPaul Zimmerman 		}
1081197ba5f4SPaul Zimmerman 
1082197ba5f4SPaul Zimmerman 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1083197ba5f4SPaul Zimmerman 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1084197ba5f4SPaul Zimmerman 					    halt_status);
1085197ba5f4SPaul Zimmerman 		break;
1086197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1087197ba5f4SPaul Zimmerman 		if (dbg_perio())
1088197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
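		/*
		 * Only update the isoc URB state if this was not a partial
		 * (BEGIN/MID/END) isoc OUT split; partial split positions
		 * are advanced in the ACK handler instead
		 */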
1089197ba5f4SPaul Zimmerman 		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1090197ba5f4SPaul Zimmerman 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1091197ba5f4SPaul Zimmerman 					chnum, qtd, DWC2_HC_XFER_COMPLETE);
1092197ba5f4SPaul Zimmerman 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1093197ba5f4SPaul Zimmerman 					    halt_status);
1094197ba5f4SPaul Zimmerman 		break;
1095197ba5f4SPaul Zimmerman 	}
1096197ba5f4SPaul Zimmerman 
1097197ba5f4SPaul Zimmerman handle_xfercomp_done:
1098197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1099197ba5f4SPaul Zimmerman }
1100197ba5f4SPaul Zimmerman 
1101197ba5f4SPaul Zimmerman /*
1102197ba5f4SPaul Zimmerman  * Handles a host channel STALL interrupt. This handler may be called in
1103197ba5f4SPaul Zimmerman  * either DMA mode or Slave mode.
1104197ba5f4SPaul Zimmerman  */
1105197ba5f4SPaul Zimmerman static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1106197ba5f4SPaul Zimmerman 			       struct dwc2_host_chan *chan, int chnum,
1107197ba5f4SPaul Zimmerman 			       struct dwc2_qtd *qtd)
1108197ba5f4SPaul Zimmerman {
1109197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
11102b54fa6bSPaul Zimmerman 	int pipe_type;
1111197ba5f4SPaul Zimmerman 
1112197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1113197ba5f4SPaul Zimmerman 		chnum);
1114197ba5f4SPaul Zimmerman 
1115197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_desc_enable > 0) {
1116197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1117197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_STALL);
1118197ba5f4SPaul Zimmerman 		goto handle_stall_done;
1119197ba5f4SPaul Zimmerman 	}
1120197ba5f4SPaul Zimmerman 
1121197ba5f4SPaul Zimmerman 	if (!urb)
1122197ba5f4SPaul Zimmerman 		goto handle_stall_halt;
1123197ba5f4SPaul Zimmerman 
11242b54fa6bSPaul Zimmerman 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
11252b54fa6bSPaul Zimmerman 
1126197ba5f4SPaul Zimmerman 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1127197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1128197ba5f4SPaul Zimmerman 
1129197ba5f4SPaul Zimmerman 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1130197ba5f4SPaul Zimmerman 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1131197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1132197ba5f4SPaul Zimmerman 		/*
1133197ba5f4SPaul Zimmerman 		 * USB protocol requires resetting the data toggle for bulk
1134197ba5f4SPaul Zimmerman 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1135197ba5f4SPaul Zimmerman 		 * setup command is issued to the endpoint. Anticipate the
1136197ba5f4SPaul Zimmerman 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1137197ba5f4SPaul Zimmerman 		 * the data toggle now.
1138197ba5f4SPaul Zimmerman 		 */
1139197ba5f4SPaul Zimmerman 		chan->qh->data_toggle = 0;
1140197ba5f4SPaul Zimmerman 	}
1141197ba5f4SPaul Zimmerman 
1142197ba5f4SPaul Zimmerman handle_stall_halt:
1143197ba5f4SPaul Zimmerman 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1144197ba5f4SPaul Zimmerman 
1145197ba5f4SPaul Zimmerman handle_stall_done:
1146197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1147197ba5f4SPaul Zimmerman }
1148197ba5f4SPaul Zimmerman 
1149197ba5f4SPaul Zimmerman /*
1150197ba5f4SPaul Zimmerman  * Updates the state of the URB when a transfer has been stopped due to an
1151197ba5f4SPaul Zimmerman  * abnormal condition before the transfer completes. Modifies the
1152197ba5f4SPaul Zimmerman  * actual_length field of the URB to reflect the number of bytes that have
1153197ba5f4SPaul Zimmerman  * actually been transferred via the host channel.
1154197ba5f4SPaul Zimmerman  */
1155197ba5f4SPaul Zimmerman static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1156197ba5f4SPaul Zimmerman 				      struct dwc2_host_chan *chan, int chnum,
1157197ba5f4SPaul Zimmerman 				      struct dwc2_hcd_urb *urb,
1158197ba5f4SPaul Zimmerman 				      struct dwc2_qtd *qtd,
1159197ba5f4SPaul Zimmerman 				      enum dwc2_halt_status halt_status)
1160197ba5f4SPaul Zimmerman {
1161197ba5f4SPaul Zimmerman 	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1162197ba5f4SPaul Zimmerman 						      qtd, halt_status, NULL);
1163197ba5f4SPaul Zimmerman 	u32 hctsiz;
1164197ba5f4SPaul Zimmerman 
1165197ba5f4SPaul Zimmerman 	if (urb->actual_length + xfer_length > urb->length) {
1166197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1167197ba5f4SPaul Zimmerman 		xfer_length = urb->length - urb->actual_length;
1168197ba5f4SPaul Zimmerman 	}
1169197ba5f4SPaul Zimmerman 
1170197ba5f4SPaul Zimmerman 	/* Non DWORD-aligned buffer case handling */
1171197ba5f4SPaul Zimmerman 	if (chan->align_buf && xfer_length && chan->ep_is_in) {
1172197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "%s(): non-aligned buffer\n", __func__);
1173197ba5f4SPaul Zimmerman 		dma_sync_single_for_cpu(hsotg->dev, urb->dma, urb->length,
1174197ba5f4SPaul Zimmerman 					DMA_FROM_DEVICE);
1175197ba5f4SPaul Zimmerman 		memcpy(urb->buf + urb->actual_length, chan->qh->dw_align_buf,
1176197ba5f4SPaul Zimmerman 		       xfer_length);
1177197ba5f4SPaul Zimmerman 		dma_sync_single_for_device(hsotg->dev, urb->dma, urb->length,
1178197ba5f4SPaul Zimmerman 					   DMA_FROM_DEVICE);
1179197ba5f4SPaul Zimmerman 	}
1180197ba5f4SPaul Zimmerman 
1181197ba5f4SPaul Zimmerman 	urb->actual_length += xfer_length;
1182197ba5f4SPaul Zimmerman 
1183197ba5f4SPaul Zimmerman 	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1184197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1185197ba5f4SPaul Zimmerman 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1186197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1187197ba5f4SPaul Zimmerman 		 chan->start_pkt_count);
1188197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1189197ba5f4SPaul Zimmerman 		 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1190197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1191197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1192197ba5f4SPaul Zimmerman 		 xfer_length);
1193197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1194197ba5f4SPaul Zimmerman 		 urb->actual_length);
1195197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1196197ba5f4SPaul Zimmerman 		 urb->length);
1197197ba5f4SPaul Zimmerman }
1198197ba5f4SPaul Zimmerman 
1199197ba5f4SPaul Zimmerman /*
1200197ba5f4SPaul Zimmerman  * Handles a host channel NAK interrupt. This handler may be called in either
1201197ba5f4SPaul Zimmerman  * DMA mode or Slave mode.
1202197ba5f4SPaul Zimmerman  */
1203197ba5f4SPaul Zimmerman static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1204197ba5f4SPaul Zimmerman 			     struct dwc2_host_chan *chan, int chnum,
1205197ba5f4SPaul Zimmerman 			     struct dwc2_qtd *qtd)
1206197ba5f4SPaul Zimmerman {
1207197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1208197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1209197ba5f4SPaul Zimmerman 			 chnum);
1210197ba5f4SPaul Zimmerman 
1211197ba5f4SPaul Zimmerman 	/*
1212197ba5f4SPaul Zimmerman 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1213197ba5f4SPaul Zimmerman 	 * interrupt. Re-start the SSPLIT transfer.
1214197ba5f4SPaul Zimmerman 	 */
1215197ba5f4SPaul Zimmerman 	if (chan->do_split) {
1216197ba5f4SPaul Zimmerman 		if (chan->complete_split)
1217197ba5f4SPaul Zimmerman 			qtd->error_count = 0;
1218197ba5f4SPaul Zimmerman 		qtd->complete_split = 0;
1219197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1220197ba5f4SPaul Zimmerman 		goto handle_nak_done;
1221197ba5f4SPaul Zimmerman 	}
1222197ba5f4SPaul Zimmerman 
1223197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1224197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1225197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1226197ba5f4SPaul Zimmerman 		if (hsotg->core_params->dma_enable > 0 && chan->ep_is_in) {
1227197ba5f4SPaul Zimmerman 			/*
1228197ba5f4SPaul Zimmerman 			 * NAK interrupts are enabled on bulk/control IN
1229197ba5f4SPaul Zimmerman 			 * transfers in DMA mode for the sole purpose of
1230197ba5f4SPaul Zimmerman 			 * resetting the error count after a transaction error
1231197ba5f4SPaul Zimmerman 			 * occurs. The core will continue transferring data.
1232197ba5f4SPaul Zimmerman 			 */
1233197ba5f4SPaul Zimmerman 			qtd->error_count = 0;
1234197ba5f4SPaul Zimmerman 			break;
1235197ba5f4SPaul Zimmerman 		}
1236197ba5f4SPaul Zimmerman 
1237197ba5f4SPaul Zimmerman 		/*
1238197ba5f4SPaul Zimmerman 		 * NAK interrupts normally occur during OUT transfers in DMA
1239197ba5f4SPaul Zimmerman 		 * or Slave mode. For IN transfers, more requests will be
1240197ba5f4SPaul Zimmerman 		 * queued as request queue space is available.
1241197ba5f4SPaul Zimmerman 		 */
1242197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1243197ba5f4SPaul Zimmerman 
1244197ba5f4SPaul Zimmerman 		if (!chan->qh->ping_state) {
1245197ba5f4SPaul Zimmerman 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1246197ba5f4SPaul Zimmerman 						  qtd, DWC2_HC_XFER_NAK);
1247197ba5f4SPaul Zimmerman 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1248197ba5f4SPaul Zimmerman 
1249197ba5f4SPaul Zimmerman 			if (chan->speed == USB_SPEED_HIGH)
1250197ba5f4SPaul Zimmerman 				chan->qh->ping_state = 1;
1251197ba5f4SPaul Zimmerman 		}
1252197ba5f4SPaul Zimmerman 
1253197ba5f4SPaul Zimmerman 		/*
1254197ba5f4SPaul Zimmerman 		 * Halt the channel so the transfer can be re-started from
1255197ba5f4SPaul Zimmerman 		 * the appropriate point or the PING protocol will
1256197ba5f4SPaul Zimmerman 		 * start/continue
1257197ba5f4SPaul Zimmerman 		 */
1258197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1259197ba5f4SPaul Zimmerman 		break;
1260197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1261197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1262197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1263197ba5f4SPaul Zimmerman 		break;
1264197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1265197ba5f4SPaul Zimmerman 		/* Should never get called for isochronous transfers */
1266197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
1267197ba5f4SPaul Zimmerman 		break;
1268197ba5f4SPaul Zimmerman 	}
1269197ba5f4SPaul Zimmerman 
1270197ba5f4SPaul Zimmerman handle_nak_done:
1271197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1272197ba5f4SPaul Zimmerman }
1273197ba5f4SPaul Zimmerman 
1274197ba5f4SPaul Zimmerman /*
1275197ba5f4SPaul Zimmerman  * Handles a host channel ACK interrupt. This interrupt is enabled when
1276197ba5f4SPaul Zimmerman  * performing the PING protocol in Slave mode, when errors occur during
1277197ba5f4SPaul Zimmerman  * either Slave mode or DMA mode, and during Start Split transactions.
1278197ba5f4SPaul Zimmerman  */
1279197ba5f4SPaul Zimmerman static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1280197ba5f4SPaul Zimmerman 			     struct dwc2_host_chan *chan, int chnum,
1281197ba5f4SPaul Zimmerman 			     struct dwc2_qtd *qtd)
1282197ba5f4SPaul Zimmerman {
1283197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
1284197ba5f4SPaul Zimmerman 
1285197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1286197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1287197ba5f4SPaul Zimmerman 			 chnum);
1288197ba5f4SPaul Zimmerman 
1289197ba5f4SPaul Zimmerman 	if (chan->do_split) {
1290197ba5f4SPaul Zimmerman 		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1291197ba5f4SPaul Zimmerman 		if (!chan->ep_is_in &&
1292197ba5f4SPaul Zimmerman 		    chan->data_pid_start != DWC2_HC_PID_SETUP)
1293197ba5f4SPaul Zimmerman 			qtd->ssplit_out_xfer_count = chan->xfer_len;
1294197ba5f4SPaul Zimmerman 
1295197ba5f4SPaul Zimmerman 		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1296197ba5f4SPaul Zimmerman 			qtd->complete_split = 1;
1297197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1298197ba5f4SPaul Zimmerman 		} else {
1299197ba5f4SPaul Zimmerman 			/* ISOC OUT */
1300197ba5f4SPaul Zimmerman 			switch (chan->xact_pos) {
1301197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_ALL:
1302197ba5f4SPaul Zimmerman 				break;
1303197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_END:
1304197ba5f4SPaul Zimmerman 				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1305197ba5f4SPaul Zimmerman 				qtd->isoc_split_offset = 0;
1306197ba5f4SPaul Zimmerman 				break;
1307197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_BEGIN:
1308197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_MID:
1309197ba5f4SPaul Zimmerman 				/*
1310197ba5f4SPaul Zimmerman 				 * For BEGIN or MID, calculate the length for
1311197ba5f4SPaul Zimmerman 				 * the next microframe to determine the correct
1312197ba5f4SPaul Zimmerman 				 * SSPLIT token, either MID or END
1313197ba5f4SPaul Zimmerman 				 */
1314197ba5f4SPaul Zimmerman 				frame_desc = &qtd->urb->iso_descs[
1315197ba5f4SPaul Zimmerman 						qtd->isoc_frame_index];
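				/*
				 * Each isoc OUT SSPLIT carries at most
				 * 188 data bytes, the maximum amount of
				 * split isoc data per microframe
				 */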
1316197ba5f4SPaul Zimmerman 				qtd->isoc_split_offset += 188;
1317197ba5f4SPaul Zimmerman 
1318197ba5f4SPaul Zimmerman 				if (frame_desc->length - qtd->isoc_split_offset
1319197ba5f4SPaul Zimmerman 							<= 188)
1320197ba5f4SPaul Zimmerman 					qtd->isoc_split_pos =
1321197ba5f4SPaul Zimmerman 							DWC2_HCSPLT_XACTPOS_END;
1322197ba5f4SPaul Zimmerman 				else
1323197ba5f4SPaul Zimmerman 					qtd->isoc_split_pos =
1324197ba5f4SPaul Zimmerman 							DWC2_HCSPLT_XACTPOS_MID;
1325197ba5f4SPaul Zimmerman 				break;
1326197ba5f4SPaul Zimmerman 			}
1327197ba5f4SPaul Zimmerman 		}
1328197ba5f4SPaul Zimmerman 	} else {
1329197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1330197ba5f4SPaul Zimmerman 
1331197ba5f4SPaul Zimmerman 		if (chan->qh->ping_state) {
1332197ba5f4SPaul Zimmerman 			chan->qh->ping_state = 0;
1333197ba5f4SPaul Zimmerman 			/*
1334197ba5f4SPaul Zimmerman 			 * Halt the channel so the transfer can be re-started
1335197ba5f4SPaul Zimmerman 			 * from the appropriate point. This only happens in
1336197ba5f4SPaul Zimmerman 			 * Slave mode. In DMA mode, the ping_state is cleared
1337197ba5f4SPaul Zimmerman 			 * when the transfer is started because the core
1338197ba5f4SPaul Zimmerman 			 * automatically executes the PING, then the transfer.
1339197ba5f4SPaul Zimmerman 			 */
1340197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1341197ba5f4SPaul Zimmerman 		}
1342197ba5f4SPaul Zimmerman 	}
1343197ba5f4SPaul Zimmerman 
1344197ba5f4SPaul Zimmerman 	/*
1345197ba5f4SPaul Zimmerman 	 * If the ACK occurred when _not_ in the PING state, let the channel
1346197ba5f4SPaul Zimmerman 	 * continue transferring data after clearing the error count
1347197ba5f4SPaul Zimmerman 	 */
1348197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1349197ba5f4SPaul Zimmerman }
1350197ba5f4SPaul Zimmerman 
1351197ba5f4SPaul Zimmerman /*
1352197ba5f4SPaul Zimmerman  * Handles a host channel NYET interrupt. This interrupt should only occur on
1353197ba5f4SPaul Zimmerman  * Bulk and Control OUT endpoints and for complete split transactions. If a
1354197ba5f4SPaul Zimmerman  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1355197ba5f4SPaul Zimmerman  * handled in the xfercomp interrupt handler, not here. This handler may be
1356197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1357197ba5f4SPaul Zimmerman  */
1358197ba5f4SPaul Zimmerman static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1359197ba5f4SPaul Zimmerman 			      struct dwc2_host_chan *chan, int chnum,
1360197ba5f4SPaul Zimmerman 			      struct dwc2_qtd *qtd)
1361197ba5f4SPaul Zimmerman {
1362197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1363197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1364197ba5f4SPaul Zimmerman 			 chnum);
1365197ba5f4SPaul Zimmerman 
1366197ba5f4SPaul Zimmerman 	/*
1367197ba5f4SPaul Zimmerman 	 * NYET on CSPLIT
1368197ba5f4SPaul Zimmerman 	 * re-do the CSPLIT immediately on non-periodic
1369197ba5f4SPaul Zimmerman 	 */
1370197ba5f4SPaul Zimmerman 	if (chan->do_split && chan->complete_split) {
1371197ba5f4SPaul Zimmerman 		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
1372197ba5f4SPaul Zimmerman 		    hsotg->core_params->dma_enable > 0) {
1373197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
1374197ba5f4SPaul Zimmerman 			qtd->isoc_split_offset = 0;
1375197ba5f4SPaul Zimmerman 			qtd->isoc_frame_index++;
1376197ba5f4SPaul Zimmerman 			if (qtd->urb &&
1377197ba5f4SPaul Zimmerman 			    qtd->isoc_frame_index == qtd->urb->packet_count) {
1378197ba5f4SPaul Zimmerman 				dwc2_host_complete(hsotg, qtd, 0);
1379197ba5f4SPaul Zimmerman 				dwc2_release_channel(hsotg, chan, qtd,
1380197ba5f4SPaul Zimmerman 						     DWC2_HC_XFER_URB_COMPLETE);
1381197ba5f4SPaul Zimmerman 			} else {
1382197ba5f4SPaul Zimmerman 				dwc2_release_channel(hsotg, chan, qtd,
1383197ba5f4SPaul Zimmerman 						DWC2_HC_XFER_NO_HALT_STATUS);
1384197ba5f4SPaul Zimmerman 			}
1385197ba5f4SPaul Zimmerman 			goto handle_nyet_done;
1386197ba5f4SPaul Zimmerman 		}
1387197ba5f4SPaul Zimmerman 
1388197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1389197ba5f4SPaul Zimmerman 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1390197ba5f4SPaul Zimmerman 			int frnum = dwc2_hcd_get_frame_number(hsotg);
1391197ba5f4SPaul Zimmerman 
1392197ba5f4SPaul Zimmerman 			if (dwc2_full_frame_num(frnum) !=
1393197ba5f4SPaul Zimmerman 			    dwc2_full_frame_num(chan->qh->sched_frame)) {
1394197ba5f4SPaul Zimmerman 				/*
1395197ba5f4SPaul Zimmerman 				 * No longer in the same full speed frame.
1396197ba5f4SPaul Zimmerman 				 * Treat this as a transaction error.
1397197ba5f4SPaul Zimmerman 				 */
1398197ba5f4SPaul Zimmerman #if 0
1399197ba5f4SPaul Zimmerman 				/*
1400197ba5f4SPaul Zimmerman 				 * Todo: Fix system performance so this can
1401197ba5f4SPaul Zimmerman 				 * be treated as an error. Right now complete
1402197ba5f4SPaul Zimmerman 				 * splits cannot be scheduled precisely enough
1403197ba5f4SPaul Zimmerman 				 * due to other system activity, so this error
1404197ba5f4SPaul Zimmerman 				 * occurs regularly in Slave mode.
1405197ba5f4SPaul Zimmerman 				 */
1406197ba5f4SPaul Zimmerman 				qtd->error_count++;
1407197ba5f4SPaul Zimmerman #endif
1408197ba5f4SPaul Zimmerman 				qtd->complete_split = 0;
1409197ba5f4SPaul Zimmerman 				dwc2_halt_channel(hsotg, chan, qtd,
1410197ba5f4SPaul Zimmerman 						  DWC2_HC_XFER_XACT_ERR);
1411197ba5f4SPaul Zimmerman 				/* Todo: add support for isoc release */
1412197ba5f4SPaul Zimmerman 				goto handle_nyet_done;
1413197ba5f4SPaul Zimmerman 			}
1414197ba5f4SPaul Zimmerman 		}
1415197ba5f4SPaul Zimmerman 
1416197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1417197ba5f4SPaul Zimmerman 		goto handle_nyet_done;
1418197ba5f4SPaul Zimmerman 	}
1419197ba5f4SPaul Zimmerman 
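	/*
	 * NYET on a non-split BULK/CONTROL OUT means the device is not
	 * ready for data, so switch to the PING protocol before retrying
	 */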
1420197ba5f4SPaul Zimmerman 	chan->qh->ping_state = 1;
1421197ba5f4SPaul Zimmerman 	qtd->error_count = 0;
1422197ba5f4SPaul Zimmerman 
1423197ba5f4SPaul Zimmerman 	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1424197ba5f4SPaul Zimmerman 				  DWC2_HC_XFER_NYET);
1425197ba5f4SPaul Zimmerman 	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1426197ba5f4SPaul Zimmerman 
1427197ba5f4SPaul Zimmerman 	/*
1428197ba5f4SPaul Zimmerman 	 * Halt the channel and re-start the transfer so the PING protocol
1429197ba5f4SPaul Zimmerman 	 * will start
1430197ba5f4SPaul Zimmerman 	 */
1431197ba5f4SPaul Zimmerman 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1432197ba5f4SPaul Zimmerman 
1433197ba5f4SPaul Zimmerman handle_nyet_done:
1434197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1435197ba5f4SPaul Zimmerman }
1436197ba5f4SPaul Zimmerman 
1437197ba5f4SPaul Zimmerman /*
1438197ba5f4SPaul Zimmerman  * Handles a host channel babble interrupt. This handler may be called in
1439197ba5f4SPaul Zimmerman  * either DMA mode or Slave mode.
1440197ba5f4SPaul Zimmerman  */
1441197ba5f4SPaul Zimmerman static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1442197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1443197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1444197ba5f4SPaul Zimmerman {
1445197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1446197ba5f4SPaul Zimmerman 		chnum);
1447197ba5f4SPaul Zimmerman 
1448197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1449197ba5f4SPaul Zimmerman 
1450197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_desc_enable > 0) {
1451197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1452197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_BABBLE_ERR);
1453197ba5f4SPaul Zimmerman 		goto disable_int;
1454197ba5f4SPaul Zimmerman 	}
1455197ba5f4SPaul Zimmerman 
1456197ba5f4SPaul Zimmerman 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1457197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1458197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1459197ba5f4SPaul Zimmerman 	} else {
1460197ba5f4SPaul Zimmerman 		enum dwc2_halt_status halt_status;
1461197ba5f4SPaul Zimmerman 
1462197ba5f4SPaul Zimmerman 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1463197ba5f4SPaul Zimmerman 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1464197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1465197ba5f4SPaul Zimmerman 	}
1466197ba5f4SPaul Zimmerman 
1467197ba5f4SPaul Zimmerman disable_int:
1468197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1469197ba5f4SPaul Zimmerman }
1470197ba5f4SPaul Zimmerman 
1471197ba5f4SPaul Zimmerman /*
1472197ba5f4SPaul Zimmerman  * Handles a host channel AHB error interrupt. This handler is only called in
1473197ba5f4SPaul Zimmerman  * DMA mode.
1474197ba5f4SPaul Zimmerman  */
1475197ba5f4SPaul Zimmerman static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1476197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1477197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1478197ba5f4SPaul Zimmerman {
1479197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
1480197ba5f4SPaul Zimmerman 	char *pipetype, *speed;
1481197ba5f4SPaul Zimmerman 	u32 hcchar;
1482197ba5f4SPaul Zimmerman 	u32 hcsplt;
1483197ba5f4SPaul Zimmerman 	u32 hctsiz;
1484197ba5f4SPaul Zimmerman 	u32 hc_dma;
1485197ba5f4SPaul Zimmerman 
1486197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1487197ba5f4SPaul Zimmerman 		chnum);
1488197ba5f4SPaul Zimmerman 
1489197ba5f4SPaul Zimmerman 	if (!urb)
1490197ba5f4SPaul Zimmerman 		goto handle_ahberr_halt;
1491197ba5f4SPaul Zimmerman 
1492197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1493197ba5f4SPaul Zimmerman 
1494197ba5f4SPaul Zimmerman 	hcchar = readl(hsotg->regs + HCCHAR(chnum));
1495197ba5f4SPaul Zimmerman 	hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1496197ba5f4SPaul Zimmerman 	hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1497197ba5f4SPaul Zimmerman 	hc_dma = readl(hsotg->regs + HCDMA(chnum));
1498197ba5f4SPaul Zimmerman 
1499197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1500197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1501197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1502197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Device address: %d\n",
1503197ba5f4SPaul Zimmerman 		dwc2_hcd_get_dev_addr(&urb->pipe_info));
1504197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1505197ba5f4SPaul Zimmerman 		dwc2_hcd_get_ep_num(&urb->pipe_info),
1506197ba5f4SPaul Zimmerman 		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1507197ba5f4SPaul Zimmerman 
1508197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1509197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1510197ba5f4SPaul Zimmerman 		pipetype = "CONTROL";
1511197ba5f4SPaul Zimmerman 		break;
1512197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1513197ba5f4SPaul Zimmerman 		pipetype = "BULK";
1514197ba5f4SPaul Zimmerman 		break;
1515197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1516197ba5f4SPaul Zimmerman 		pipetype = "INTERRUPT";
1517197ba5f4SPaul Zimmerman 		break;
1518197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1519197ba5f4SPaul Zimmerman 		pipetype = "ISOCHRONOUS";
1520197ba5f4SPaul Zimmerman 		break;
1521197ba5f4SPaul Zimmerman 	default:
1522197ba5f4SPaul Zimmerman 		pipetype = "UNKNOWN";
1523197ba5f4SPaul Zimmerman 		break;
1524197ba5f4SPaul Zimmerman 	}
1525197ba5f4SPaul Zimmerman 
1526197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1527197ba5f4SPaul Zimmerman 
1528197ba5f4SPaul Zimmerman 	switch (chan->speed) {
1529197ba5f4SPaul Zimmerman 	case USB_SPEED_HIGH:
1530197ba5f4SPaul Zimmerman 		speed = "HIGH";
1531197ba5f4SPaul Zimmerman 		break;
1532197ba5f4SPaul Zimmerman 	case USB_SPEED_FULL:
1533197ba5f4SPaul Zimmerman 		speed = "FULL";
1534197ba5f4SPaul Zimmerman 		break;
1535197ba5f4SPaul Zimmerman 	case USB_SPEED_LOW:
1536197ba5f4SPaul Zimmerman 		speed = "LOW";
1537197ba5f4SPaul Zimmerman 		break;
1538197ba5f4SPaul Zimmerman 	default:
1539197ba5f4SPaul Zimmerman 		speed = "UNKNOWN";
1540197ba5f4SPaul Zimmerman 		break;
1541197ba5f4SPaul Zimmerman 	}
1542197ba5f4SPaul Zimmerman 
1543197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Speed: %s\n", speed);
1544197ba5f4SPaul Zimmerman 
1545197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Max packet size: %d\n",
1546197ba5f4SPaul Zimmerman 		dwc2_hcd_get_mps(&urb->pipe_info));
1547197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1548197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1549197ba5f4SPaul Zimmerman 		urb->buf, (unsigned long)urb->dma);
1550197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1551197ba5f4SPaul Zimmerman 		urb->setup_packet, (unsigned long)urb->setup_dma);
1552197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1553197ba5f4SPaul Zimmerman 
1554197ba5f4SPaul Zimmerman 	/* Core halts the channel for Descriptor DMA mode */
1555197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_desc_enable > 0) {
1556197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1557197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_AHB_ERR);
1558197ba5f4SPaul Zimmerman 		goto handle_ahberr_done;
1559197ba5f4SPaul Zimmerman 	}
1560197ba5f4SPaul Zimmerman 
1561197ba5f4SPaul Zimmerman 	dwc2_host_complete(hsotg, qtd, -EIO);
1562197ba5f4SPaul Zimmerman 
1563197ba5f4SPaul Zimmerman handle_ahberr_halt:
1564197ba5f4SPaul Zimmerman 	/*
1565197ba5f4SPaul Zimmerman 	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
1566197ba5f4SPaul Zimmerman 	 * write to the HCCHARn register in DMA mode to force the halt.
1567197ba5f4SPaul Zimmerman 	 */
1568197ba5f4SPaul Zimmerman 	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1569197ba5f4SPaul Zimmerman 
1570197ba5f4SPaul Zimmerman handle_ahberr_done:
1571197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1572197ba5f4SPaul Zimmerman }
1573197ba5f4SPaul Zimmerman 
1574197ba5f4SPaul Zimmerman /*
1575197ba5f4SPaul Zimmerman  * Handles a host channel transaction error interrupt. This handler may be
1576197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1577197ba5f4SPaul Zimmerman  */
1578197ba5f4SPaul Zimmerman static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1579197ba5f4SPaul Zimmerman 				 struct dwc2_host_chan *chan, int chnum,
1580197ba5f4SPaul Zimmerman 				 struct dwc2_qtd *qtd)
1581197ba5f4SPaul Zimmerman {
1582197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev,
1583197ba5f4SPaul Zimmerman 		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1584197ba5f4SPaul Zimmerman 
1585197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1586197ba5f4SPaul Zimmerman 
1587197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_desc_enable > 0) {
1588197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1589197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_XACT_ERR);
1590197ba5f4SPaul Zimmerman 		goto handle_xacterr_done;
1591197ba5f4SPaul Zimmerman 	}
1592197ba5f4SPaul Zimmerman 
1593197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1594197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1595197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1596197ba5f4SPaul Zimmerman 		qtd->error_count++;
1597197ba5f4SPaul Zimmerman 		if (!chan->qh->ping_state) {
1599197ba5f4SPaul Zimmerman 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1600197ba5f4SPaul Zimmerman 						  qtd, DWC2_HC_XFER_XACT_ERR);
1601197ba5f4SPaul Zimmerman 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1602197ba5f4SPaul Zimmerman 			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1603197ba5f4SPaul Zimmerman 				chan->qh->ping_state = 1;
1604197ba5f4SPaul Zimmerman 		}
1605197ba5f4SPaul Zimmerman 
1606197ba5f4SPaul Zimmerman 		/*
1607197ba5f4SPaul Zimmerman 		 * Halt the channel so the transfer can be re-started from
1608197ba5f4SPaul Zimmerman 		 * the appropriate point or the PING protocol will start
1609197ba5f4SPaul Zimmerman 		 */
1610197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1611197ba5f4SPaul Zimmerman 		break;
1612197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1613197ba5f4SPaul Zimmerman 		qtd->error_count++;
1614197ba5f4SPaul Zimmerman 		if (chan->do_split && chan->complete_split)
1615197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
1616197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1617197ba5f4SPaul Zimmerman 		break;
1618197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1619197ba5f4SPaul Zimmerman 		{
1620197ba5f4SPaul Zimmerman 			enum dwc2_halt_status halt_status;
1621197ba5f4SPaul Zimmerman 
1622197ba5f4SPaul Zimmerman 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1623197ba5f4SPaul Zimmerman 					chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1624197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1625197ba5f4SPaul Zimmerman 		}
1626197ba5f4SPaul Zimmerman 		break;
1627197ba5f4SPaul Zimmerman 	}
1628197ba5f4SPaul Zimmerman 
1629197ba5f4SPaul Zimmerman handle_xacterr_done:
1630197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1631197ba5f4SPaul Zimmerman }
1632197ba5f4SPaul Zimmerman 
1633197ba5f4SPaul Zimmerman /*
1634197ba5f4SPaul Zimmerman  * Handles a host channel frame overrun interrupt. This handler may be called
1635197ba5f4SPaul Zimmerman  * in either DMA mode or Slave mode.
1636197ba5f4SPaul Zimmerman  */
1637197ba5f4SPaul Zimmerman static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1638197ba5f4SPaul Zimmerman 				  struct dwc2_host_chan *chan, int chnum,
1639197ba5f4SPaul Zimmerman 				  struct dwc2_qtd *qtd)
1640197ba5f4SPaul Zimmerman {
1641197ba5f4SPaul Zimmerman 	enum dwc2_halt_status halt_status;
1642197ba5f4SPaul Zimmerman 
1643197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1644197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1645197ba5f4SPaul Zimmerman 			chnum);
1646197ba5f4SPaul Zimmerman 
1647197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1648197ba5f4SPaul Zimmerman 
1649197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1650197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1651197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1652197ba5f4SPaul Zimmerman 		break;
1653197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1654197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1655197ba5f4SPaul Zimmerman 		break;
1656197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1657197ba5f4SPaul Zimmerman 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1658197ba5f4SPaul Zimmerman 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1659197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1660197ba5f4SPaul Zimmerman 		break;
1661197ba5f4SPaul Zimmerman 	}
1662197ba5f4SPaul Zimmerman 
1663197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1664197ba5f4SPaul Zimmerman }
1665197ba5f4SPaul Zimmerman 
1666197ba5f4SPaul Zimmerman /*
1667197ba5f4SPaul Zimmerman  * Handles a host channel data toggle error interrupt. This handler may be
1668197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1669197ba5f4SPaul Zimmerman  */
1670197ba5f4SPaul Zimmerman static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1671197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan, int chnum,
1672197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd)
1673197ba5f4SPaul Zimmerman {
1674197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev,
1675197ba5f4SPaul Zimmerman 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1676197ba5f4SPaul Zimmerman 
1677197ba5f4SPaul Zimmerman 	if (chan->ep_is_in)
1678197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1679197ba5f4SPaul Zimmerman 	else
1680197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev,
1681197ba5f4SPaul Zimmerman 			"Data Toggle Error on OUT transfer, channel %d\n",
1682197ba5f4SPaul Zimmerman 			chnum);
1683197ba5f4SPaul Zimmerman 
1684197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1685197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1686197ba5f4SPaul Zimmerman }
1687197ba5f4SPaul Zimmerman 
1688197ba5f4SPaul Zimmerman /*
1689197ba5f4SPaul Zimmerman  * For debug only. It checks that a valid halt status is set and that
1690197ba5f4SPaul Zimmerman  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1691197ba5f4SPaul Zimmerman  * taken and a warning is issued.
1692197ba5f4SPaul Zimmerman  *
1693197ba5f4SPaul Zimmerman  * Return: true if halt status is ok, false otherwise
1694197ba5f4SPaul Zimmerman  */
1695197ba5f4SPaul Zimmerman static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1696197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1697197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1698197ba5f4SPaul Zimmerman {
1699197ba5f4SPaul Zimmerman #ifdef DEBUG
1700197ba5f4SPaul Zimmerman 	u32 hcchar;
1701197ba5f4SPaul Zimmerman 	u32 hctsiz;
1702197ba5f4SPaul Zimmerman 	u32 hcintmsk;
1703197ba5f4SPaul Zimmerman 	u32 hcsplt;
1704197ba5f4SPaul Zimmerman 
1705197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1706197ba5f4SPaul Zimmerman 		/*
1707197ba5f4SPaul Zimmerman 		 * This code is here only as a check. This condition should
1708197ba5f4SPaul Zimmerman 		 * never happen. Ignore the halt if it does occur.
1709197ba5f4SPaul Zimmerman 		 */
1710197ba5f4SPaul Zimmerman 		hcchar = readl(hsotg->regs + HCCHAR(chnum));
1711197ba5f4SPaul Zimmerman 		hctsiz = readl(hsotg->regs + HCTSIZ(chnum));
1712197ba5f4SPaul Zimmerman 		hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1713197ba5f4SPaul Zimmerman 		hcsplt = readl(hsotg->regs + HCSPLT(chnum));
1714197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1715197ba5f4SPaul Zimmerman 			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1716197ba5f4SPaul Zimmerman 			 __func__);
1717197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1718197ba5f4SPaul Zimmerman 			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1719197ba5f4SPaul Zimmerman 			chnum, hcchar, hctsiz);
1720197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1721197ba5f4SPaul Zimmerman 			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1722197ba5f4SPaul Zimmerman 			chan->hcint, hcintmsk, hcsplt);
1723197ba5f4SPaul Zimmerman 		if (qtd)
1724197ba5f4SPaul Zimmerman 			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1725197ba5f4SPaul Zimmerman 				qtd->complete_split);
1726197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev,
1727197ba5f4SPaul Zimmerman 			 "%s: no halt status, channel %d, ignoring interrupt\n",
1728197ba5f4SPaul Zimmerman 			 __func__, chnum);
1729197ba5f4SPaul Zimmerman 		return false;
1730197ba5f4SPaul Zimmerman 	}
1731197ba5f4SPaul Zimmerman 
1732197ba5f4SPaul Zimmerman 	/*
1733197ba5f4SPaul Zimmerman 	 * This code is here only as a check. hcchar.chdis should never be set
1734197ba5f4SPaul Zimmerman 	 * when the halt interrupt occurs. Halt the channel again if it does
1735197ba5f4SPaul Zimmerman 	 * occur.
1736197ba5f4SPaul Zimmerman 	 */
1737197ba5f4SPaul Zimmerman 	hcchar = readl(hsotg->regs + HCCHAR(chnum));
1738197ba5f4SPaul Zimmerman 	if (hcchar & HCCHAR_CHDIS) {
1739197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev,
1740197ba5f4SPaul Zimmerman 			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1741197ba5f4SPaul Zimmerman 			 __func__, hcchar);
1742197ba5f4SPaul Zimmerman 		chan->halt_pending = 0;
1743197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1744197ba5f4SPaul Zimmerman 		return false;
1745197ba5f4SPaul Zimmerman 	}
1746197ba5f4SPaul Zimmerman #endif
1747197ba5f4SPaul Zimmerman 
1748197ba5f4SPaul Zimmerman 	return true;
1749197ba5f4SPaul Zimmerman }
1750197ba5f4SPaul Zimmerman 
1751197ba5f4SPaul Zimmerman /*
1752197ba5f4SPaul Zimmerman  * Handles a host Channel Halted interrupt in DMA mode. This handler
1753197ba5f4SPaul Zimmerman  * determines the reason the channel halted and proceeds accordingly.
1754197ba5f4SPaul Zimmerman  */
1755197ba5f4SPaul Zimmerman static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1756197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan, int chnum,
1757197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd)
1758197ba5f4SPaul Zimmerman {
1759197ba5f4SPaul Zimmerman 	u32 hcintmsk;
1760197ba5f4SPaul Zimmerman 	int out_nak_enh = 0;
1761197ba5f4SPaul Zimmerman 
1762197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1763197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
1764197ba5f4SPaul Zimmerman 			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1765197ba5f4SPaul Zimmerman 			 chnum);
1766197ba5f4SPaul Zimmerman 
1767197ba5f4SPaul Zimmerman 	/*
1768197ba5f4SPaul Zimmerman 	 * For core with OUT NAK enhancement, the flow for high-speed
1769197ba5f4SPaul Zimmerman 	 * For a core with the OUT NAK enhancement, the flow for high-speed
1770197ba5f4SPaul Zimmerman 	 */
1771197ba5f4SPaul Zimmerman 	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1772197ba5f4SPaul Zimmerman 		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1773197ba5f4SPaul Zimmerman 		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1774197ba5f4SPaul Zimmerman 		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1775197ba5f4SPaul Zimmerman 			out_nak_enh = 1;
1776197ba5f4SPaul Zimmerman 		}
1777197ba5f4SPaul Zimmerman 	}
1778197ba5f4SPaul Zimmerman 
1779197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1780197ba5f4SPaul Zimmerman 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
1781197ba5f4SPaul Zimmerman 	     hsotg->core_params->dma_desc_enable <= 0)) {
1782197ba5f4SPaul Zimmerman 		if (hsotg->core_params->dma_desc_enable > 0)
1783197ba5f4SPaul Zimmerman 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1784197ba5f4SPaul Zimmerman 						    chan->halt_status);
1785197ba5f4SPaul Zimmerman 		else
1786197ba5f4SPaul Zimmerman 			/*
1787197ba5f4SPaul Zimmerman 			 * Just release the channel. A dequeue can happen on a
1788197ba5f4SPaul Zimmerman 			 * transfer timeout. In the case of an AHB Error, the
1789197ba5f4SPaul Zimmerman 			 * channel was forced to halt because there's no way to
1790197ba5f4SPaul Zimmerman 			 * gracefully recover.
1791197ba5f4SPaul Zimmerman 			 */
1792197ba5f4SPaul Zimmerman 			dwc2_release_channel(hsotg, chan, qtd,
1793197ba5f4SPaul Zimmerman 					     chan->halt_status);
1794197ba5f4SPaul Zimmerman 		return;
1795197ba5f4SPaul Zimmerman 	}
1796197ba5f4SPaul Zimmerman 
1797197ba5f4SPaul Zimmerman 	hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1798197ba5f4SPaul Zimmerman 
1799197ba5f4SPaul Zimmerman 	if (chan->hcint & HCINTMSK_XFERCOMPL) {
1800197ba5f4SPaul Zimmerman 		/*
1801197ba5f4SPaul Zimmerman 		 * Todo: This is here because of a possible hardware bug. The
1802197ba5f4SPaul Zimmerman 		 * spec says that on SPLIT-ISOC OUT transfers in DMA mode, a
1803197ba5f4SPaul Zimmerman 		 * HALT interrupt with the ACK bit set should occur, but only
1804197ba5f4SPaul Zimmerman 		 * the XFERCOMP bit is seen, even with XFERCOMP masked out.
1805197ba5f4SPaul Zimmerman 		 * This is a workaround; it should go away once the hardware is fixed.
1806197ba5f4SPaul Zimmerman 		 */
1807197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1808197ba5f4SPaul Zimmerman 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1809197ba5f4SPaul Zimmerman 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1810197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_STALL) {
1811197ba5f4SPaul Zimmerman 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1812197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
1813197ba5f4SPaul Zimmerman 		   hsotg->core_params->dma_desc_enable <= 0) {
1814197ba5f4SPaul Zimmerman 		if (out_nak_enh) {
1815197ba5f4SPaul Zimmerman 			if (chan->hcint &
1816197ba5f4SPaul Zimmerman 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1817197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1818197ba5f4SPaul Zimmerman 					 "XactErr with NYET/NAK/ACK\n");
1819197ba5f4SPaul Zimmerman 				qtd->error_count = 0;
1820197ba5f4SPaul Zimmerman 			} else {
1821197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1822197ba5f4SPaul Zimmerman 					 "XactErr without NYET/NAK/ACK\n");
1823197ba5f4SPaul Zimmerman 			}
1824197ba5f4SPaul Zimmerman 		}
1825197ba5f4SPaul Zimmerman 
1826197ba5f4SPaul Zimmerman 		/*
1827197ba5f4SPaul Zimmerman 		 * Must handle xacterr before nak or ack. Could get a xacterr
1828197ba5f4SPaul Zimmerman 		 * at the same time as either of these on a BULK/CONTROL OUT
1829197ba5f4SPaul Zimmerman 		 * that started with a PING. The xacterr takes precedence.
1830197ba5f4SPaul Zimmerman 		 */
1831197ba5f4SPaul Zimmerman 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1832197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
1833197ba5f4SPaul Zimmerman 		   hsotg->core_params->dma_desc_enable > 0) {
1834197ba5f4SPaul Zimmerman 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1835197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
1836197ba5f4SPaul Zimmerman 		   hsotg->core_params->dma_desc_enable > 0) {
1837197ba5f4SPaul Zimmerman 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1838197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_BBLERR) {
1839197ba5f4SPaul Zimmerman 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1840197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1841197ba5f4SPaul Zimmerman 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1842197ba5f4SPaul Zimmerman 	} else if (!out_nak_enh) {
1843197ba5f4SPaul Zimmerman 		if (chan->hcint & HCINTMSK_NYET) {
1844197ba5f4SPaul Zimmerman 			/*
1845197ba5f4SPaul Zimmerman 			 * Must handle nyet before nak or ack. Could get a nyet
1846197ba5f4SPaul Zimmerman 			 * at the same time as either of those on a BULK/CONTROL
1847197ba5f4SPaul Zimmerman 			 * OUT that started with a PING. The nyet takes
1848197ba5f4SPaul Zimmerman 			 * precedence.
1849197ba5f4SPaul Zimmerman 			 */
1850197ba5f4SPaul Zimmerman 			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1851197ba5f4SPaul Zimmerman 		} else if ((chan->hcint & HCINTMSK_NAK) &&
1852197ba5f4SPaul Zimmerman 			   !(hcintmsk & HCINTMSK_NAK)) {
1853197ba5f4SPaul Zimmerman 			/*
1854197ba5f4SPaul Zimmerman 			 * If nak is not masked, it's because a non-split IN
1855197ba5f4SPaul Zimmerman 			 * transfer is in an error state. In that case, the nak
1856197ba5f4SPaul Zimmerman 			 * is handled by the nak interrupt handler, not here.
1857197ba5f4SPaul Zimmerman 			 * Handle nak here for BULK/CONTROL OUT transfers, which
1858197ba5f4SPaul Zimmerman 			 * halt on a NAK to allow rewinding the buffer pointer.
1859197ba5f4SPaul Zimmerman 			 */
1860197ba5f4SPaul Zimmerman 			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1861197ba5f4SPaul Zimmerman 		} else if ((chan->hcint & HCINTMSK_ACK) &&
1862197ba5f4SPaul Zimmerman 			   !(hcintmsk & HCINTMSK_ACK)) {
1863197ba5f4SPaul Zimmerman 			/*
1864197ba5f4SPaul Zimmerman 			 * If ack is not masked, it's because a non-split IN
1865197ba5f4SPaul Zimmerman 			 * transfer is in an error state. In that case, the ack
1866197ba5f4SPaul Zimmerman 			 * is handled by the ack interrupt handler, not here.
1867197ba5f4SPaul Zimmerman 			 * Handle ack here for split transfers. Start splits
1868197ba5f4SPaul Zimmerman 			 * halt on ACK.
1869197ba5f4SPaul Zimmerman 			 */
1870197ba5f4SPaul Zimmerman 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1871197ba5f4SPaul Zimmerman 		} else {
1872197ba5f4SPaul Zimmerman 			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1873197ba5f4SPaul Zimmerman 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1874197ba5f4SPaul Zimmerman 				/*
1875197ba5f4SPaul Zimmerman 				 * A periodic transfer halted with no other
1876197ba5f4SPaul Zimmerman 				 * channel interrupts set. Assume it was halted
1877197ba5f4SPaul Zimmerman 				 * by the core because it could not be completed
1878197ba5f4SPaul Zimmerman 				 * in its scheduled (micro)frame.
1879197ba5f4SPaul Zimmerman 				 */
1880197ba5f4SPaul Zimmerman 				dev_dbg(hsotg->dev,
1881197ba5f4SPaul Zimmerman 					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
1882197ba5f4SPaul Zimmerman 					__func__, chnum);
1883197ba5f4SPaul Zimmerman 				dwc2_halt_channel(hsotg, chan, qtd,
1884197ba5f4SPaul Zimmerman 					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1885197ba5f4SPaul Zimmerman 			} else {
1886197ba5f4SPaul Zimmerman 				dev_err(hsotg->dev,
1887197ba5f4SPaul Zimmerman 					"%s: Channel %d - ChHltd set, but reason is unknown\n",
1888197ba5f4SPaul Zimmerman 					__func__, chnum);
1889197ba5f4SPaul Zimmerman 				dev_err(hsotg->dev,
1890197ba5f4SPaul Zimmerman 					"hcint 0x%08x, intsts 0x%08x\n",
1891197ba5f4SPaul Zimmerman 					chan->hcint,
1892197ba5f4SPaul Zimmerman 					readl(hsotg->regs + GINTSTS));
1893151d0cbdSNick Hudson 				goto error;
1894197ba5f4SPaul Zimmerman 			}
1895197ba5f4SPaul Zimmerman 		}
1896197ba5f4SPaul Zimmerman 	} else {
1897197ba5f4SPaul Zimmerman 		dev_info(hsotg->dev,
1898197ba5f4SPaul Zimmerman 			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1899197ba5f4SPaul Zimmerman 			 chan->hcint);
1900151d0cbdSNick Hudson error:
1901151d0cbdSNick Hudson 		/* Fall through: use 3-strikes rule */
1902151d0cbdSNick Hudson 		qtd->error_count++;
1903151d0cbdSNick Hudson 		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1904151d0cbdSNick Hudson 					  qtd, DWC2_HC_XFER_XACT_ERR);
1905151d0cbdSNick Hudson 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1906151d0cbdSNick Hudson 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1907197ba5f4SPaul Zimmerman 	}
1908197ba5f4SPaul Zimmerman }
1909197ba5f4SPaul Zimmerman 
1910197ba5f4SPaul Zimmerman /*
1911197ba5f4SPaul Zimmerman  * Handles a host channel Channel Halted interrupt
1912197ba5f4SPaul Zimmerman  *
1913197ba5f4SPaul Zimmerman  * In slave mode, this handler is called only when the driver specifically
1914197ba5f4SPaul Zimmerman  * requests a halt. This occurs during handling other host channel interrupts
1915197ba5f4SPaul Zimmerman  * (e.g. nak, xacterr, stall, nyet, etc.).
1916197ba5f4SPaul Zimmerman  *
1917197ba5f4SPaul Zimmerman  * In DMA mode, this is the interrupt that occurs when the core has finished
1918197ba5f4SPaul Zimmerman  * processing a transfer on a channel. Other host channel interrupts (except
1919197ba5f4SPaul Zimmerman  * ahberr) are disabled in DMA mode.
1920197ba5f4SPaul Zimmerman  */
1921197ba5f4SPaul Zimmerman static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1922197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1923197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1924197ba5f4SPaul Zimmerman {
1925197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1926197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1927197ba5f4SPaul Zimmerman 			 chnum);
1928197ba5f4SPaul Zimmerman 
1929197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_enable > 0) {
1930197ba5f4SPaul Zimmerman 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1931197ba5f4SPaul Zimmerman 	} else {
1932197ba5f4SPaul Zimmerman 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1933197ba5f4SPaul Zimmerman 			return;
1934197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1935197ba5f4SPaul Zimmerman 	}
1936197ba5f4SPaul Zimmerman }
1937197ba5f4SPaul Zimmerman 
1938197ba5f4SPaul Zimmerman /* Handles interrupt for a specific Host Channel */
1939197ba5f4SPaul Zimmerman static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
1940197ba5f4SPaul Zimmerman {
1941197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd;
1942197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan;
1943197ba5f4SPaul Zimmerman 	u32 hcint, hcintmsk;
1944197ba5f4SPaul Zimmerman 
1945197ba5f4SPaul Zimmerman 	chan = hsotg->hc_ptr_array[chnum];
1946197ba5f4SPaul Zimmerman 
1947197ba5f4SPaul Zimmerman 	hcint = readl(hsotg->regs + HCINT(chnum));
1948197ba5f4SPaul Zimmerman 	hcintmsk = readl(hsotg->regs + HCINTMSK(chnum));
1949197ba5f4SPaul Zimmerman 	if (!chan) {
1950197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
1951197ba5f4SPaul Zimmerman 		writel(hcint, hsotg->regs + HCINT(chnum));
1952197ba5f4SPaul Zimmerman 		return;
1953197ba5f4SPaul Zimmerman 	}
1954197ba5f4SPaul Zimmerman 
1955197ba5f4SPaul Zimmerman 	if (dbg_hc(chan)) {
1956197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
1957197ba5f4SPaul Zimmerman 			 chnum);
1958197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
1959197ba5f4SPaul Zimmerman 			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1960197ba5f4SPaul Zimmerman 			 hcint, hcintmsk, hcint & hcintmsk);
1961197ba5f4SPaul Zimmerman 	}
1962197ba5f4SPaul Zimmerman 
1963197ba5f4SPaul Zimmerman 	writel(hcint, hsotg->regs + HCINT(chnum));
1964197ba5f4SPaul Zimmerman 	chan->hcint = hcint;
1965197ba5f4SPaul Zimmerman 	hcint &= hcintmsk;
1966197ba5f4SPaul Zimmerman 
1967197ba5f4SPaul Zimmerman 	/*
1968197ba5f4SPaul Zimmerman 	 * If the channel was halted due to a dequeue, the qtd list might
1969197ba5f4SPaul Zimmerman 	 * be empty or at least the first entry will not be the active qtd.
1970197ba5f4SPaul Zimmerman 	 * In this case, take a shortcut and just release the channel.
1971197ba5f4SPaul Zimmerman 	 */
1972197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
1973197ba5f4SPaul Zimmerman 		/*
1974197ba5f4SPaul Zimmerman 		 * If the channel was halted, this should be the only
1975197ba5f4SPaul Zimmerman 		 * interrupt unmasked
1976197ba5f4SPaul Zimmerman 		 */
1977197ba5f4SPaul Zimmerman 		WARN_ON(hcint != HCINTMSK_CHHLTD);
1978197ba5f4SPaul Zimmerman 		if (hsotg->core_params->dma_desc_enable > 0)
1979197ba5f4SPaul Zimmerman 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1980197ba5f4SPaul Zimmerman 						    chan->halt_status);
1981197ba5f4SPaul Zimmerman 		else
1982197ba5f4SPaul Zimmerman 			dwc2_release_channel(hsotg, chan, NULL,
1983197ba5f4SPaul Zimmerman 					     chan->halt_status);
1984197ba5f4SPaul Zimmerman 		return;
1985197ba5f4SPaul Zimmerman 	}
1986197ba5f4SPaul Zimmerman 
1987197ba5f4SPaul Zimmerman 	if (list_empty(&chan->qh->qtd_list)) {
1988197ba5f4SPaul Zimmerman 		/*
1989197ba5f4SPaul Zimmerman 		 * TODO: Will this ever happen with the
1990197ba5f4SPaul Zimmerman 		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
1991197ba5f4SPaul Zimmerman 		 */
1992197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
1993197ba5f4SPaul Zimmerman 			chnum);
1994197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1995197ba5f4SPaul Zimmerman 			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
1996197ba5f4SPaul Zimmerman 			chan->hcint, hcintmsk, hcint);
1997197ba5f4SPaul Zimmerman 		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
1998197ba5f4SPaul Zimmerman 		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
1999197ba5f4SPaul Zimmerman 		chan->hcint = 0;
2000197ba5f4SPaul Zimmerman 		return;
2001197ba5f4SPaul Zimmerman 	}
2002197ba5f4SPaul Zimmerman 
2003197ba5f4SPaul Zimmerman 	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2004197ba5f4SPaul Zimmerman 			       qtd_list_entry);
2005197ba5f4SPaul Zimmerman 
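	/*
	 * In slave mode, only act on Channel Halted when it is the sole
	 * pending condition. If CHHLTD arrived together with other status
	 * bits, drop it from the working copy so the status handlers below
	 * run first and request the halt (with a proper halt_status)
	 * themselves.
	 */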
2006197ba5f4SPaul Zimmerman 	if (hsotg->core_params->dma_enable <= 0) {
2007197ba5f4SPaul Zimmerman 		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2008197ba5f4SPaul Zimmerman 			hcint &= ~HCINTMSK_CHHLTD;
2009197ba5f4SPaul Zimmerman 	}
2010197ba5f4SPaul Zimmerman 
2011197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_XFERCOMPL) {
2012197ba5f4SPaul Zimmerman 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2013197ba5f4SPaul Zimmerman 		/*
2014197ba5f4SPaul Zimmerman 		 * If a NYET occurred at the same time as Xfer Complete, the
2015197ba5f4SPaul Zimmerman 		 * NYET is handled by the Xfer Complete interrupt handler. We
2016197ba5f4SPaul Zimmerman 		 * don't want to call the NYET interrupt handler in this case.
2017197ba5f4SPaul Zimmerman 		 */
2018197ba5f4SPaul Zimmerman 		hcint &= ~HCINTMSK_NYET;
2019197ba5f4SPaul Zimmerman 	}
2020197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_CHHLTD)
2021197ba5f4SPaul Zimmerman 		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2022197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_AHBERR)
2023197ba5f4SPaul Zimmerman 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2024197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_STALL)
2025197ba5f4SPaul Zimmerman 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2026197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_NAK)
2027197ba5f4SPaul Zimmerman 		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2028197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_ACK)
2029197ba5f4SPaul Zimmerman 		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2030197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_NYET)
2031197ba5f4SPaul Zimmerman 		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2032197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_XACTERR)
2033197ba5f4SPaul Zimmerman 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2034197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_BBLERR)
2035197ba5f4SPaul Zimmerman 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2036197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_FRMOVRUN)
2037197ba5f4SPaul Zimmerman 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2038197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_DATATGLERR)
2039197ba5f4SPaul Zimmerman 		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2040197ba5f4SPaul Zimmerman 
2041197ba5f4SPaul Zimmerman 	chan->hcint = 0;
2042197ba5f4SPaul Zimmerman }
2043197ba5f4SPaul Zimmerman 
2044197ba5f4SPaul Zimmerman /*
2045197ba5f4SPaul Zimmerman  * This interrupt indicates that one or more host channels have a pending
2046197ba5f4SPaul Zimmerman  * interrupt. There are multiple conditions that can cause each host channel
2047197ba5f4SPaul Zimmerman  * interrupt. This function determines which conditions have occurred for each
2048197ba5f4SPaul Zimmerman  * host channel interrupt and handles them appropriately.
2049197ba5f4SPaul Zimmerman  */
2050197ba5f4SPaul Zimmerman static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2051197ba5f4SPaul Zimmerman {
2052197ba5f4SPaul Zimmerman 	u32 haint;
2053197ba5f4SPaul Zimmerman 	int i;
2054197ba5f4SPaul Zimmerman 
2055197ba5f4SPaul Zimmerman 	haint = readl(hsotg->regs + HAINT);
2056197ba5f4SPaul Zimmerman 	if (dbg_perio()) {
2057197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2058197ba5f4SPaul Zimmerman 
2059197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2060197ba5f4SPaul Zimmerman 	}
2061197ba5f4SPaul Zimmerman 
2062197ba5f4SPaul Zimmerman 	for (i = 0; i < hsotg->core_params->host_channels; i++) {
2063197ba5f4SPaul Zimmerman 		if (haint & (1 << i))
2064197ba5f4SPaul Zimmerman 			dwc2_hc_n_intr(hsotg, i);
2065197ba5f4SPaul Zimmerman 	}
2066197ba5f4SPaul Zimmerman }
2067197ba5f4SPaul Zimmerman 
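/*
 * The HAINT scan above could equivalently use the generic bitmap
 * iterator from <linux/bitops.h>. This is only a sketch of the idiom;
 * the driver keeps the open-coded loop:
 *
 *	unsigned long haint_bits = haint;
 *	int ch;
 *
 *	for_each_set_bit(ch, &haint_bits, hsotg->core_params->host_channels)
 *		dwc2_hc_n_intr(hsotg, ch);
 */
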
2068197ba5f4SPaul Zimmerman /* This function handles interrupts for the HCD */
2069197ba5f4SPaul Zimmerman irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2070197ba5f4SPaul Zimmerman {
2071197ba5f4SPaul Zimmerman 	u32 gintsts, dbg_gintsts;
2072197ba5f4SPaul Zimmerman 	irqreturn_t retval = IRQ_NONE;
2073197ba5f4SPaul Zimmerman 
2074197ba5f4SPaul Zimmerman 	if (!dwc2_is_controller_alive(hsotg)) {
2075197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev, "Controller is dead\n");
2076197ba5f4SPaul Zimmerman 		return retval;
2077197ba5f4SPaul Zimmerman 	}
2078197ba5f4SPaul Zimmerman 
2079197ba5f4SPaul Zimmerman 	spin_lock(&hsotg->lock);
2080197ba5f4SPaul Zimmerman 
2081197ba5f4SPaul Zimmerman 	/* Check if HOST Mode */
2082197ba5f4SPaul Zimmerman 	if (dwc2_is_host_mode(hsotg)) {
2083197ba5f4SPaul Zimmerman 		gintsts = dwc2_read_core_intr(hsotg);
2084197ba5f4SPaul Zimmerman 		if (!gintsts) {
2085197ba5f4SPaul Zimmerman 			spin_unlock(&hsotg->lock);
2086197ba5f4SPaul Zimmerman 			return retval;
2087197ba5f4SPaul Zimmerman 		}
2088197ba5f4SPaul Zimmerman 
2089197ba5f4SPaul Zimmerman 		retval = IRQ_HANDLED;
2090197ba5f4SPaul Zimmerman 
2091197ba5f4SPaul Zimmerman 		dbg_gintsts = gintsts;
2092197ba5f4SPaul Zimmerman #ifndef DEBUG_SOF
2093197ba5f4SPaul Zimmerman 		dbg_gintsts &= ~GINTSTS_SOF;
2094197ba5f4SPaul Zimmerman #endif
2095197ba5f4SPaul Zimmerman 		if (!dbg_perio())
2096197ba5f4SPaul Zimmerman 			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2097197ba5f4SPaul Zimmerman 					 GINTSTS_PTXFEMP);
2098197ba5f4SPaul Zimmerman 
2099197ba5f4SPaul Zimmerman 		/* Only print if there are any non-suppressed interrupts left */
2100197ba5f4SPaul Zimmerman 		if (dbg_gintsts)
2101197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2102197ba5f4SPaul Zimmerman 				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2103197ba5f4SPaul Zimmerman 				 gintsts);
2104197ba5f4SPaul Zimmerman 
2105197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_SOF)
2106197ba5f4SPaul Zimmerman 			dwc2_sof_intr(hsotg);
2107197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_RXFLVL)
2108197ba5f4SPaul Zimmerman 			dwc2_rx_fifo_level_intr(hsotg);
2109197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_NPTXFEMP)
2110197ba5f4SPaul Zimmerman 			dwc2_np_tx_fifo_empty_intr(hsotg);
2111197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_PRTINT)
2112197ba5f4SPaul Zimmerman 			dwc2_port_intr(hsotg);
2113197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_HCHINT)
2114197ba5f4SPaul Zimmerman 			dwc2_hc_intr(hsotg);
2115197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_PTXFEMP)
2116197ba5f4SPaul Zimmerman 			dwc2_perio_tx_fifo_empty_intr(hsotg);
2117197ba5f4SPaul Zimmerman 
2118197ba5f4SPaul Zimmerman 		if (dbg_gintsts) {
2119197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2120197ba5f4SPaul Zimmerman 				 "DWC OTG HCD Finished Servicing Interrupts\n");
2121197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2122197ba5f4SPaul Zimmerman 				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2123197ba5f4SPaul Zimmerman 				 readl(hsotg->regs + GINTSTS),
2124197ba5f4SPaul Zimmerman 				 readl(hsotg->regs + GINTMSK));
2125197ba5f4SPaul Zimmerman 		}
2126197ba5f4SPaul Zimmerman 	}
2127197ba5f4SPaul Zimmerman 
2128197ba5f4SPaul Zimmerman 	spin_unlock(&hsotg->lock);
2129197ba5f4SPaul Zimmerman 
2130197ba5f4SPaul Zimmerman 	return retval;
2131197ba5f4SPaul Zimmerman }
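
/*
 * Caller sketch (assumed usage, not part of this file): the HCD glue
 * in hcd.c wires this function up behind the usb_hcd ->irq callback,
 * roughly as follows; the function and variable names here are only
 * illustrative:
 *
 *	static irqreturn_t example_hcd_irq(struct usb_hcd *hcd)
 *	{
 *		struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
 *
 *		return dwc2_handle_hcd_intr(hsotg);
 *	}
 *
 * The usb_hcd core performs the actual request_irq(), so this handler
 * only needs to return IRQ_HANDLED when a host-mode interrupt was
 * serviced and IRQ_NONE otherwise.
 */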
2132