xref: /openbmc/linux/drivers/usb/dwc2/hcd_intr.c (revision 92e44bdb)
15fd54aceSGreg Kroah-Hartman // SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
2197ba5f4SPaul Zimmerman /*
3197ba5f4SPaul Zimmerman  * hcd_intr.c - DesignWare HS OTG Controller host-mode interrupt handling
4197ba5f4SPaul Zimmerman  *
5197ba5f4SPaul Zimmerman  * Copyright (C) 2004-2013 Synopsys, Inc.
6197ba5f4SPaul Zimmerman  */
7197ba5f4SPaul Zimmerman 
8197ba5f4SPaul Zimmerman /*
9197ba5f4SPaul Zimmerman  * This file contains the interrupt handlers for Host mode
10197ba5f4SPaul Zimmerman  */
11197ba5f4SPaul Zimmerman #include <linux/kernel.h>
12197ba5f4SPaul Zimmerman #include <linux/module.h>
13197ba5f4SPaul Zimmerman #include <linux/spinlock.h>
14197ba5f4SPaul Zimmerman #include <linux/interrupt.h>
15197ba5f4SPaul Zimmerman #include <linux/dma-mapping.h>
16197ba5f4SPaul Zimmerman #include <linux/io.h>
17197ba5f4SPaul Zimmerman #include <linux/slab.h>
18197ba5f4SPaul Zimmerman #include <linux/usb.h>
19197ba5f4SPaul Zimmerman 
20197ba5f4SPaul Zimmerman #include <linux/usb/hcd.h>
21197ba5f4SPaul Zimmerman #include <linux/usb/ch11.h>
22197ba5f4SPaul Zimmerman 
23197ba5f4SPaul Zimmerman #include "core.h"
24197ba5f4SPaul Zimmerman #include "hcd.h"
25197ba5f4SPaul Zimmerman 
2638d2b5fbSDouglas Anderson /*
2738d2b5fbSDouglas Anderson  * If we get this many NAKs on a split transaction we'll slow down
2838d2b5fbSDouglas Anderson  * retransmission.  A 1 here means delay after the first NAK.
2938d2b5fbSDouglas Anderson  */
3038d2b5fbSDouglas Anderson #define DWC2_NAKS_BEFORE_DELAY		3
3138d2b5fbSDouglas Anderson 
32197ba5f4SPaul Zimmerman /* This function is for debug only */
33197ba5f4SPaul Zimmerman static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
34197ba5f4SPaul Zimmerman {
35197ba5f4SPaul Zimmerman 	u16 curr_frame_number = hsotg->frame_number;
36483bb254SDouglas Anderson 	u16 expected = dwc2_frame_num_inc(hsotg->last_frame_num, 1);
37197ba5f4SPaul Zimmerman 
38483bb254SDouglas Anderson 	if (expected != curr_frame_number)
39483bb254SDouglas Anderson 		dwc2_sch_vdbg(hsotg, "MISSED SOF %04x != %04x\n",
40483bb254SDouglas Anderson 			      expected, curr_frame_number);
41483bb254SDouglas Anderson 
42483bb254SDouglas Anderson #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
43197ba5f4SPaul Zimmerman 	if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
44483bb254SDouglas Anderson 		if (expected != curr_frame_number) {
45197ba5f4SPaul Zimmerman 			hsotg->frame_num_array[hsotg->frame_num_idx] =
46197ba5f4SPaul Zimmerman 					curr_frame_number;
47197ba5f4SPaul Zimmerman 			hsotg->last_frame_num_array[hsotg->frame_num_idx] =
48197ba5f4SPaul Zimmerman 					hsotg->last_frame_num;
49197ba5f4SPaul Zimmerman 			hsotg->frame_num_idx++;
50197ba5f4SPaul Zimmerman 		}
51197ba5f4SPaul Zimmerman 	} else if (!hsotg->dumped_frame_num_array) {
52197ba5f4SPaul Zimmerman 		int i;
53197ba5f4SPaul Zimmerman 
54197ba5f4SPaul Zimmerman 		dev_info(hsotg->dev, "Frame     Last Frame\n");
55197ba5f4SPaul Zimmerman 		dev_info(hsotg->dev, "-----     ----------\n");
56197ba5f4SPaul Zimmerman 		for (i = 0; i < FRAME_NUM_ARRAY_SIZE; i++) {
57197ba5f4SPaul Zimmerman 			dev_info(hsotg->dev, "0x%04x    0x%04x\n",
58197ba5f4SPaul Zimmerman 				 hsotg->frame_num_array[i],
59197ba5f4SPaul Zimmerman 				 hsotg->last_frame_num_array[i]);
60197ba5f4SPaul Zimmerman 		}
61197ba5f4SPaul Zimmerman 		hsotg->dumped_frame_num_array = 1;
62197ba5f4SPaul Zimmerman 	}
63197ba5f4SPaul Zimmerman #endif
64483bb254SDouglas Anderson 	hsotg->last_frame_num = curr_frame_number;
65197ba5f4SPaul Zimmerman }
66197ba5f4SPaul Zimmerman 
67197ba5f4SPaul Zimmerman static void dwc2_hc_handle_tt_clear(struct dwc2_hsotg *hsotg,
68197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan,
69197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd)
70197ba5f4SPaul Zimmerman {
71d82a810eSDouglas Anderson 	struct usb_device *root_hub = dwc2_hsotg_to_hcd(hsotg)->self.root_hub;
72197ba5f4SPaul Zimmerman 	struct urb *usb_urb;
73197ba5f4SPaul Zimmerman 
74197ba5f4SPaul Zimmerman 	if (!chan->qh)
75197ba5f4SPaul Zimmerman 		return;
76197ba5f4SPaul Zimmerman 
77197ba5f4SPaul Zimmerman 	if (chan->qh->dev_speed == USB_SPEED_HIGH)
78197ba5f4SPaul Zimmerman 		return;
79197ba5f4SPaul Zimmerman 
80197ba5f4SPaul Zimmerman 	if (!qtd->urb)
81197ba5f4SPaul Zimmerman 		return;
82197ba5f4SPaul Zimmerman 
83197ba5f4SPaul Zimmerman 	usb_urb = qtd->urb->priv;
84197ba5f4SPaul Zimmerman 	if (!usb_urb || !usb_urb->dev || !usb_urb->dev->tt)
85197ba5f4SPaul Zimmerman 		return;
86197ba5f4SPaul Zimmerman 
87d82a810eSDouglas Anderson 	/*
88d82a810eSDouglas Anderson 	 * The root hub doesn't really have a TT, but Linux thinks it
89d82a810eSDouglas Anderson 	 * does because how could you have a "high speed hub" that
90d82a810eSDouglas Anderson 	 * talks directly to low speed devices without a TT?
91d82a810eSDouglas Anderson 	 * It's all lies.  Lies, I tell you.
92d82a810eSDouglas Anderson 	 */
93d82a810eSDouglas Anderson 	if (usb_urb->dev->tt->hub == root_hub)
94d82a810eSDouglas Anderson 		return;
95d82a810eSDouglas Anderson 
96197ba5f4SPaul Zimmerman 	if (qtd->urb->status != -EPIPE && qtd->urb->status != -EREMOTEIO) {
97197ba5f4SPaul Zimmerman 		chan->qh->tt_buffer_dirty = 1;
98197ba5f4SPaul Zimmerman 		if (usb_hub_clear_tt_buffer(usb_urb))
99197ba5f4SPaul Zimmerman 			/* Clear failed; let's hope things work anyway */
100197ba5f4SPaul Zimmerman 			chan->qh->tt_buffer_dirty = 0;
101197ba5f4SPaul Zimmerman 	}
102197ba5f4SPaul Zimmerman }
103197ba5f4SPaul Zimmerman 
104197ba5f4SPaul Zimmerman /*
105197ba5f4SPaul Zimmerman  * Handles the start-of-frame interrupt in host mode. Non-periodic
106197ba5f4SPaul Zimmerman  * transactions may be queued to the DWC_otg controller for the current
107197ba5f4SPaul Zimmerman  * (micro)frame. Periodic transactions may be queued to the controller
108197ba5f4SPaul Zimmerman  * for the next (micro)frame.
109197ba5f4SPaul Zimmerman  */
110197ba5f4SPaul Zimmerman static void dwc2_sof_intr(struct dwc2_hsotg *hsotg)
111197ba5f4SPaul Zimmerman {
112197ba5f4SPaul Zimmerman 	struct list_head *qh_entry;
113197ba5f4SPaul Zimmerman 	struct dwc2_qh *qh;
114197ba5f4SPaul Zimmerman 	enum dwc2_transaction_type tr_type;
115197ba5f4SPaul Zimmerman 
11629539019SDouglas Anderson 	/* Clear interrupt */
117f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, GINTSTS_SOF, GINTSTS);
11829539019SDouglas Anderson 
119197ba5f4SPaul Zimmerman #ifdef DEBUG_SOF
120197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "--Start of Frame Interrupt--\n");
121197ba5f4SPaul Zimmerman #endif
122197ba5f4SPaul Zimmerman 
123197ba5f4SPaul Zimmerman 	hsotg->frame_number = dwc2_hcd_get_frame_number(hsotg);
124197ba5f4SPaul Zimmerman 
125197ba5f4SPaul Zimmerman 	dwc2_track_missed_sofs(hsotg);
126197ba5f4SPaul Zimmerman 
127197ba5f4SPaul Zimmerman 	/* Determine whether any periodic QHs should be executed */
128197ba5f4SPaul Zimmerman 	qh_entry = hsotg->periodic_sched_inactive.next;
129197ba5f4SPaul Zimmerman 	while (qh_entry != &hsotg->periodic_sched_inactive) {
130197ba5f4SPaul Zimmerman 		qh = list_entry(qh_entry, struct dwc2_qh, qh_list_entry);
131197ba5f4SPaul Zimmerman 		qh_entry = qh_entry->next;
132ced9eee1SDouglas Anderson 		if (dwc2_frame_num_le(qh->next_active_frame,
133ced9eee1SDouglas Anderson 				      hsotg->frame_number)) {
134ced9eee1SDouglas Anderson 			dwc2_sch_vdbg(hsotg, "QH=%p ready fn=%04x, nxt=%04x\n",
135ced9eee1SDouglas Anderson 				      qh, hsotg->frame_number,
136ced9eee1SDouglas Anderson 				      qh->next_active_frame);
13774fc4a75SDouglas Anderson 
138197ba5f4SPaul Zimmerman 			/*
139197ba5f4SPaul Zimmerman 			 * Move QH to the ready list to be executed next
140197ba5f4SPaul Zimmerman 			 * (micro)frame
141197ba5f4SPaul Zimmerman 			 */
14294ef7aeeSDouglas Anderson 			list_move_tail(&qh->qh_list_entry,
143197ba5f4SPaul Zimmerman 				       &hsotg->periodic_sched_ready);
144197ba5f4SPaul Zimmerman 		}
14574fc4a75SDouglas Anderson 	}
146197ba5f4SPaul Zimmerman 	tr_type = dwc2_hcd_select_transactions(hsotg);
147197ba5f4SPaul Zimmerman 	if (tr_type != DWC2_TRANSACTION_NONE)
148197ba5f4SPaul Zimmerman 		dwc2_hcd_queue_transactions(hsotg, tr_type);
149197ba5f4SPaul Zimmerman }
150197ba5f4SPaul Zimmerman 
151197ba5f4SPaul Zimmerman /*
152197ba5f4SPaul Zimmerman  * Handles the Rx FIFO Level Interrupt, which indicates that there is
153197ba5f4SPaul Zimmerman  * at least one packet in the Rx FIFO. The packets are moved from the FIFO to
154197ba5f4SPaul Zimmerman  * memory if the DWC_otg controller is operating in Slave mode.
155197ba5f4SPaul Zimmerman  */
156197ba5f4SPaul Zimmerman static void dwc2_rx_fifo_level_intr(struct dwc2_hsotg *hsotg)
157197ba5f4SPaul Zimmerman {
158197ba5f4SPaul Zimmerman 	u32 grxsts, chnum, bcnt, dpid, pktsts;
159197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan;
160197ba5f4SPaul Zimmerman 
161197ba5f4SPaul Zimmerman 	if (dbg_perio())
162197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--RxFIFO Level Interrupt--\n");
163197ba5f4SPaul Zimmerman 
164f25c42b8SGevorg Sahakyan 	grxsts = dwc2_readl(hsotg, GRXSTSP);
165197ba5f4SPaul Zimmerman 	chnum = (grxsts & GRXSTS_HCHNUM_MASK) >> GRXSTS_HCHNUM_SHIFT;
166197ba5f4SPaul Zimmerman 	chan = hsotg->hc_ptr_array[chnum];
167197ba5f4SPaul Zimmerman 	if (!chan) {
168197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "Unable to get corresponding channel\n");
169197ba5f4SPaul Zimmerman 		return;
170197ba5f4SPaul Zimmerman 	}
171197ba5f4SPaul Zimmerman 
172197ba5f4SPaul Zimmerman 	bcnt = (grxsts & GRXSTS_BYTECNT_MASK) >> GRXSTS_BYTECNT_SHIFT;
173197ba5f4SPaul Zimmerman 	dpid = (grxsts & GRXSTS_DPID_MASK) >> GRXSTS_DPID_SHIFT;
174197ba5f4SPaul Zimmerman 	pktsts = (grxsts & GRXSTS_PKTSTS_MASK) >> GRXSTS_PKTSTS_SHIFT;
175197ba5f4SPaul Zimmerman 
176197ba5f4SPaul Zimmerman 	/* Packet Status */
177197ba5f4SPaul Zimmerman 	if (dbg_perio()) {
178197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "    Ch num = %d\n", chnum);
179197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "    Count = %d\n", bcnt);
180197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "    DPID = %d, chan.dpid = %d\n", dpid,
181197ba5f4SPaul Zimmerman 			 chan->data_pid_start);
182197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "    PStatus = %d\n", pktsts);
183197ba5f4SPaul Zimmerman 	}
184197ba5f4SPaul Zimmerman 
185197ba5f4SPaul Zimmerman 	switch (pktsts) {
186197ba5f4SPaul Zimmerman 	case GRXSTS_PKTSTS_HCHIN:
187197ba5f4SPaul Zimmerman 		/* Read the data into the host buffer */
188197ba5f4SPaul Zimmerman 		if (bcnt > 0) {
189197ba5f4SPaul Zimmerman 			dwc2_read_packet(hsotg, chan->xfer_buf, bcnt);
190197ba5f4SPaul Zimmerman 
191197ba5f4SPaul Zimmerman 			/* Update the HC fields for the next packet received */
192197ba5f4SPaul Zimmerman 			chan->xfer_count += bcnt;
193197ba5f4SPaul Zimmerman 			chan->xfer_buf += bcnt;
194197ba5f4SPaul Zimmerman 		}
195197ba5f4SPaul Zimmerman 		break;
196197ba5f4SPaul Zimmerman 	case GRXSTS_PKTSTS_HCHIN_XFER_COMP:
197197ba5f4SPaul Zimmerman 	case GRXSTS_PKTSTS_DATATOGGLEERR:
198197ba5f4SPaul Zimmerman 	case GRXSTS_PKTSTS_HCHHALTED:
199197ba5f4SPaul Zimmerman 		/* Handled in interrupt, just ignore data */
200197ba5f4SPaul Zimmerman 		break;
201197ba5f4SPaul Zimmerman 	default:
202197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev,
203197ba5f4SPaul Zimmerman 			"RxFIFO Level Interrupt: Unknown status %d\n", pktsts);
204197ba5f4SPaul Zimmerman 		break;
205197ba5f4SPaul Zimmerman 	}
206197ba5f4SPaul Zimmerman }
207197ba5f4SPaul Zimmerman 
208197ba5f4SPaul Zimmerman /*
209197ba5f4SPaul Zimmerman  * This interrupt occurs when the non-periodic Tx FIFO is half-empty. More
210197ba5f4SPaul Zimmerman  * data packets may be written to the FIFO for OUT transfers. More requests
211197ba5f4SPaul Zimmerman  * may be written to the non-periodic request queue for IN transfers. This
212197ba5f4SPaul Zimmerman  * interrupt is enabled only in Slave mode.
213197ba5f4SPaul Zimmerman  */
214197ba5f4SPaul Zimmerman static void dwc2_np_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
215197ba5f4SPaul Zimmerman {
216197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "--Non-Periodic TxFIFO Empty Interrupt--\n");
217197ba5f4SPaul Zimmerman 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_NON_PERIODIC);
218197ba5f4SPaul Zimmerman }
219197ba5f4SPaul Zimmerman 
220197ba5f4SPaul Zimmerman /*
221197ba5f4SPaul Zimmerman  * This interrupt occurs when the periodic Tx FIFO is half-empty. More data
222197ba5f4SPaul Zimmerman  * packets may be written to the FIFO for OUT transfers. More requests may be
223197ba5f4SPaul Zimmerman  * written to the periodic request queue for IN transfers. This interrupt is
224197ba5f4SPaul Zimmerman  * enabled only in Slave mode.
225197ba5f4SPaul Zimmerman  */
226197ba5f4SPaul Zimmerman static void dwc2_perio_tx_fifo_empty_intr(struct dwc2_hsotg *hsotg)
227197ba5f4SPaul Zimmerman {
228197ba5f4SPaul Zimmerman 	if (dbg_perio())
229197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Periodic TxFIFO Empty Interrupt--\n");
230197ba5f4SPaul Zimmerman 	dwc2_hcd_queue_transactions(hsotg, DWC2_TRANSACTION_PERIODIC);
231197ba5f4SPaul Zimmerman }
232197ba5f4SPaul Zimmerman 
233197ba5f4SPaul Zimmerman static void dwc2_hprt0_enable(struct dwc2_hsotg *hsotg, u32 hprt0,
234197ba5f4SPaul Zimmerman 			      u32 *hprt0_modify)
235197ba5f4SPaul Zimmerman {
236bea8e86cSJohn Youn 	struct dwc2_core_params *params = &hsotg->params;
237197ba5f4SPaul Zimmerman 	int do_reset = 0;
238197ba5f4SPaul Zimmerman 	u32 usbcfg;
239197ba5f4SPaul Zimmerman 	u32 prtspd;
240197ba5f4SPaul Zimmerman 	u32 hcfg;
241197ba5f4SPaul Zimmerman 	u32 fslspclksel;
242197ba5f4SPaul Zimmerman 	u32 hfir;
243197ba5f4SPaul Zimmerman 
244197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "%s(%p)\n", __func__, hsotg);
245197ba5f4SPaul Zimmerman 
246197ba5f4SPaul Zimmerman 	/* Recalculate HFIR.FrInterval every time the port is enabled */
247f25c42b8SGevorg Sahakyan 	hfir = dwc2_readl(hsotg, HFIR);
248197ba5f4SPaul Zimmerman 	hfir &= ~HFIR_FRINT_MASK;
249197ba5f4SPaul Zimmerman 	hfir |= dwc2_calc_frame_interval(hsotg) << HFIR_FRINT_SHIFT &
250197ba5f4SPaul Zimmerman 		HFIR_FRINT_MASK;
251f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, hfir, HFIR);
252197ba5f4SPaul Zimmerman 
253197ba5f4SPaul Zimmerman 	/* Check if we need to adjust the PHY clock speed for low power */
254197ba5f4SPaul Zimmerman 	if (!params->host_support_fs_ls_low_power) {
255197ba5f4SPaul Zimmerman 		/* Port has been enabled, set the reset change flag */
256197ba5f4SPaul Zimmerman 		hsotg->flags.b.port_reset_change = 1;
257197ba5f4SPaul Zimmerman 		return;
258197ba5f4SPaul Zimmerman 	}
259197ba5f4SPaul Zimmerman 
260f25c42b8SGevorg Sahakyan 	usbcfg = dwc2_readl(hsotg, GUSBCFG);
261197ba5f4SPaul Zimmerman 	prtspd = (hprt0 & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
262197ba5f4SPaul Zimmerman 
263197ba5f4SPaul Zimmerman 	if (prtspd == HPRT0_SPD_LOW_SPEED || prtspd == HPRT0_SPD_FULL_SPEED) {
264197ba5f4SPaul Zimmerman 		/* Low power */
265197ba5f4SPaul Zimmerman 		if (!(usbcfg & GUSBCFG_PHY_LP_CLK_SEL)) {
266197ba5f4SPaul Zimmerman 			/* Set PHY low power clock select for FS/LS devices */
267197ba5f4SPaul Zimmerman 			usbcfg |= GUSBCFG_PHY_LP_CLK_SEL;
268f25c42b8SGevorg Sahakyan 			dwc2_writel(hsotg, usbcfg, GUSBCFG);
269197ba5f4SPaul Zimmerman 			do_reset = 1;
270197ba5f4SPaul Zimmerman 		}
271197ba5f4SPaul Zimmerman 
272f25c42b8SGevorg Sahakyan 		hcfg = dwc2_readl(hsotg, HCFG);
273197ba5f4SPaul Zimmerman 		fslspclksel = (hcfg & HCFG_FSLSPCLKSEL_MASK) >>
274197ba5f4SPaul Zimmerman 			      HCFG_FSLSPCLKSEL_SHIFT;
275197ba5f4SPaul Zimmerman 
276197ba5f4SPaul Zimmerman 		if (prtspd == HPRT0_SPD_LOW_SPEED &&
27795832c00SJohn Youn 		    params->host_ls_low_power_phy_clk) {
278197ba5f4SPaul Zimmerman 			/* 6 MHZ */
279197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
280197ba5f4SPaul Zimmerman 				 "FS_PHY programming HCFG to 6 MHz\n");
281197ba5f4SPaul Zimmerman 			if (fslspclksel != HCFG_FSLSPCLKSEL_6_MHZ) {
282197ba5f4SPaul Zimmerman 				fslspclksel = HCFG_FSLSPCLKSEL_6_MHZ;
283197ba5f4SPaul Zimmerman 				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
284197ba5f4SPaul Zimmerman 				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
285f25c42b8SGevorg Sahakyan 				dwc2_writel(hsotg, hcfg, HCFG);
286197ba5f4SPaul Zimmerman 				do_reset = 1;
287197ba5f4SPaul Zimmerman 			}
288197ba5f4SPaul Zimmerman 		} else {
289197ba5f4SPaul Zimmerman 			/* 48 MHZ */
290197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
291197ba5f4SPaul Zimmerman 				 "FS_PHY programming HCFG to 48 MHz\n");
292197ba5f4SPaul Zimmerman 			if (fslspclksel != HCFG_FSLSPCLKSEL_48_MHZ) {
293197ba5f4SPaul Zimmerman 				fslspclksel = HCFG_FSLSPCLKSEL_48_MHZ;
294197ba5f4SPaul Zimmerman 				hcfg &= ~HCFG_FSLSPCLKSEL_MASK;
295197ba5f4SPaul Zimmerman 				hcfg |= fslspclksel << HCFG_FSLSPCLKSEL_SHIFT;
296f25c42b8SGevorg Sahakyan 				dwc2_writel(hsotg, hcfg, HCFG);
297197ba5f4SPaul Zimmerman 				do_reset = 1;
298197ba5f4SPaul Zimmerman 			}
299197ba5f4SPaul Zimmerman 		}
300197ba5f4SPaul Zimmerman 	} else {
301197ba5f4SPaul Zimmerman 		/* Not low power */
302197ba5f4SPaul Zimmerman 		if (usbcfg & GUSBCFG_PHY_LP_CLK_SEL) {
303197ba5f4SPaul Zimmerman 			usbcfg &= ~GUSBCFG_PHY_LP_CLK_SEL;
304f25c42b8SGevorg Sahakyan 			dwc2_writel(hsotg, usbcfg, GUSBCFG);
305197ba5f4SPaul Zimmerman 			do_reset = 1;
306197ba5f4SPaul Zimmerman 		}
307197ba5f4SPaul Zimmerman 	}
308197ba5f4SPaul Zimmerman 
309197ba5f4SPaul Zimmerman 	if (do_reset) {
310197ba5f4SPaul Zimmerman 		*hprt0_modify |= HPRT0_RST;
311f25c42b8SGevorg Sahakyan 		dwc2_writel(hsotg, *hprt0_modify, HPRT0);
312197ba5f4SPaul Zimmerman 		queue_delayed_work(hsotg->wq_otg, &hsotg->reset_work,
313197ba5f4SPaul Zimmerman 				   msecs_to_jiffies(60));
314197ba5f4SPaul Zimmerman 	} else {
315197ba5f4SPaul Zimmerman 		/* Port has been enabled, set the reset change flag */
316197ba5f4SPaul Zimmerman 		hsotg->flags.b.port_reset_change = 1;
317197ba5f4SPaul Zimmerman 	}
318197ba5f4SPaul Zimmerman }
319197ba5f4SPaul Zimmerman 
320197ba5f4SPaul Zimmerman /*
321197ba5f4SPaul Zimmerman  * There are multiple conditions that can cause a port interrupt. This function
322197ba5f4SPaul Zimmerman  * determines which interrupt conditions have occurred and handles them
323197ba5f4SPaul Zimmerman  * appropriately.
324197ba5f4SPaul Zimmerman  */
325197ba5f4SPaul Zimmerman static void dwc2_port_intr(struct dwc2_hsotg *hsotg)
326197ba5f4SPaul Zimmerman {
327197ba5f4SPaul Zimmerman 	u32 hprt0;
328197ba5f4SPaul Zimmerman 	u32 hprt0_modify;
329197ba5f4SPaul Zimmerman 
330197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "--Port Interrupt--\n");
331197ba5f4SPaul Zimmerman 
332f25c42b8SGevorg Sahakyan 	hprt0 = dwc2_readl(hsotg, HPRT0);
333197ba5f4SPaul Zimmerman 	hprt0_modify = hprt0;
334197ba5f4SPaul Zimmerman 
335197ba5f4SPaul Zimmerman 	/*
336197ba5f4SPaul Zimmerman 	 * Clear appropriate bits in HPRT0 to clear the interrupt bit in
337197ba5f4SPaul Zimmerman 	 * GINTSTS
338197ba5f4SPaul Zimmerman 	 */
339197ba5f4SPaul Zimmerman 	hprt0_modify &= ~(HPRT0_ENA | HPRT0_CONNDET | HPRT0_ENACHG |
340197ba5f4SPaul Zimmerman 			  HPRT0_OVRCURRCHG);
341197ba5f4SPaul Zimmerman 
342197ba5f4SPaul Zimmerman 	/*
343197ba5f4SPaul Zimmerman 	 * Port Connect Detected
344197ba5f4SPaul Zimmerman 	 * Set flag and clear if detected
345197ba5f4SPaul Zimmerman 	 */
346197ba5f4SPaul Zimmerman 	if (hprt0 & HPRT0_CONNDET) {
347f25c42b8SGevorg Sahakyan 		dwc2_writel(hsotg, hprt0_modify | HPRT0_CONNDET, HPRT0);
34829539019SDouglas Anderson 
349197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
350197ba5f4SPaul Zimmerman 			 "--Port Interrupt HPRT0=0x%08x Port Connect Detected--\n",
351197ba5f4SPaul Zimmerman 			 hprt0);
3526a659531SDouglas Anderson 		dwc2_hcd_connect(hsotg);
353197ba5f4SPaul Zimmerman 
354197ba5f4SPaul Zimmerman 		/*
355197ba5f4SPaul Zimmerman 		 * The Hub driver asserts a reset when it sees port connect
356197ba5f4SPaul Zimmerman 		 * status change flag
357197ba5f4SPaul Zimmerman 		 */
358197ba5f4SPaul Zimmerman 	}
359197ba5f4SPaul Zimmerman 
360197ba5f4SPaul Zimmerman 	/*
361197ba5f4SPaul Zimmerman 	 * Port Enable Changed
362197ba5f4SPaul Zimmerman 	 * Clear if detected - Set internal flag if disabled
363197ba5f4SPaul Zimmerman 	 */
364197ba5f4SPaul Zimmerman 	if (hprt0 & HPRT0_ENACHG) {
365f25c42b8SGevorg Sahakyan 		dwc2_writel(hsotg, hprt0_modify | HPRT0_ENACHG, HPRT0);
366197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
367197ba5f4SPaul Zimmerman 			 "  --Port Interrupt HPRT0=0x%08x Port Enable Changed (now %d)--\n",
368197ba5f4SPaul Zimmerman 			 hprt0, !!(hprt0 & HPRT0_ENA));
369fbb9e22bSMian Yousaf Kaukab 		if (hprt0 & HPRT0_ENA) {
370fbb9e22bSMian Yousaf Kaukab 			hsotg->new_connection = true;
371197ba5f4SPaul Zimmerman 			dwc2_hprt0_enable(hsotg, hprt0, &hprt0_modify);
372fbb9e22bSMian Yousaf Kaukab 		} else {
373197ba5f4SPaul Zimmerman 			hsotg->flags.b.port_enable_change = 1;
374bea8e86cSJohn Youn 			if (hsotg->params.dma_desc_fs_enable) {
375fbb9e22bSMian Yousaf Kaukab 				u32 hcfg;
376fbb9e22bSMian Yousaf Kaukab 
37795832c00SJohn Youn 				hsotg->params.dma_desc_enable = false;
378fbb9e22bSMian Yousaf Kaukab 				hsotg->new_connection = false;
379f25c42b8SGevorg Sahakyan 				hcfg = dwc2_readl(hsotg, HCFG);
380fbb9e22bSMian Yousaf Kaukab 				hcfg &= ~HCFG_DESCDMA;
381f25c42b8SGevorg Sahakyan 				dwc2_writel(hsotg, hcfg, HCFG);
382fbb9e22bSMian Yousaf Kaukab 			}
383fbb9e22bSMian Yousaf Kaukab 		}
384197ba5f4SPaul Zimmerman 	}
385197ba5f4SPaul Zimmerman 
386197ba5f4SPaul Zimmerman 	/* Overcurrent Change Interrupt */
387197ba5f4SPaul Zimmerman 	if (hprt0 & HPRT0_OVRCURRCHG) {
388f25c42b8SGevorg Sahakyan 		dwc2_writel(hsotg, hprt0_modify | HPRT0_OVRCURRCHG,
389f25c42b8SGevorg Sahakyan 			    HPRT0);
390197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
391197ba5f4SPaul Zimmerman 			 "  --Port Interrupt HPRT0=0x%08x Port Overcurrent Changed--\n",
392197ba5f4SPaul Zimmerman 			 hprt0);
393197ba5f4SPaul Zimmerman 		hsotg->flags.b.port_over_current_change = 1;
394197ba5f4SPaul Zimmerman 	}
395197ba5f4SPaul Zimmerman }
396197ba5f4SPaul Zimmerman 
397197ba5f4SPaul Zimmerman /*
398197ba5f4SPaul Zimmerman  * Gets the actual length of a transfer after the transfer halts. halt_status
399197ba5f4SPaul Zimmerman  * holds the reason for the halt.
400197ba5f4SPaul Zimmerman  *
401197ba5f4SPaul Zimmerman  * For IN transfers where halt_status is DWC2_HC_XFER_COMPLETE, *short_read
402197ba5f4SPaul Zimmerman  * is set to 1 upon return if less than the requested number of bytes were
403197ba5f4SPaul Zimmerman  * transferred. short_read may also be NULL on entry, in which case it remains
404197ba5f4SPaul Zimmerman  * unchanged.
405197ba5f4SPaul Zimmerman  */
406197ba5f4SPaul Zimmerman static u32 dwc2_get_actual_xfer_length(struct dwc2_hsotg *hsotg,
407197ba5f4SPaul Zimmerman 				       struct dwc2_host_chan *chan, int chnum,
408197ba5f4SPaul Zimmerman 				       struct dwc2_qtd *qtd,
409197ba5f4SPaul Zimmerman 				       enum dwc2_halt_status halt_status,
410197ba5f4SPaul Zimmerman 				       int *short_read)
411197ba5f4SPaul Zimmerman {
412197ba5f4SPaul Zimmerman 	u32 hctsiz, count, length;
413197ba5f4SPaul Zimmerman 
414f25c42b8SGevorg Sahakyan 	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
415197ba5f4SPaul Zimmerman 
416197ba5f4SPaul Zimmerman 	if (halt_status == DWC2_HC_XFER_COMPLETE) {
417197ba5f4SPaul Zimmerman 		if (chan->ep_is_in) {
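			/*
			 * hctsiz.xfersize holds the bytes still outstanding,
			 * so the received length is the programmed transfer
			 * length minus that remainder.
			 */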
418197ba5f4SPaul Zimmerman 			count = (hctsiz & TSIZ_XFERSIZE_MASK) >>
419197ba5f4SPaul Zimmerman 				TSIZ_XFERSIZE_SHIFT;
420197ba5f4SPaul Zimmerman 			length = chan->xfer_len - count;
4219da51974SJohn Youn 			if (short_read)
422197ba5f4SPaul Zimmerman 				*short_read = (count != 0);
423197ba5f4SPaul Zimmerman 		} else if (chan->qh->do_split) {
424197ba5f4SPaul Zimmerman 			length = qtd->ssplit_out_xfer_count;
425197ba5f4SPaul Zimmerman 		} else {
426197ba5f4SPaul Zimmerman 			length = chan->xfer_len;
427197ba5f4SPaul Zimmerman 		}
428197ba5f4SPaul Zimmerman 	} else {
429197ba5f4SPaul Zimmerman 		/*
430197ba5f4SPaul Zimmerman 		 * Must use the hctsiz.pktcnt field to determine how much data
431197ba5f4SPaul Zimmerman 		 * has been transferred. This field reflects the number of
432197ba5f4SPaul Zimmerman 		 * packets that have been transferred via the USB. This is
433197ba5f4SPaul Zimmerman 		 * always an integral number of packets if the transfer was
434197ba5f4SPaul Zimmerman 		 * halted before its normal completion. (Can't use the
435197ba5f4SPaul Zimmerman 		 * hctsiz.xfersize field because that reflects the number of
436197ba5f4SPaul Zimmerman 		 * bytes transferred via the AHB, not the USB).
437197ba5f4SPaul Zimmerman 		 */
438197ba5f4SPaul Zimmerman 		count = (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT;
439197ba5f4SPaul Zimmerman 		length = (chan->start_pkt_count - count) * chan->max_packet;
440197ba5f4SPaul Zimmerman 	}
441197ba5f4SPaul Zimmerman 
442197ba5f4SPaul Zimmerman 	return length;
443197ba5f4SPaul Zimmerman }
444197ba5f4SPaul Zimmerman 
445197ba5f4SPaul Zimmerman /**
446197ba5f4SPaul Zimmerman  * dwc2_update_urb_state() - Updates the state of the URB after a Transfer
447197ba5f4SPaul Zimmerman  * Complete interrupt on the host channel. Updates the actual_length field
448197ba5f4SPaul Zimmerman  * of the URB based on the number of bytes transferred via the host channel.
449197ba5f4SPaul Zimmerman  * Sets the URB status if the data transfer is finished.
450197ba5f4SPaul Zimmerman  *
4516fb914d7SGrigor Tovmasyan  * @hsotg: Programming view of the DWC_otg controller
4526fb914d7SGrigor Tovmasyan  * @chan: Programming view of host channel
4536fb914d7SGrigor Tovmasyan  * @chnum: Channel number
4546fb914d7SGrigor Tovmasyan  * @urb: Processing URB
4556fb914d7SGrigor Tovmasyan  * @qtd: Queue transfer descriptor
4566fb914d7SGrigor Tovmasyan  *
457197ba5f4SPaul Zimmerman  * Return: 1 if the data transfer specified by the URB is completely finished,
458197ba5f4SPaul Zimmerman  * 0 otherwise
459197ba5f4SPaul Zimmerman  */
460197ba5f4SPaul Zimmerman static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
461197ba5f4SPaul Zimmerman 				 struct dwc2_host_chan *chan, int chnum,
462197ba5f4SPaul Zimmerman 				 struct dwc2_hcd_urb *urb,
463197ba5f4SPaul Zimmerman 				 struct dwc2_qtd *qtd)
464197ba5f4SPaul Zimmerman {
465197ba5f4SPaul Zimmerman 	u32 hctsiz;
466197ba5f4SPaul Zimmerman 	int xfer_done = 0;
467197ba5f4SPaul Zimmerman 	int short_read = 0;
468197ba5f4SPaul Zimmerman 	int xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
469197ba5f4SPaul Zimmerman 						      DWC2_HC_XFER_COMPLETE,
470197ba5f4SPaul Zimmerman 						      &short_read);
471197ba5f4SPaul Zimmerman 
472197ba5f4SPaul Zimmerman 	if (urb->actual_length + xfer_length > urb->length) {
4731a9e38caSGuenter Roeck 		dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
474197ba5f4SPaul Zimmerman 		xfer_length = urb->length - urb->actual_length;
475197ba5f4SPaul Zimmerman 	}
476197ba5f4SPaul Zimmerman 
477197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "urb->actual_length=%d xfer_length=%d\n",
478197ba5f4SPaul Zimmerman 		 urb->actual_length, xfer_length);
479197ba5f4SPaul Zimmerman 	urb->actual_length += xfer_length;
480197ba5f4SPaul Zimmerman 
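
	/*
	 * A bulk URB with URB_SEND_ZERO_PACKET that ends exactly on a
	 * max-packet boundary still owes a zero-length packet, so the
	 * transfer is not reported as done yet.
	 */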
481197ba5f4SPaul Zimmerman 	if (xfer_length && chan->ep_type == USB_ENDPOINT_XFER_BULK &&
482197ba5f4SPaul Zimmerman 	    (urb->flags & URB_SEND_ZERO_PACKET) &&
483197ba5f4SPaul Zimmerman 	    urb->actual_length >= urb->length &&
484197ba5f4SPaul Zimmerman 	    !(urb->length % chan->max_packet)) {
485197ba5f4SPaul Zimmerman 		xfer_done = 0;
486197ba5f4SPaul Zimmerman 	} else if (short_read || urb->actual_length >= urb->length) {
487197ba5f4SPaul Zimmerman 		xfer_done = 1;
488197ba5f4SPaul Zimmerman 		urb->status = 0;
489197ba5f4SPaul Zimmerman 	}
490197ba5f4SPaul Zimmerman 
491f25c42b8SGevorg Sahakyan 	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
492197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
493197ba5f4SPaul Zimmerman 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
494197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  chan->xfer_len %d\n", chan->xfer_len);
495197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  hctsiz.xfersize %d\n",
496197ba5f4SPaul Zimmerman 		 (hctsiz & TSIZ_XFERSIZE_MASK) >> TSIZ_XFERSIZE_SHIFT);
497197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n", urb->length);
498197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n", urb->actual_length);
499197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  short_read %d, xfer_done %d\n", short_read,
500197ba5f4SPaul Zimmerman 		 xfer_done);
501197ba5f4SPaul Zimmerman 
502197ba5f4SPaul Zimmerman 	return xfer_done;
503197ba5f4SPaul Zimmerman }
504197ba5f4SPaul Zimmerman 
505197ba5f4SPaul Zimmerman /*
506197ba5f4SPaul Zimmerman  * Save the starting data toggle for the next transfer. The data toggle is
507197ba5f4SPaul Zimmerman  * saved in the QH for non-control transfers and it's saved in the QTD for
508197ba5f4SPaul Zimmerman  * control transfers.
509197ba5f4SPaul Zimmerman  */
510197ba5f4SPaul Zimmerman void dwc2_hcd_save_data_toggle(struct dwc2_hsotg *hsotg,
511197ba5f4SPaul Zimmerman 			       struct dwc2_host_chan *chan, int chnum,
512197ba5f4SPaul Zimmerman 			       struct dwc2_qtd *qtd)
513197ba5f4SPaul Zimmerman {
514f25c42b8SGevorg Sahakyan 	u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
515197ba5f4SPaul Zimmerman 	u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
516197ba5f4SPaul Zimmerman 
517197ba5f4SPaul Zimmerman 	if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
51862943b7dSTang, Jianqiang 		if (WARN(!chan || !chan->qh,
51962943b7dSTang, Jianqiang 			 "chan->qh must be specified for non-control eps\n"))
52062943b7dSTang, Jianqiang 			return;
52162943b7dSTang, Jianqiang 
522197ba5f4SPaul Zimmerman 		if (pid == TSIZ_SC_MC_PID_DATA0)
523197ba5f4SPaul Zimmerman 			chan->qh->data_toggle = DWC2_HC_PID_DATA0;
524197ba5f4SPaul Zimmerman 		else
525197ba5f4SPaul Zimmerman 			chan->qh->data_toggle = DWC2_HC_PID_DATA1;
526197ba5f4SPaul Zimmerman 	} else {
52762943b7dSTang, Jianqiang 		if (WARN(!qtd,
52862943b7dSTang, Jianqiang 			 "qtd must be specified for control eps\n"))
52962943b7dSTang, Jianqiang 			return;
53062943b7dSTang, Jianqiang 
531197ba5f4SPaul Zimmerman 		if (pid == TSIZ_SC_MC_PID_DATA0)
532197ba5f4SPaul Zimmerman 			qtd->data_toggle = DWC2_HC_PID_DATA0;
533197ba5f4SPaul Zimmerman 		else
534197ba5f4SPaul Zimmerman 			qtd->data_toggle = DWC2_HC_PID_DATA1;
535197ba5f4SPaul Zimmerman 	}
536197ba5f4SPaul Zimmerman }
537197ba5f4SPaul Zimmerman 
538197ba5f4SPaul Zimmerman /**
539197ba5f4SPaul Zimmerman  * dwc2_update_isoc_urb_state() - Updates the state of an Isochronous URB when
540197ba5f4SPaul Zimmerman  * the transfer is stopped for any reason. The fields of the current entry in
541197ba5f4SPaul Zimmerman  * the frame descriptor array are set based on the transfer state and the input
542197ba5f4SPaul Zimmerman  * halt_status. Completes the Isochronous URB if all the URB frames have been
543197ba5f4SPaul Zimmerman  * completed.
544197ba5f4SPaul Zimmerman  *
5456fb914d7SGrigor Tovmasyan  * @hsotg: Programming view of the DWC_otg controller
5466fb914d7SGrigor Tovmasyan  * @chan: Programming view of host channel
5476fb914d7SGrigor Tovmasyan  * @chnum: Channel number
5486fb914d7SGrigor Tovmasyan  * @halt_status: Reason for halting a host channel
5496fb914d7SGrigor Tovmasyan  * @qtd: Queue transfer descriptor
5506fb914d7SGrigor Tovmasyan  *
551197ba5f4SPaul Zimmerman  * Return: DWC2_HC_XFER_COMPLETE if there are more frames remaining to be
552197ba5f4SPaul Zimmerman  * transferred in the URB. Otherwise return DWC2_HC_XFER_URB_COMPLETE.
553197ba5f4SPaul Zimmerman  */
554197ba5f4SPaul Zimmerman static enum dwc2_halt_status dwc2_update_isoc_urb_state(
555197ba5f4SPaul Zimmerman 		struct dwc2_hsotg *hsotg, struct dwc2_host_chan *chan,
556197ba5f4SPaul Zimmerman 		int chnum, struct dwc2_qtd *qtd,
557197ba5f4SPaul Zimmerman 		enum dwc2_halt_status halt_status)
558197ba5f4SPaul Zimmerman {
559197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
560197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
561197ba5f4SPaul Zimmerman 
562197ba5f4SPaul Zimmerman 	if (!urb)
563197ba5f4SPaul Zimmerman 		return DWC2_HC_XFER_NO_HALT_STATUS;
564197ba5f4SPaul Zimmerman 
565197ba5f4SPaul Zimmerman 	frame_desc = &urb->iso_descs[qtd->isoc_frame_index];
566197ba5f4SPaul Zimmerman 
567197ba5f4SPaul Zimmerman 	switch (halt_status) {
568197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_COMPLETE:
569197ba5f4SPaul Zimmerman 		frame_desc->status = 0;
570197ba5f4SPaul Zimmerman 		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
571197ba5f4SPaul Zimmerman 					chan, chnum, qtd, halt_status, NULL);
572197ba5f4SPaul Zimmerman 		break;
573197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_FRAME_OVERRUN:
574197ba5f4SPaul Zimmerman 		urb->error_count++;
575197ba5f4SPaul Zimmerman 		if (chan->ep_is_in)
576197ba5f4SPaul Zimmerman 			frame_desc->status = -ENOSR;
577197ba5f4SPaul Zimmerman 		else
578197ba5f4SPaul Zimmerman 			frame_desc->status = -ECOMM;
579197ba5f4SPaul Zimmerman 		frame_desc->actual_length = 0;
580197ba5f4SPaul Zimmerman 		break;
581197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_BABBLE_ERR:
582197ba5f4SPaul Zimmerman 		urb->error_count++;
583197ba5f4SPaul Zimmerman 		frame_desc->status = -EOVERFLOW;
584197ba5f4SPaul Zimmerman 		/* Don't need to update actual_length in this case */
585197ba5f4SPaul Zimmerman 		break;
586197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_XACT_ERR:
587197ba5f4SPaul Zimmerman 		urb->error_count++;
588197ba5f4SPaul Zimmerman 		frame_desc->status = -EPROTO;
589197ba5f4SPaul Zimmerman 		frame_desc->actual_length = dwc2_get_actual_xfer_length(hsotg,
590197ba5f4SPaul Zimmerman 					chan, chnum, qtd, halt_status, NULL);
591197ba5f4SPaul Zimmerman 
592197ba5f4SPaul Zimmerman 		/* Skip whole frame */
593197ba5f4SPaul Zimmerman 		if (chan->qh->do_split &&
594197ba5f4SPaul Zimmerman 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
59595832c00SJohn Youn 		    hsotg->params.host_dma) {
596197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
597197ba5f4SPaul Zimmerman 			qtd->isoc_split_offset = 0;
598197ba5f4SPaul Zimmerman 		}
599197ba5f4SPaul Zimmerman 
600197ba5f4SPaul Zimmerman 		break;
601197ba5f4SPaul Zimmerman 	default:
602197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "Unhandled halt_status (%d)\n",
603197ba5f4SPaul Zimmerman 			halt_status);
604197ba5f4SPaul Zimmerman 		break;
605197ba5f4SPaul Zimmerman 	}
606197ba5f4SPaul Zimmerman 
607197ba5f4SPaul Zimmerman 	if (++qtd->isoc_frame_index == urb->packet_count) {
608197ba5f4SPaul Zimmerman 		/*
609197ba5f4SPaul Zimmerman 		 * urb->status is not used for isoc transfers. The individual
610197ba5f4SPaul Zimmerman 		 * frame_desc statuses are used instead.
611197ba5f4SPaul Zimmerman 		 */
612197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, 0);
613197ba5f4SPaul Zimmerman 		halt_status = DWC2_HC_XFER_URB_COMPLETE;
614197ba5f4SPaul Zimmerman 	} else {
615197ba5f4SPaul Zimmerman 		halt_status = DWC2_HC_XFER_COMPLETE;
616197ba5f4SPaul Zimmerman 	}
617197ba5f4SPaul Zimmerman 
618197ba5f4SPaul Zimmerman 	return halt_status;
619197ba5f4SPaul Zimmerman }
620197ba5f4SPaul Zimmerman 
621197ba5f4SPaul Zimmerman /*
622197ba5f4SPaul Zimmerman  * Frees the first QTD in the QH's list if free_qtd is 1. For non-periodic
623197ba5f4SPaul Zimmerman  * QHs, removes the QH from the active non-periodic schedule. If any QTDs are
624197ba5f4SPaul Zimmerman  * still linked to the QH, the QH is added to the end of the inactive
625197ba5f4SPaul Zimmerman  * non-periodic schedule. For periodic QHs, removes the QH from the periodic
626197ba5f4SPaul Zimmerman  * schedule if no more QTDs are linked to the QH.
627197ba5f4SPaul Zimmerman  */
628197ba5f4SPaul Zimmerman static void dwc2_deactivate_qh(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
629197ba5f4SPaul Zimmerman 			       int free_qtd)
630197ba5f4SPaul Zimmerman {
631197ba5f4SPaul Zimmerman 	int continue_split = 0;
632197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd;
633197ba5f4SPaul Zimmerman 
634197ba5f4SPaul Zimmerman 	if (dbg_qh(qh))
635197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  %s(%p,%p,%d)\n", __func__,
636197ba5f4SPaul Zimmerman 			 hsotg, qh, free_qtd);
637197ba5f4SPaul Zimmerman 
638197ba5f4SPaul Zimmerman 	if (list_empty(&qh->qtd_list)) {
639197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev, "## QTD list empty ##\n");
640197ba5f4SPaul Zimmerman 		goto no_qtd;
641197ba5f4SPaul Zimmerman 	}
642197ba5f4SPaul Zimmerman 
643197ba5f4SPaul Zimmerman 	qtd = list_first_entry(&qh->qtd_list, struct dwc2_qtd, qtd_list_entry);
644197ba5f4SPaul Zimmerman 
645197ba5f4SPaul Zimmerman 	if (qtd->complete_split)
646197ba5f4SPaul Zimmerman 		continue_split = 1;
647197ba5f4SPaul Zimmerman 	else if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_MID ||
648197ba5f4SPaul Zimmerman 		 qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_END)
649197ba5f4SPaul Zimmerman 		continue_split = 1;
650197ba5f4SPaul Zimmerman 
651197ba5f4SPaul Zimmerman 	if (free_qtd) {
652197ba5f4SPaul Zimmerman 		dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
653197ba5f4SPaul Zimmerman 		continue_split = 0;
654197ba5f4SPaul Zimmerman 	}
655197ba5f4SPaul Zimmerman 
656197ba5f4SPaul Zimmerman no_qtd:
657197ba5f4SPaul Zimmerman 	qh->channel = NULL;
658197ba5f4SPaul Zimmerman 	dwc2_hcd_qh_deactivate(hsotg, qh, continue_split);
659197ba5f4SPaul Zimmerman }
660197ba5f4SPaul Zimmerman 
661197ba5f4SPaul Zimmerman /**
662197ba5f4SPaul Zimmerman  * dwc2_release_channel() - Releases a host channel for use by other transfers
663197ba5f4SPaul Zimmerman  *
664197ba5f4SPaul Zimmerman  * @hsotg:       The HCD state structure
665197ba5f4SPaul Zimmerman  * @chan:        The host channel to release
666197ba5f4SPaul Zimmerman  * @qtd:         The QTD associated with the host channel. This QTD may be
667197ba5f4SPaul Zimmerman  *               freed if the transfer is complete or an error has occurred.
668197ba5f4SPaul Zimmerman  * @halt_status: Reason the channel is being released. This status
669197ba5f4SPaul Zimmerman  *               determines the actions taken by this function.
670197ba5f4SPaul Zimmerman  *
671197ba5f4SPaul Zimmerman  * Also attempts to select and queue more transactions since at least one host
672197ba5f4SPaul Zimmerman  * channel is available.
673197ba5f4SPaul Zimmerman  */
674197ba5f4SPaul Zimmerman static void dwc2_release_channel(struct dwc2_hsotg *hsotg,
675197ba5f4SPaul Zimmerman 				 struct dwc2_host_chan *chan,
676197ba5f4SPaul Zimmerman 				 struct dwc2_qtd *qtd,
677197ba5f4SPaul Zimmerman 				 enum dwc2_halt_status halt_status)
678197ba5f4SPaul Zimmerman {
679197ba5f4SPaul Zimmerman 	enum dwc2_transaction_type tr_type;
680197ba5f4SPaul Zimmerman 	u32 haintmsk;
681197ba5f4SPaul Zimmerman 	int free_qtd = 0;
682197ba5f4SPaul Zimmerman 
683197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
684197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  %s: channel %d, halt_status %d\n",
685197ba5f4SPaul Zimmerman 			 __func__, chan->hc_num, halt_status);
686197ba5f4SPaul Zimmerman 
687197ba5f4SPaul Zimmerman 	switch (halt_status) {
688197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_URB_COMPLETE:
689197ba5f4SPaul Zimmerman 		free_qtd = 1;
690197ba5f4SPaul Zimmerman 		break;
691197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_AHB_ERR:
692197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_STALL:
693197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_BABBLE_ERR:
694197ba5f4SPaul Zimmerman 		free_qtd = 1;
695197ba5f4SPaul Zimmerman 		break;
696197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_XACT_ERR:
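		/*
		 * Three consecutive transaction errors exhaust the retry
		 * budget, so the URB is completed with -EPROTO.
		 */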
697197ba5f4SPaul Zimmerman 		if (qtd && qtd->error_count >= 3) {
698197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
699197ba5f4SPaul Zimmerman 				 "  Complete URB with transaction error\n");
700197ba5f4SPaul Zimmerman 			free_qtd = 1;
701197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, -EPROTO);
702197ba5f4SPaul Zimmerman 		}
703197ba5f4SPaul Zimmerman 		break;
704197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_URB_DEQUEUE:
705197ba5f4SPaul Zimmerman 		/*
706197ba5f4SPaul Zimmerman 		 * The QTD has already been removed and the QH has been
707197ba5f4SPaul Zimmerman 		 * deactivated. Don't want to do anything except release the
708197ba5f4SPaul Zimmerman 		 * host channel and try to queue more transfers.
709197ba5f4SPaul Zimmerman 		 */
710197ba5f4SPaul Zimmerman 		goto cleanup;
711197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_PERIODIC_INCOMPLETE:
712197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  Complete URB with I/O error\n");
713197ba5f4SPaul Zimmerman 		free_qtd = 1;
714197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EIO);
715197ba5f4SPaul Zimmerman 		break;
716197ba5f4SPaul Zimmerman 	case DWC2_HC_XFER_NO_HALT_STATUS:
717197ba5f4SPaul Zimmerman 	default:
718197ba5f4SPaul Zimmerman 		break;
719197ba5f4SPaul Zimmerman 	}
720197ba5f4SPaul Zimmerman 
721197ba5f4SPaul Zimmerman 	dwc2_deactivate_qh(hsotg, chan->qh, free_qtd);
722197ba5f4SPaul Zimmerman 
723197ba5f4SPaul Zimmerman cleanup:
724197ba5f4SPaul Zimmerman 	/*
725197ba5f4SPaul Zimmerman 	 * Release the host channel for use by other transfers. The cleanup
726197ba5f4SPaul Zimmerman 	 * function clears the channel interrupt enables and conditions, so
727197ba5f4SPaul Zimmerman 	 * there's no need to clear the Channel Halted interrupt separately.
728197ba5f4SPaul Zimmerman 	 */
729197ba5f4SPaul Zimmerman 	if (!list_empty(&chan->hc_list_entry))
730197ba5f4SPaul Zimmerman 		list_del(&chan->hc_list_entry);
731197ba5f4SPaul Zimmerman 	dwc2_hc_cleanup(hsotg, chan);
732197ba5f4SPaul Zimmerman 	list_add_tail(&chan->hc_list_entry, &hsotg->free_hc_list);
733197ba5f4SPaul Zimmerman 
73495832c00SJohn Youn 	if (hsotg->params.uframe_sched) {
735197ba5f4SPaul Zimmerman 		hsotg->available_host_channels++;
736197ba5f4SPaul Zimmerman 	} else {
737197ba5f4SPaul Zimmerman 		switch (chan->ep_type) {
738197ba5f4SPaul Zimmerman 		case USB_ENDPOINT_XFER_CONTROL:
739197ba5f4SPaul Zimmerman 		case USB_ENDPOINT_XFER_BULK:
740197ba5f4SPaul Zimmerman 			hsotg->non_periodic_channels--;
741197ba5f4SPaul Zimmerman 			break;
742197ba5f4SPaul Zimmerman 		default:
743197ba5f4SPaul Zimmerman 			/*
744197ba5f4SPaul Zimmerman 			 * Don't release reservations for periodic channels
745197ba5f4SPaul Zimmerman 			 * here. That's done when a periodic transfer is
746197ba5f4SPaul Zimmerman 			 * descheduled (i.e. when the QH is removed from the
747197ba5f4SPaul Zimmerman 			 * periodic schedule).
748197ba5f4SPaul Zimmerman 			 */
749197ba5f4SPaul Zimmerman 			break;
750197ba5f4SPaul Zimmerman 		}
751197ba5f4SPaul Zimmerman 	}
752197ba5f4SPaul Zimmerman 
753f25c42b8SGevorg Sahakyan 	haintmsk = dwc2_readl(hsotg, HAINTMSK);
754197ba5f4SPaul Zimmerman 	haintmsk &= ~(1 << chan->hc_num);
755f25c42b8SGevorg Sahakyan 	dwc2_writel(hsotg, haintmsk, HAINTMSK);
756197ba5f4SPaul Zimmerman 
757197ba5f4SPaul Zimmerman 	/* Try to queue more transfers now that there's a free channel */
758197ba5f4SPaul Zimmerman 	tr_type = dwc2_hcd_select_transactions(hsotg);
759197ba5f4SPaul Zimmerman 	if (tr_type != DWC2_TRANSACTION_NONE)
760197ba5f4SPaul Zimmerman 		dwc2_hcd_queue_transactions(hsotg, tr_type);
761197ba5f4SPaul Zimmerman }
762197ba5f4SPaul Zimmerman 
763197ba5f4SPaul Zimmerman /*
764197ba5f4SPaul Zimmerman  * Halts a host channel. If the channel cannot be halted immediately because
765197ba5f4SPaul Zimmerman  * the request queue is full, this function ensures that the FIFO empty
766197ba5f4SPaul Zimmerman  * interrupt for the appropriate queue is enabled so that the halt request can
767197ba5f4SPaul Zimmerman  * be queued when there is space in the request queue.
768197ba5f4SPaul Zimmerman  *
769197ba5f4SPaul Zimmerman  * This function may also be called in DMA mode. In that case, the channel is
770197ba5f4SPaul Zimmerman  * simply released since the core always halts the channel automatically in
771197ba5f4SPaul Zimmerman  * DMA mode.
772197ba5f4SPaul Zimmerman  */
773197ba5f4SPaul Zimmerman static void dwc2_halt_channel(struct dwc2_hsotg *hsotg,
774197ba5f4SPaul Zimmerman 			      struct dwc2_host_chan *chan, struct dwc2_qtd *qtd,
775197ba5f4SPaul Zimmerman 			      enum dwc2_halt_status halt_status)
776197ba5f4SPaul Zimmerman {
777197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
778197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
779197ba5f4SPaul Zimmerman 
78095832c00SJohn Youn 	if (hsotg->params.host_dma) {
781197ba5f4SPaul Zimmerman 		if (dbg_hc(chan))
782197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "DMA enabled\n");
783197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
784197ba5f4SPaul Zimmerman 		return;
785197ba5f4SPaul Zimmerman 	}
786197ba5f4SPaul Zimmerman 
787197ba5f4SPaul Zimmerman 	/* Slave mode processing */
788197ba5f4SPaul Zimmerman 	dwc2_hc_halt(hsotg, chan, halt_status);
789197ba5f4SPaul Zimmerman 
790197ba5f4SPaul Zimmerman 	if (chan->halt_on_queue) {
791197ba5f4SPaul Zimmerman 		u32 gintmsk;
792197ba5f4SPaul Zimmerman 
793197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "Halt on queue\n");
794197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
795197ba5f4SPaul Zimmerman 		    chan->ep_type == USB_ENDPOINT_XFER_BULK) {
796197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "control/bulk\n");
797197ba5f4SPaul Zimmerman 			/*
798197ba5f4SPaul Zimmerman 			 * Make sure the Non-periodic Tx FIFO empty interrupt
799197ba5f4SPaul Zimmerman 			 * is enabled so that the non-periodic schedule will
800197ba5f4SPaul Zimmerman 			 * be processed
801197ba5f4SPaul Zimmerman 			 */
802f25c42b8SGevorg Sahakyan 			gintmsk = dwc2_readl(hsotg, GINTMSK);
803197ba5f4SPaul Zimmerman 			gintmsk |= GINTSTS_NPTXFEMP;
804f25c42b8SGevorg Sahakyan 			dwc2_writel(hsotg, gintmsk, GINTMSK);
805197ba5f4SPaul Zimmerman 		} else {
806197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "isoc/intr\n");
807197ba5f4SPaul Zimmerman 			/*
808197ba5f4SPaul Zimmerman 			 * Move the QH from the periodic queued schedule to
809197ba5f4SPaul Zimmerman 			 * the periodic assigned schedule. This allows the
810197ba5f4SPaul Zimmerman 			 * halt to be queued when the periodic schedule is
811197ba5f4SPaul Zimmerman 			 * processed.
812197ba5f4SPaul Zimmerman 			 */
81394ef7aeeSDouglas Anderson 			list_move_tail(&chan->qh->qh_list_entry,
814197ba5f4SPaul Zimmerman 				       &hsotg->periodic_sched_assigned);
815197ba5f4SPaul Zimmerman 
816197ba5f4SPaul Zimmerman 			/*
817197ba5f4SPaul Zimmerman 			 * Make sure the Periodic Tx FIFO Empty interrupt is
818197ba5f4SPaul Zimmerman 			 * enabled so that the periodic schedule will be
819197ba5f4SPaul Zimmerman 			 * processed
820197ba5f4SPaul Zimmerman 			 */
821f25c42b8SGevorg Sahakyan 			gintmsk = dwc2_readl(hsotg, GINTMSK);
822197ba5f4SPaul Zimmerman 			gintmsk |= GINTSTS_PTXFEMP;
823f25c42b8SGevorg Sahakyan 			dwc2_writel(hsotg, gintmsk, GINTMSK);
824197ba5f4SPaul Zimmerman 		}
825197ba5f4SPaul Zimmerman 	}
826197ba5f4SPaul Zimmerman }
827197ba5f4SPaul Zimmerman 
828197ba5f4SPaul Zimmerman /*
829197ba5f4SPaul Zimmerman  * Performs common cleanup for non-periodic transfers after a Transfer
830197ba5f4SPaul Zimmerman  * Complete interrupt. This function should be called after any endpoint type
831197ba5f4SPaul Zimmerman  * specific handling is finished to release the host channel.
832197ba5f4SPaul Zimmerman  */
833197ba5f4SPaul Zimmerman static void dwc2_complete_non_periodic_xfer(struct dwc2_hsotg *hsotg,
834197ba5f4SPaul Zimmerman 					    struct dwc2_host_chan *chan,
835197ba5f4SPaul Zimmerman 					    int chnum, struct dwc2_qtd *qtd,
836197ba5f4SPaul Zimmerman 					    enum dwc2_halt_status halt_status)
837197ba5f4SPaul Zimmerman {
838197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "%s()\n", __func__);
839197ba5f4SPaul Zimmerman 
840197ba5f4SPaul Zimmerman 	qtd->error_count = 0;
841197ba5f4SPaul Zimmerman 
842197ba5f4SPaul Zimmerman 	if (chan->hcint & HCINTMSK_NYET) {
843197ba5f4SPaul Zimmerman 		/*
844197ba5f4SPaul Zimmerman 		 * Got a NYET on the last transaction of the transfer. This
845197ba5f4SPaul Zimmerman 		 * means that the endpoint should be in the PING state at the
846197ba5f4SPaul Zimmerman 		 * beginning of the next transfer.
847197ba5f4SPaul Zimmerman 		 */
848197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "got NYET\n");
849197ba5f4SPaul Zimmerman 		chan->qh->ping_state = 1;
850197ba5f4SPaul Zimmerman 	}
851197ba5f4SPaul Zimmerman 
852197ba5f4SPaul Zimmerman 	/*
853197ba5f4SPaul Zimmerman 	 * Always halt and release the host channel to make it available for
854197ba5f4SPaul Zimmerman 	 * more transfers. There may still be more phases for a control
855197ba5f4SPaul Zimmerman 	 * transfer or more data packets for a bulk transfer at this point,
856197ba5f4SPaul Zimmerman 	 * but the host channel is still halted. A channel will be reassigned
857197ba5f4SPaul Zimmerman 	 * to the transfer when the non-periodic schedule is processed after
858197ba5f4SPaul Zimmerman 	 * the channel is released. This allows transactions to be queued
859197ba5f4SPaul Zimmerman 	 * properly via dwc2_hcd_queue_transactions, which also enables the
860197ba5f4SPaul Zimmerman 	 * Tx FIFO Empty interrupt if necessary.
861197ba5f4SPaul Zimmerman 	 */
862197ba5f4SPaul Zimmerman 	if (chan->ep_is_in) {
863197ba5f4SPaul Zimmerman 		/*
864197ba5f4SPaul Zimmerman 		 * IN transfers in Slave mode require an explicit disable to
865197ba5f4SPaul Zimmerman 		 * halt the channel. (In DMA mode, this call simply releases
866197ba5f4SPaul Zimmerman 		 * the channel.)
867197ba5f4SPaul Zimmerman 		 */
868197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
869197ba5f4SPaul Zimmerman 	} else {
870197ba5f4SPaul Zimmerman 		/*
871197ba5f4SPaul Zimmerman 		 * The channel is automatically disabled by the core for OUT
872197ba5f4SPaul Zimmerman 		 * transfers in Slave mode
873197ba5f4SPaul Zimmerman 		 */
874197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
875197ba5f4SPaul Zimmerman 	}
876197ba5f4SPaul Zimmerman }
877197ba5f4SPaul Zimmerman 
878197ba5f4SPaul Zimmerman /*
879197ba5f4SPaul Zimmerman  * Performs common cleanup for periodic transfers after a Transfer Complete
880197ba5f4SPaul Zimmerman  * interrupt. This function should be called after any endpoint type specific
881197ba5f4SPaul Zimmerman  * handling is finished to release the host channel.
882197ba5f4SPaul Zimmerman  */
883197ba5f4SPaul Zimmerman static void dwc2_complete_periodic_xfer(struct dwc2_hsotg *hsotg,
884197ba5f4SPaul Zimmerman 					struct dwc2_host_chan *chan, int chnum,
885197ba5f4SPaul Zimmerman 					struct dwc2_qtd *qtd,
886197ba5f4SPaul Zimmerman 					enum dwc2_halt_status halt_status)
887197ba5f4SPaul Zimmerman {
888f25c42b8SGevorg Sahakyan 	u32 hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
889197ba5f4SPaul Zimmerman 
890197ba5f4SPaul Zimmerman 	qtd->error_count = 0;
891197ba5f4SPaul Zimmerman 
892197ba5f4SPaul Zimmerman 	if (!chan->ep_is_in || (hctsiz & TSIZ_PKTCNT_MASK) == 0)
893197ba5f4SPaul Zimmerman 		/* Core halts channel in these cases */
894197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd, halt_status);
895197ba5f4SPaul Zimmerman 	else
896197ba5f4SPaul Zimmerman 		/* Flush any outstanding requests from the Tx queue */
897197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
898197ba5f4SPaul Zimmerman }
899197ba5f4SPaul Zimmerman 
900197ba5f4SPaul Zimmerman static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
901197ba5f4SPaul Zimmerman 				       struct dwc2_host_chan *chan, int chnum,
902197ba5f4SPaul Zimmerman 				       struct dwc2_qtd *qtd)
903197ba5f4SPaul Zimmerman {
904197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
905197ba5f4SPaul Zimmerman 	u32 len;
9069d8da857SSevak Arakelyan 	u32 hctsiz;
9079d8da857SSevak Arakelyan 	u32 pid;
908197ba5f4SPaul Zimmerman 
909197ba5f4SPaul Zimmerman 	if (!qtd->urb)
910197ba5f4SPaul Zimmerman 		return 0;
911197ba5f4SPaul Zimmerman 
912197ba5f4SPaul Zimmerman 	frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
913197ba5f4SPaul Zimmerman 	len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
914197ba5f4SPaul Zimmerman 					  DWC2_HC_XFER_COMPLETE, NULL);
91570c3c8cbSWilliam Wu 	if (!len && !qtd->isoc_split_offset) {
916197ba5f4SPaul Zimmerman 		qtd->complete_split = 0;
917197ba5f4SPaul Zimmerman 		return 0;
918197ba5f4SPaul Zimmerman 	}
919197ba5f4SPaul Zimmerman 
920197ba5f4SPaul Zimmerman 	frame_desc->actual_length += len;
921197ba5f4SPaul Zimmerman 
922af424a41SWilliam Wu 	if (chan->align_buf) {
923af424a41SWilliam Wu 		dev_vdbg(hsotg->dev, "non-aligned buffer\n");
924af424a41SWilliam Wu 		dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
925af424a41SWilliam Wu 				 DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
926af424a41SWilliam Wu 		memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
927af424a41SWilliam Wu 		       chan->qh->dw_align_buf, len);
928af424a41SWilliam Wu 	}
929af424a41SWilliam Wu 
930197ba5f4SPaul Zimmerman 	qtd->isoc_split_offset += len;
931197ba5f4SPaul Zimmerman 
932f25c42b8SGevorg Sahakyan 	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
9339d8da857SSevak Arakelyan 	pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
9349d8da857SSevak Arakelyan 
9359d8da857SSevak Arakelyan 	if (frame_desc->actual_length >= frame_desc->length || pid == 0) {
936197ba5f4SPaul Zimmerman 		frame_desc->status = 0;
937197ba5f4SPaul Zimmerman 		qtd->isoc_frame_index++;
938197ba5f4SPaul Zimmerman 		qtd->complete_split = 0;
939197ba5f4SPaul Zimmerman 		qtd->isoc_split_offset = 0;
940197ba5f4SPaul Zimmerman 	}
941197ba5f4SPaul Zimmerman 
942197ba5f4SPaul Zimmerman 	if (qtd->isoc_frame_index == qtd->urb->packet_count) {
943197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, 0);
944197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd,
945197ba5f4SPaul Zimmerman 				     DWC2_HC_XFER_URB_COMPLETE);
946197ba5f4SPaul Zimmerman 	} else {
947197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd,
948197ba5f4SPaul Zimmerman 				     DWC2_HC_XFER_NO_HALT_STATUS);
949197ba5f4SPaul Zimmerman 	}
950197ba5f4SPaul Zimmerman 
951197ba5f4SPaul Zimmerman 	return 1;	/* Indicates that channel released */
952197ba5f4SPaul Zimmerman }
953197ba5f4SPaul Zimmerman 
954197ba5f4SPaul Zimmerman /*
955197ba5f4SPaul Zimmerman  * Handles a host channel Transfer Complete interrupt. This handler may be
956197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
957197ba5f4SPaul Zimmerman  */
958197ba5f4SPaul Zimmerman static void dwc2_hc_xfercomp_intr(struct dwc2_hsotg *hsotg,
959197ba5f4SPaul Zimmerman 				  struct dwc2_host_chan *chan, int chnum,
960197ba5f4SPaul Zimmerman 				  struct dwc2_qtd *qtd)
961197ba5f4SPaul Zimmerman {
962197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
963197ba5f4SPaul Zimmerman 	enum dwc2_halt_status halt_status = DWC2_HC_XFER_COMPLETE;
9642b54fa6bSPaul Zimmerman 	int pipe_type;
965197ba5f4SPaul Zimmerman 	int urb_xfer_done;
966197ba5f4SPaul Zimmerman 
967197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
968197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
969197ba5f4SPaul Zimmerman 			 "--Host Channel %d Interrupt: Transfer Complete--\n",
970197ba5f4SPaul Zimmerman 			 chnum);
971197ba5f4SPaul Zimmerman 
9722b54fa6bSPaul Zimmerman 	if (!urb)
9732b54fa6bSPaul Zimmerman 		goto handle_xfercomp_done;
9742b54fa6bSPaul Zimmerman 
9752b54fa6bSPaul Zimmerman 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
9762b54fa6bSPaul Zimmerman 
97795832c00SJohn Youn 	if (hsotg->params.dma_desc_enable) {
978197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum, halt_status);
979197ba5f4SPaul Zimmerman 		if (pipe_type == USB_ENDPOINT_XFER_ISOC)
980197ba5f4SPaul Zimmerman 			/* Do not disable the interrupt, just clear it */
981197ba5f4SPaul Zimmerman 			return;
982197ba5f4SPaul Zimmerman 		goto handle_xfercomp_done;
983197ba5f4SPaul Zimmerman 	}
984197ba5f4SPaul Zimmerman 
985197ba5f4SPaul Zimmerman 	/* Handle xfer complete on CSPLIT */
986197ba5f4SPaul Zimmerman 	if (chan->qh->do_split) {
987197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && chan->ep_is_in &&
98895832c00SJohn Youn 		    hsotg->params.host_dma) {
989197ba5f4SPaul Zimmerman 			if (qtd->complete_split &&
990197ba5f4SPaul Zimmerman 			    dwc2_xfercomp_isoc_split_in(hsotg, chan, chnum,
991197ba5f4SPaul Zimmerman 							qtd))
992197ba5f4SPaul Zimmerman 				goto handle_xfercomp_done;
993197ba5f4SPaul Zimmerman 		} else {
994197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
995197ba5f4SPaul Zimmerman 		}
996197ba5f4SPaul Zimmerman 	}
997197ba5f4SPaul Zimmerman 
998197ba5f4SPaul Zimmerman 	/* Update the QTD and URB states */
999197ba5f4SPaul Zimmerman 	switch (pipe_type) {
1000197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1001197ba5f4SPaul Zimmerman 		switch (qtd->control_phase) {
1002197ba5f4SPaul Zimmerman 		case DWC2_CONTROL_SETUP:
1003197ba5f4SPaul Zimmerman 			if (urb->length > 0)
1004197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_DATA;
1005197ba5f4SPaul Zimmerman 			else
1006197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_STATUS;
1007197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
1008197ba5f4SPaul Zimmerman 				 "  Control setup transaction done\n");
1009197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1010197ba5f4SPaul Zimmerman 			break;
1011197ba5f4SPaul Zimmerman 		case DWC2_CONTROL_DATA:
1012197ba5f4SPaul Zimmerman 			urb_xfer_done = dwc2_update_urb_state(hsotg, chan,
1013197ba5f4SPaul Zimmerman 							      chnum, urb, qtd);
1014197ba5f4SPaul Zimmerman 			if (urb_xfer_done) {
1015197ba5f4SPaul Zimmerman 				qtd->control_phase = DWC2_CONTROL_STATUS;
1016197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1017197ba5f4SPaul Zimmerman 					 "  Control data transfer done\n");
1018197ba5f4SPaul Zimmerman 			} else {
1019197ba5f4SPaul Zimmerman 				dwc2_hcd_save_data_toggle(hsotg, chan, chnum,
1020197ba5f4SPaul Zimmerman 							  qtd);
1021197ba5f4SPaul Zimmerman 			}
1022197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1023197ba5f4SPaul Zimmerman 			break;
1024197ba5f4SPaul Zimmerman 		case DWC2_CONTROL_STATUS:
1025197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "  Control transfer complete\n");
1026197ba5f4SPaul Zimmerman 			if (urb->status == -EINPROGRESS)
1027197ba5f4SPaul Zimmerman 				urb->status = 0;
1028197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, urb->status);
1029197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1030197ba5f4SPaul Zimmerman 			break;
1031197ba5f4SPaul Zimmerman 		}
1032197ba5f4SPaul Zimmerman 
1033197ba5f4SPaul Zimmerman 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1034197ba5f4SPaul Zimmerman 						halt_status);
1035197ba5f4SPaul Zimmerman 		break;
1036197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1037197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  Bulk transfer complete\n");
1038197ba5f4SPaul Zimmerman 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1039197ba5f4SPaul Zimmerman 						      qtd);
1040197ba5f4SPaul Zimmerman 		if (urb_xfer_done) {
1041197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, urb->status);
1042197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1043197ba5f4SPaul Zimmerman 		} else {
1044197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1045197ba5f4SPaul Zimmerman 		}
1046197ba5f4SPaul Zimmerman 
1047197ba5f4SPaul Zimmerman 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1048197ba5f4SPaul Zimmerman 		dwc2_complete_non_periodic_xfer(hsotg, chan, chnum, qtd,
1049197ba5f4SPaul Zimmerman 						halt_status);
1050197ba5f4SPaul Zimmerman 		break;
1051197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1052197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "  Interrupt transfer complete\n");
1053197ba5f4SPaul Zimmerman 		urb_xfer_done = dwc2_update_urb_state(hsotg, chan, chnum, urb,
1054197ba5f4SPaul Zimmerman 						      qtd);
1055197ba5f4SPaul Zimmerman 
1056197ba5f4SPaul Zimmerman 		/*
1057197ba5f4SPaul Zimmerman 		 * Interrupt URB is done on the first transfer complete
1058197ba5f4SPaul Zimmerman 		 * interrupt
1059197ba5f4SPaul Zimmerman 		 */
1060197ba5f4SPaul Zimmerman 		if (urb_xfer_done) {
1061197ba5f4SPaul Zimmerman 			dwc2_host_complete(hsotg, qtd, urb->status);
1062197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_URB_COMPLETE;
1063197ba5f4SPaul Zimmerman 		} else {
1064197ba5f4SPaul Zimmerman 			halt_status = DWC2_HC_XFER_COMPLETE;
1065197ba5f4SPaul Zimmerman 		}
1066197ba5f4SPaul Zimmerman 
1067197ba5f4SPaul Zimmerman 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1068197ba5f4SPaul Zimmerman 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1069197ba5f4SPaul Zimmerman 					    halt_status);
1070197ba5f4SPaul Zimmerman 		break;
1071197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1072197ba5f4SPaul Zimmerman 		if (dbg_perio())
1073197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev, "  Isochronous transfer complete\n");
1074197ba5f4SPaul Zimmerman 		if (qtd->isoc_split_pos == DWC2_HCSPLT_XACTPOS_ALL)
1075197ba5f4SPaul Zimmerman 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1076b98866c2SJohn Youn 							chnum, qtd,
1077b98866c2SJohn Youn 							DWC2_HC_XFER_COMPLETE);
1078197ba5f4SPaul Zimmerman 		dwc2_complete_periodic_xfer(hsotg, chan, chnum, qtd,
1079197ba5f4SPaul Zimmerman 					    halt_status);
1080197ba5f4SPaul Zimmerman 		break;
1081197ba5f4SPaul Zimmerman 	}
1082197ba5f4SPaul Zimmerman 
1083197ba5f4SPaul Zimmerman handle_xfercomp_done:
1084197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_XFERCOMPL);
1085197ba5f4SPaul Zimmerman }
1086197ba5f4SPaul Zimmerman 
1087197ba5f4SPaul Zimmerman /*
1088197ba5f4SPaul Zimmerman  * Handles a host channel STALL interrupt. This handler may be called in
1089197ba5f4SPaul Zimmerman  * either DMA mode or Slave mode.
1090197ba5f4SPaul Zimmerman  */
1091197ba5f4SPaul Zimmerman static void dwc2_hc_stall_intr(struct dwc2_hsotg *hsotg,
1092197ba5f4SPaul Zimmerman 			       struct dwc2_host_chan *chan, int chnum,
1093197ba5f4SPaul Zimmerman 			       struct dwc2_qtd *qtd)
1094197ba5f4SPaul Zimmerman {
1095197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
10962b54fa6bSPaul Zimmerman 	int pipe_type;
1097197ba5f4SPaul Zimmerman 
1098197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: STALL Received--\n",
1099197ba5f4SPaul Zimmerman 		chnum);
1100197ba5f4SPaul Zimmerman 
110195832c00SJohn Youn 	if (hsotg->params.dma_desc_enable) {
1102197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1103197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_STALL);
1104197ba5f4SPaul Zimmerman 		goto handle_stall_done;
1105197ba5f4SPaul Zimmerman 	}
1106197ba5f4SPaul Zimmerman 
1107197ba5f4SPaul Zimmerman 	if (!urb)
1108197ba5f4SPaul Zimmerman 		goto handle_stall_halt;
1109197ba5f4SPaul Zimmerman 
11102b54fa6bSPaul Zimmerman 	pipe_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
11112b54fa6bSPaul Zimmerman 
1112197ba5f4SPaul Zimmerman 	if (pipe_type == USB_ENDPOINT_XFER_CONTROL)
1113197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1114197ba5f4SPaul Zimmerman 
1115197ba5f4SPaul Zimmerman 	if (pipe_type == USB_ENDPOINT_XFER_BULK ||
1116197ba5f4SPaul Zimmerman 	    pipe_type == USB_ENDPOINT_XFER_INT) {
1117197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EPIPE);
1118197ba5f4SPaul Zimmerman 		/*
1119197ba5f4SPaul Zimmerman 		 * USB protocol requires resetting the data toggle for bulk
1120197ba5f4SPaul Zimmerman 		 * and interrupt endpoints when a CLEAR_FEATURE(ENDPOINT_HALT)
1121197ba5f4SPaul Zimmerman 		 * setup command is issued to the endpoint. Anticipate the
1122197ba5f4SPaul Zimmerman 		 * CLEAR_FEATURE command since a STALL has occurred and reset
1123197ba5f4SPaul Zimmerman 		 * the data toggle now.
1124197ba5f4SPaul Zimmerman 		 */
1125197ba5f4SPaul Zimmerman 		chan->qh->data_toggle = 0;
1126197ba5f4SPaul Zimmerman 	}
1127197ba5f4SPaul Zimmerman 
1128197ba5f4SPaul Zimmerman handle_stall_halt:
1129197ba5f4SPaul Zimmerman 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_STALL);
1130197ba5f4SPaul Zimmerman 
1131197ba5f4SPaul Zimmerman handle_stall_done:
1132197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_STALL);
1133197ba5f4SPaul Zimmerman }
1134197ba5f4SPaul Zimmerman 
1135197ba5f4SPaul Zimmerman /*
1136197ba5f4SPaul Zimmerman  * Updates the state of the URB when a transfer has been stopped due to an
1137197ba5f4SPaul Zimmerman  * abnormal condition before the transfer completes. Modifies the
1138197ba5f4SPaul Zimmerman  * actual_length field of the URB to reflect the number of bytes that have
1139197ba5f4SPaul Zimmerman  * actually been transferred via the host channel.
1140197ba5f4SPaul Zimmerman  */
1141197ba5f4SPaul Zimmerman static void dwc2_update_urb_state_abn(struct dwc2_hsotg *hsotg,
1142197ba5f4SPaul Zimmerman 				      struct dwc2_host_chan *chan, int chnum,
1143197ba5f4SPaul Zimmerman 				      struct dwc2_hcd_urb *urb,
1144197ba5f4SPaul Zimmerman 				      struct dwc2_qtd *qtd,
1145197ba5f4SPaul Zimmerman 				      enum dwc2_halt_status halt_status)
1146197ba5f4SPaul Zimmerman {
1147197ba5f4SPaul Zimmerman 	u32 xfer_length = dwc2_get_actual_xfer_length(hsotg, chan, chnum,
1148197ba5f4SPaul Zimmerman 						      qtd, halt_status, NULL);
1149197ba5f4SPaul Zimmerman 	u32 hctsiz;
1150197ba5f4SPaul Zimmerman 
1151197ba5f4SPaul Zimmerman 	if (urb->actual_length + xfer_length > urb->length) {
1152197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
1153197ba5f4SPaul Zimmerman 		xfer_length = urb->length - urb->actual_length;
1154197ba5f4SPaul Zimmerman 	}
1155197ba5f4SPaul Zimmerman 
1156197ba5f4SPaul Zimmerman 	urb->actual_length += xfer_length;
1157197ba5f4SPaul Zimmerman 
1158f25c42b8SGevorg Sahakyan 	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1159197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "DWC_otg: %s: %s, channel %d\n",
1160197ba5f4SPaul Zimmerman 		 __func__, (chan->ep_is_in ? "IN" : "OUT"), chnum);
1161197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  chan->start_pkt_count %d\n",
1162197ba5f4SPaul Zimmerman 		 chan->start_pkt_count);
1163197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  hctsiz.pktcnt %d\n",
1164197ba5f4SPaul Zimmerman 		 (hctsiz & TSIZ_PKTCNT_MASK) >> TSIZ_PKTCNT_SHIFT);
1165197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  chan->max_packet %d\n", chan->max_packet);
1166197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  bytes_transferred %d\n",
1167197ba5f4SPaul Zimmerman 		 xfer_length);
1168197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->actual_length %d\n",
1169197ba5f4SPaul Zimmerman 		 urb->actual_length);
1170197ba5f4SPaul Zimmerman 	dev_vdbg(hsotg->dev, "  urb->transfer_buffer_length %d\n",
1171197ba5f4SPaul Zimmerman 		 urb->length);
1172197ba5f4SPaul Zimmerman }
1173197ba5f4SPaul Zimmerman 
1174197ba5f4SPaul Zimmerman /*
1175197ba5f4SPaul Zimmerman  * Handles a host channel NAK interrupt. This handler may be called in either
1176197ba5f4SPaul Zimmerman  * DMA mode or Slave mode.
1177197ba5f4SPaul Zimmerman  */
1178197ba5f4SPaul Zimmerman static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
1179197ba5f4SPaul Zimmerman 			     struct dwc2_host_chan *chan, int chnum,
1180197ba5f4SPaul Zimmerman 			     struct dwc2_qtd *qtd)
1181197ba5f4SPaul Zimmerman {
1182e499123eSGregory Herrero 	if (!qtd) {
1183e499123eSGregory Herrero 		dev_dbg(hsotg->dev, "%s: qtd is NULL\n", __func__);
1184e499123eSGregory Herrero 		return;
1185e499123eSGregory Herrero 	}
1186e499123eSGregory Herrero 
1187e499123eSGregory Herrero 	if (!qtd->urb) {
1188e499123eSGregory Herrero 		dev_dbg(hsotg->dev, "%s: qtd->urb is NULL\n", __func__);
1189e499123eSGregory Herrero 		return;
1190e499123eSGregory Herrero 	}
1191e499123eSGregory Herrero 
1192197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1193197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NAK Received--\n",
1194197ba5f4SPaul Zimmerman 			 chnum);
1195197ba5f4SPaul Zimmerman 
1196197ba5f4SPaul Zimmerman 	/*
1197197ba5f4SPaul Zimmerman 	 * Handle NAK for IN/OUT SSPLIT/CSPLIT transfers, bulk, control, and
1198197ba5f4SPaul Zimmerman 	 * interrupt. Re-start the SSPLIT transfer.
119938d2b5fbSDouglas Anderson 	 *
120038d2b5fbSDouglas Anderson 	 * Normally for non-periodic transfers we'll retry right away, but to
120138d2b5fbSDouglas Anderson 	 * avoid interrupt storms we'll wait before retrying if we've got
120238d2b5fbSDouglas Anderson 	 * several NAKs. If we didn't do this we'd retry directly from the
120338d2b5fbSDouglas Anderson 	 * interrupt handler and could end up quickly getting another
1204b3eb981bSDmitry Torokhov 	 * interrupt (another NAK), which we'd retry. Note that we do not
1205b3eb981bSDmitry Torokhov 	 * delay retries for IN parts of control requests, as those are expected
1206b3eb981bSDmitry Torokhov 	 * to complete fairly quickly, and if we delay them we risk confusing
1207b3eb981bSDmitry Torokhov 	 * the device and causing it to issue a STALL.
120838d2b5fbSDouglas Anderson 	 *
120938d2b5fbSDouglas Anderson 	 * Note that in DMA mode software only gets involved to re-send NAKed
121038d2b5fbSDouglas Anderson 	 * transfers for split transactions, so we only need to apply this
121138d2b5fbSDouglas Anderson 	 * delaying logic when handling splits. In non-DMA mode presumably we
121238d2b5fbSDouglas Anderson 	 * might want a similar delay if someone can demonstrate this problem
121338d2b5fbSDouglas Anderson 	 * affects that code path too.
1214197ba5f4SPaul Zimmerman 	 */
1215197ba5f4SPaul Zimmerman 	if (chan->do_split) {
1216197ba5f4SPaul Zimmerman 		if (chan->complete_split)
1217197ba5f4SPaul Zimmerman 			qtd->error_count = 0;
1218197ba5f4SPaul Zimmerman 		qtd->complete_split = 0;
121938d2b5fbSDouglas Anderson 		qtd->num_naks++;
1220b3eb981bSDmitry Torokhov 		qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
1221b3eb981bSDmitry Torokhov 				!(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
1222b3eb981bSDmitry Torokhov 				  chan->ep_is_in);
1223197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1224197ba5f4SPaul Zimmerman 		goto handle_nak_done;
1225197ba5f4SPaul Zimmerman 	}
1226197ba5f4SPaul Zimmerman 
1227197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1228197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1229197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
123095832c00SJohn Youn 		if (hsotg->params.host_dma && chan->ep_is_in) {
1231197ba5f4SPaul Zimmerman 			/*
1232197ba5f4SPaul Zimmerman 			 * NAK interrupts are enabled on bulk/control IN
1233197ba5f4SPaul Zimmerman 			 * transfers in DMA mode for the sole purpose of
1234197ba5f4SPaul Zimmerman 			 * resetting the error count after a transaction error
1235197ba5f4SPaul Zimmerman 			 * occurs. The core will continue transferring data.
1236197ba5f4SPaul Zimmerman 			 */
1237197ba5f4SPaul Zimmerman 			qtd->error_count = 0;
1238197ba5f4SPaul Zimmerman 			break;
1239197ba5f4SPaul Zimmerman 		}
1240197ba5f4SPaul Zimmerman 
1241197ba5f4SPaul Zimmerman 		/*
1242197ba5f4SPaul Zimmerman 		 * NAK interrupts normally occur during OUT transfers in DMA
1243197ba5f4SPaul Zimmerman 		 * or Slave mode. For IN transfers, more requests will be
1244197ba5f4SPaul Zimmerman 		 * queued as request queue space is available.
1245197ba5f4SPaul Zimmerman 		 */
1246197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1247197ba5f4SPaul Zimmerman 
1248197ba5f4SPaul Zimmerman 		if (!chan->qh->ping_state) {
1249197ba5f4SPaul Zimmerman 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1250197ba5f4SPaul Zimmerman 						  qtd, DWC2_HC_XFER_NAK);
1251197ba5f4SPaul Zimmerman 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1252197ba5f4SPaul Zimmerman 
1253197ba5f4SPaul Zimmerman 			if (chan->speed == USB_SPEED_HIGH)
1254197ba5f4SPaul Zimmerman 				chan->qh->ping_state = 1;
1255197ba5f4SPaul Zimmerman 		}
1256197ba5f4SPaul Zimmerman 
1257197ba5f4SPaul Zimmerman 		/*
1258197ba5f4SPaul Zimmerman 		 * Halt the channel so the transfer can be re-started from
1259197ba5f4SPaul Zimmerman 		 * the appropriate point or the PING protocol will
1260197ba5f4SPaul Zimmerman 		 * start/continue
1261197ba5f4SPaul Zimmerman 		 */
1262197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1263197ba5f4SPaul Zimmerman 		break;
1264197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1265197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1266197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
1267197ba5f4SPaul Zimmerman 		break;
1268197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1269197ba5f4SPaul Zimmerman 		/* Should never get called for isochronous transfers */
1270197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "NAK interrupt for ISOC transfer\n");
1271197ba5f4SPaul Zimmerman 		break;
1272197ba5f4SPaul Zimmerman 	}
1273197ba5f4SPaul Zimmerman 
1274197ba5f4SPaul Zimmerman handle_nak_done:
1275197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_NAK);
1276197ba5f4SPaul Zimmerman }
1277197ba5f4SPaul Zimmerman 
1278197ba5f4SPaul Zimmerman /*
1279197ba5f4SPaul Zimmerman  * Handles a host channel ACK interrupt. This interrupt is enabled when
1280197ba5f4SPaul Zimmerman  * performing the PING protocol in Slave mode, when errors occur during
1281197ba5f4SPaul Zimmerman  * either Slave mode or DMA mode, and during Start Split transactions.
1282197ba5f4SPaul Zimmerman  */
1283197ba5f4SPaul Zimmerman static void dwc2_hc_ack_intr(struct dwc2_hsotg *hsotg,
1284197ba5f4SPaul Zimmerman 			     struct dwc2_host_chan *chan, int chnum,
1285197ba5f4SPaul Zimmerman 			     struct dwc2_qtd *qtd)
1286197ba5f4SPaul Zimmerman {
1287197ba5f4SPaul Zimmerman 	struct dwc2_hcd_iso_packet_desc *frame_desc;
1288197ba5f4SPaul Zimmerman 
1289197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1290197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: ACK Received--\n",
1291197ba5f4SPaul Zimmerman 			 chnum);
1292197ba5f4SPaul Zimmerman 
1293197ba5f4SPaul Zimmerman 	if (chan->do_split) {
1294197ba5f4SPaul Zimmerman 		/* Handle ACK on SSPLIT. ACK should not occur in CSPLIT. */
1295197ba5f4SPaul Zimmerman 		if (!chan->ep_is_in &&
1296197ba5f4SPaul Zimmerman 		    chan->data_pid_start != DWC2_HC_PID_SETUP)
1297197ba5f4SPaul Zimmerman 			qtd->ssplit_out_xfer_count = chan->xfer_len;
1298197ba5f4SPaul Zimmerman 
1299197ba5f4SPaul Zimmerman 		if (chan->ep_type != USB_ENDPOINT_XFER_ISOC || chan->ep_is_in) {
1300197ba5f4SPaul Zimmerman 			qtd->complete_split = 1;
1301197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1302197ba5f4SPaul Zimmerman 		} else {
1303197ba5f4SPaul Zimmerman 			/* ISOC OUT */
1304197ba5f4SPaul Zimmerman 			switch (chan->xact_pos) {
1305197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_ALL:
1306197ba5f4SPaul Zimmerman 				break;
1307197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_END:
1308197ba5f4SPaul Zimmerman 				qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
1309197ba5f4SPaul Zimmerman 				qtd->isoc_split_offset = 0;
1310197ba5f4SPaul Zimmerman 				break;
1311197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_BEGIN:
1312197ba5f4SPaul Zimmerman 			case DWC2_HCSPLT_XACTPOS_MID:
1313197ba5f4SPaul Zimmerman 				/*
1314197ba5f4SPaul Zimmerman 				 * For BEGIN or MID, calculate the length for
1315197ba5f4SPaul Zimmerman 				 * the next microframe to determine the correct
1316197ba5f4SPaul Zimmerman 				 * SSPLIT token, either MID or END
1317197ba5f4SPaul Zimmerman 				 */
1318197ba5f4SPaul Zimmerman 				frame_desc = &qtd->urb->iso_descs[
1319197ba5f4SPaul Zimmerman 						qtd->isoc_frame_index];
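				/*
				 * A start split can carry at most 188 bytes
				 * of isochronous data per microframe, so
				 * advance the offset by that amount.
				 */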
1320197ba5f4SPaul Zimmerman 				qtd->isoc_split_offset += 188;
1321197ba5f4SPaul Zimmerman 
1322197ba5f4SPaul Zimmerman 				if (frame_desc->length - qtd->isoc_split_offset
1323197ba5f4SPaul Zimmerman 							<= 188)
1324197ba5f4SPaul Zimmerman 					qtd->isoc_split_pos =
1325197ba5f4SPaul Zimmerman 							DWC2_HCSPLT_XACTPOS_END;
1326197ba5f4SPaul Zimmerman 				else
1327197ba5f4SPaul Zimmerman 					qtd->isoc_split_pos =
1328197ba5f4SPaul Zimmerman 							DWC2_HCSPLT_XACTPOS_MID;
1329197ba5f4SPaul Zimmerman 				break;
1330197ba5f4SPaul Zimmerman 			}
1331197ba5f4SPaul Zimmerman 		}
1332197ba5f4SPaul Zimmerman 	} else {
1333197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1334197ba5f4SPaul Zimmerman 
1335197ba5f4SPaul Zimmerman 		if (chan->qh->ping_state) {
1336197ba5f4SPaul Zimmerman 			chan->qh->ping_state = 0;
1337197ba5f4SPaul Zimmerman 			/*
1338197ba5f4SPaul Zimmerman 			 * Halt the channel so the transfer can be re-started
1339197ba5f4SPaul Zimmerman 			 * from the appropriate point. This only happens in
1340197ba5f4SPaul Zimmerman 			 * Slave mode. In DMA mode, the ping_state is cleared
1341197ba5f4SPaul Zimmerman 			 * when the transfer is started because the core
1342197ba5f4SPaul Zimmerman 			 * automatically executes the PING, then the transfer.
1343197ba5f4SPaul Zimmerman 			 */
1344197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_ACK);
1345197ba5f4SPaul Zimmerman 		}
1346197ba5f4SPaul Zimmerman 	}
1347197ba5f4SPaul Zimmerman 
1348197ba5f4SPaul Zimmerman 	/*
1349197ba5f4SPaul Zimmerman 	 * If the ACK occurred when _not_ in the PING state, let the channel
1350197ba5f4SPaul Zimmerman 	 * continue transferring data after clearing the error count
1351197ba5f4SPaul Zimmerman 	 */
1352197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_ACK);
1353197ba5f4SPaul Zimmerman }
1354197ba5f4SPaul Zimmerman 
1355197ba5f4SPaul Zimmerman /*
1356197ba5f4SPaul Zimmerman  * Handles a host channel NYET interrupt. This interrupt should only occur on
1357197ba5f4SPaul Zimmerman  * Bulk and Control OUT endpoints and for complete split transactions. If a
1358197ba5f4SPaul Zimmerman  * NYET occurs at the same time as a Transfer Complete interrupt, it is
1359197ba5f4SPaul Zimmerman  * handled in the xfercomp interrupt handler, not here. This handler may be
1360197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1361197ba5f4SPaul Zimmerman  */
1362197ba5f4SPaul Zimmerman static void dwc2_hc_nyet_intr(struct dwc2_hsotg *hsotg,
1363197ba5f4SPaul Zimmerman 			      struct dwc2_host_chan *chan, int chnum,
1364197ba5f4SPaul Zimmerman 			      struct dwc2_qtd *qtd)
1365197ba5f4SPaul Zimmerman {
1366197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1367197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: NYET Received--\n",
1368197ba5f4SPaul Zimmerman 			 chnum);
1369197ba5f4SPaul Zimmerman 
1370197ba5f4SPaul Zimmerman 	/*
1371197ba5f4SPaul Zimmerman 	 * NYET on CSPLIT
1372197ba5f4SPaul Zimmerman 	 * re-do the CSPLIT immediately on non-periodic
1373197ba5f4SPaul Zimmerman 	 */
1374197ba5f4SPaul Zimmerman 	if (chan->do_split && chan->complete_split) {
1375197ba5f4SPaul Zimmerman 		if (chan->ep_is_in && chan->ep_type == USB_ENDPOINT_XFER_ISOC &&
137695832c00SJohn Youn 		    hsotg->params.host_dma) {
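			/*
			 * A NYET on an isochronous IN CSPLIT in DMA mode is
			 * not retried: drop this frame and move on to the
			 * next one, completing the URB if this was the last.
			 */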
1377197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
1378197ba5f4SPaul Zimmerman 			qtd->isoc_split_offset = 0;
1379197ba5f4SPaul Zimmerman 			qtd->isoc_frame_index++;
1380197ba5f4SPaul Zimmerman 			if (qtd->urb &&
1381197ba5f4SPaul Zimmerman 			    qtd->isoc_frame_index == qtd->urb->packet_count) {
1382197ba5f4SPaul Zimmerman 				dwc2_host_complete(hsotg, qtd, 0);
1383197ba5f4SPaul Zimmerman 				dwc2_release_channel(hsotg, chan, qtd,
1384197ba5f4SPaul Zimmerman 						     DWC2_HC_XFER_URB_COMPLETE);
1385197ba5f4SPaul Zimmerman 			} else {
1386197ba5f4SPaul Zimmerman 				dwc2_release_channel(hsotg, chan, qtd,
1387197ba5f4SPaul Zimmerman 						DWC2_HC_XFER_NO_HALT_STATUS);
1388197ba5f4SPaul Zimmerman 			}
1389197ba5f4SPaul Zimmerman 			goto handle_nyet_done;
1390197ba5f4SPaul Zimmerman 		}
1391197ba5f4SPaul Zimmerman 
1392197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1393197ba5f4SPaul Zimmerman 		    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
13941479cb69SDouglas Anderson 			struct dwc2_qh *qh = chan->qh;
13951479cb69SDouglas Anderson 			bool past_end;
13961479cb69SDouglas Anderson 
139795832c00SJohn Youn 			if (!hsotg->params.uframe_sched) {
1398197ba5f4SPaul Zimmerman 				int frnum = dwc2_hcd_get_frame_number(hsotg);
1399197ba5f4SPaul Zimmerman 
14001479cb69SDouglas Anderson 				/* Don't have num_hs_transfers; simple logic */
14011479cb69SDouglas Anderson 				past_end = dwc2_full_frame_num(frnum) !=
14021479cb69SDouglas Anderson 				     dwc2_full_frame_num(qh->next_active_frame);
14031479cb69SDouglas Anderson 			} else {
14041479cb69SDouglas Anderson 				int end_frnum;
14051479cb69SDouglas Anderson 
1406197ba5f4SPaul Zimmerman 				/*
140738beaec6SJohn Youn 				 * Figure out the end frame based on
140838beaec6SJohn Youn 				 * schedule.
14091479cb69SDouglas Anderson 				 *
141038beaec6SJohn Youn 				 * We don't want to go on trying again
141138beaec6SJohn Youn 				 * and again forever. Let's stop when
141238beaec6SJohn Youn 				 * we've done all the transfers that
141338beaec6SJohn Youn 				 * were scheduled.
14141479cb69SDouglas Anderson 				 *
141538beaec6SJohn Youn 				 * We're going to be comparing
141638beaec6SJohn Youn 				 * start_active_frame and
141738beaec6SJohn Youn 				 * next_active_frame, both of which
141838beaec6SJohn Youn 				 * are 1 before the time the packet
141938beaec6SJohn Youn 				 * goes on the wire, so that cancels
142038beaec6SJohn Youn 				 * out. Basically if we had 1 transfer
142138beaec6SJohn Youn 				 * and we saw 1 NYET then we're done.
142238beaec6SJohn Youn 				 * We're getting a NYET here so if
142338beaec6SJohn Youn 				 * next >= (start + num_transfers)
142438beaec6SJohn Youn 				 * we're done. The complexity is that
142538beaec6SJohn Youn 				 * for all but ISOC_OUT we skip one
142638beaec6SJohn Youn 				 * slot.
1427197ba5f4SPaul Zimmerman 				 */
14281479cb69SDouglas Anderson 				end_frnum = dwc2_frame_num_inc(
14291479cb69SDouglas Anderson 					qh->start_active_frame,
14301479cb69SDouglas Anderson 					qh->num_hs_transfers);
14311479cb69SDouglas Anderson 
14321479cb69SDouglas Anderson 				if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
14331479cb69SDouglas Anderson 				    qh->ep_is_in)
14341479cb69SDouglas Anderson 					end_frnum =
14351479cb69SDouglas Anderson 					       dwc2_frame_num_inc(end_frnum, 1);
14361479cb69SDouglas Anderson 
14371479cb69SDouglas Anderson 				past_end = dwc2_frame_num_le(
14381479cb69SDouglas Anderson 					end_frnum, qh->next_active_frame);
14391479cb69SDouglas Anderson 			}
14401479cb69SDouglas Anderson 
14411479cb69SDouglas Anderson 			if (past_end) {
14421479cb69SDouglas Anderson 				/* Treat this as a transaction error. */
1443197ba5f4SPaul Zimmerman #if 0
1444197ba5f4SPaul Zimmerman 				/*
1445197ba5f4SPaul Zimmerman 				 * Todo: Fix system performance so this can
1446197ba5f4SPaul Zimmerman 				 * be treated as an error. Right now complete
1447197ba5f4SPaul Zimmerman 				 * splits cannot be scheduled precisely enough
1448197ba5f4SPaul Zimmerman 				 * due to other system activity, so this error
1449197ba5f4SPaul Zimmerman 				 * occurs regularly in Slave mode.
1450197ba5f4SPaul Zimmerman 				 */
1451197ba5f4SPaul Zimmerman 				qtd->error_count++;
1452197ba5f4SPaul Zimmerman #endif
1453197ba5f4SPaul Zimmerman 				qtd->complete_split = 0;
1454197ba5f4SPaul Zimmerman 				dwc2_halt_channel(hsotg, chan, qtd,
1455197ba5f4SPaul Zimmerman 						  DWC2_HC_XFER_XACT_ERR);
1456197ba5f4SPaul Zimmerman 				/* Todo: add support for isoc release */
1457197ba5f4SPaul Zimmerman 				goto handle_nyet_done;
1458197ba5f4SPaul Zimmerman 			}
1459197ba5f4SPaul Zimmerman 		}
1460197ba5f4SPaul Zimmerman 
1461197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1462197ba5f4SPaul Zimmerman 		goto handle_nyet_done;
1463197ba5f4SPaul Zimmerman 	}
1464197ba5f4SPaul Zimmerman 
1465197ba5f4SPaul Zimmerman 	chan->qh->ping_state = 1;
1466197ba5f4SPaul Zimmerman 	qtd->error_count = 0;
1467197ba5f4SPaul Zimmerman 
1468197ba5f4SPaul Zimmerman 	dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb, qtd,
1469197ba5f4SPaul Zimmerman 				  DWC2_HC_XFER_NYET);
1470197ba5f4SPaul Zimmerman 	dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1471197ba5f4SPaul Zimmerman 
1472197ba5f4SPaul Zimmerman 	/*
1473197ba5f4SPaul Zimmerman 	 * Halt the channel and re-start the transfer so the PING protocol
1474197ba5f4SPaul Zimmerman 	 * will start
1475197ba5f4SPaul Zimmerman 	 */
1476197ba5f4SPaul Zimmerman 	dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NYET);
1477197ba5f4SPaul Zimmerman 
1478197ba5f4SPaul Zimmerman handle_nyet_done:
1479197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_NYET);
1480197ba5f4SPaul Zimmerman }
1481197ba5f4SPaul Zimmerman 
1482197ba5f4SPaul Zimmerman /*
1483197ba5f4SPaul Zimmerman  * Handles a host channel babble interrupt. This handler may be called in
1484197ba5f4SPaul Zimmerman  * either DMA mode or Slave mode.
1485197ba5f4SPaul Zimmerman  */
1486197ba5f4SPaul Zimmerman static void dwc2_hc_babble_intr(struct dwc2_hsotg *hsotg,
1487197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1488197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1489197ba5f4SPaul Zimmerman {
1490197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Babble Error--\n",
1491197ba5f4SPaul Zimmerman 		chnum);
1492197ba5f4SPaul Zimmerman 
1493197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1494197ba5f4SPaul Zimmerman 
149595832c00SJohn Youn 	if (hsotg->params.dma_desc_enable) {
1496197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1497197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_BABBLE_ERR);
1498197ba5f4SPaul Zimmerman 		goto disable_int;
1499197ba5f4SPaul Zimmerman 	}
1500197ba5f4SPaul Zimmerman 
1501197ba5f4SPaul Zimmerman 	if (chan->ep_type != USB_ENDPOINT_XFER_ISOC) {
1502197ba5f4SPaul Zimmerman 		dwc2_host_complete(hsotg, qtd, -EOVERFLOW);
1503197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_BABBLE_ERR);
1504197ba5f4SPaul Zimmerman 	} else {
1505197ba5f4SPaul Zimmerman 		enum dwc2_halt_status halt_status;
1506197ba5f4SPaul Zimmerman 
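		/*
		 * For isochronous endpoints babble only fails the current
		 * frame; record it in the frame descriptor and carry on with
		 * the rest of the URB.
		 */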
1507197ba5f4SPaul Zimmerman 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1508197ba5f4SPaul Zimmerman 						qtd, DWC2_HC_XFER_BABBLE_ERR);
1509197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1510197ba5f4SPaul Zimmerman 	}
1511197ba5f4SPaul Zimmerman 
1512197ba5f4SPaul Zimmerman disable_int:
1513197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_BBLERR);
1514197ba5f4SPaul Zimmerman }
1515197ba5f4SPaul Zimmerman 
1516197ba5f4SPaul Zimmerman /*
1517197ba5f4SPaul Zimmerman  * Handles a host channel AHB error interrupt. This handler is only called in
1518197ba5f4SPaul Zimmerman  * DMA mode.
1519197ba5f4SPaul Zimmerman  */
1520197ba5f4SPaul Zimmerman static void dwc2_hc_ahberr_intr(struct dwc2_hsotg *hsotg,
1521197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1522197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1523197ba5f4SPaul Zimmerman {
1524197ba5f4SPaul Zimmerman 	struct dwc2_hcd_urb *urb = qtd->urb;
1525197ba5f4SPaul Zimmerman 	char *pipetype, *speed;
1526197ba5f4SPaul Zimmerman 	u32 hcchar;
1527197ba5f4SPaul Zimmerman 	u32 hcsplt;
1528197ba5f4SPaul Zimmerman 	u32 hctsiz;
1529197ba5f4SPaul Zimmerman 	u32 hc_dma;
1530197ba5f4SPaul Zimmerman 
1531197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: AHB Error--\n",
1532197ba5f4SPaul Zimmerman 		chnum);
1533197ba5f4SPaul Zimmerman 
1534197ba5f4SPaul Zimmerman 	if (!urb)
1535197ba5f4SPaul Zimmerman 		goto handle_ahberr_halt;
1536197ba5f4SPaul Zimmerman 
1537197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1538197ba5f4SPaul Zimmerman 
1539f25c42b8SGevorg Sahakyan 	hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1540f25c42b8SGevorg Sahakyan 	hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
1541f25c42b8SGevorg Sahakyan 	hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1542f25c42b8SGevorg Sahakyan 	hc_dma = dwc2_readl(hsotg, HCDMA(chnum));
1543197ba5f4SPaul Zimmerman 
1544197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "AHB ERROR, Channel %d\n", chnum);
1545197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  hcchar 0x%08x, hcsplt 0x%08x\n", hcchar, hcsplt);
1546197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  hctsiz 0x%08x, hc_dma 0x%08x\n", hctsiz, hc_dma);
1547197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Device address: %d\n",
1548197ba5f4SPaul Zimmerman 		dwc2_hcd_get_dev_addr(&urb->pipe_info));
1549197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Endpoint: %d, %s\n",
1550197ba5f4SPaul Zimmerman 		dwc2_hcd_get_ep_num(&urb->pipe_info),
1551197ba5f4SPaul Zimmerman 		dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");
1552197ba5f4SPaul Zimmerman 
1553197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&urb->pipe_info)) {
1554197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1555197ba5f4SPaul Zimmerman 		pipetype = "CONTROL";
1556197ba5f4SPaul Zimmerman 		break;
1557197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1558197ba5f4SPaul Zimmerman 		pipetype = "BULK";
1559197ba5f4SPaul Zimmerman 		break;
1560197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1561197ba5f4SPaul Zimmerman 		pipetype = "INTERRUPT";
1562197ba5f4SPaul Zimmerman 		break;
1563197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1564197ba5f4SPaul Zimmerman 		pipetype = "ISOCHRONOUS";
1565197ba5f4SPaul Zimmerman 		break;
1566197ba5f4SPaul Zimmerman 	default:
1567197ba5f4SPaul Zimmerman 		pipetype = "UNKNOWN";
1568197ba5f4SPaul Zimmerman 		break;
1569197ba5f4SPaul Zimmerman 	}
1570197ba5f4SPaul Zimmerman 
1571197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Endpoint type: %s\n", pipetype);
1572197ba5f4SPaul Zimmerman 
1573197ba5f4SPaul Zimmerman 	switch (chan->speed) {
1574197ba5f4SPaul Zimmerman 	case USB_SPEED_HIGH:
1575197ba5f4SPaul Zimmerman 		speed = "HIGH";
1576197ba5f4SPaul Zimmerman 		break;
1577197ba5f4SPaul Zimmerman 	case USB_SPEED_FULL:
1578197ba5f4SPaul Zimmerman 		speed = "FULL";
1579197ba5f4SPaul Zimmerman 		break;
1580197ba5f4SPaul Zimmerman 	case USB_SPEED_LOW:
1581197ba5f4SPaul Zimmerman 		speed = "LOW";
1582197ba5f4SPaul Zimmerman 		break;
1583197ba5f4SPaul Zimmerman 	default:
1584197ba5f4SPaul Zimmerman 		speed = "UNKNOWN";
1585197ba5f4SPaul Zimmerman 		break;
1586197ba5f4SPaul Zimmerman 	}
1587197ba5f4SPaul Zimmerman 
1588197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Speed: %s\n", speed);
1589197ba5f4SPaul Zimmerman 
1590babd1839SDouglas Anderson 	dev_err(hsotg->dev, "  Max packet size: %d (mult %d)\n",
1591babd1839SDouglas Anderson 		dwc2_hcd_get_maxp(&urb->pipe_info),
1592babd1839SDouglas Anderson 		dwc2_hcd_get_maxp_mult(&urb->pipe_info));
1593197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Data buffer length: %d\n", urb->length);
1594197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Transfer buffer: %p, Transfer DMA: %08lx\n",
1595197ba5f4SPaul Zimmerman 		urb->buf, (unsigned long)urb->dma);
1596197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Setup buffer: %p, Setup DMA: %08lx\n",
1597197ba5f4SPaul Zimmerman 		urb->setup_packet, (unsigned long)urb->setup_dma);
1598197ba5f4SPaul Zimmerman 	dev_err(hsotg->dev, "  Interval: %d\n", urb->interval);
1599197ba5f4SPaul Zimmerman 
1600197ba5f4SPaul Zimmerman 	/* Core halts the channel for Descriptor DMA mode */
160195832c00SJohn Youn 	if (hsotg->params.dma_desc_enable) {
1602197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1603197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_AHB_ERR);
1604197ba5f4SPaul Zimmerman 		goto handle_ahberr_done;
1605197ba5f4SPaul Zimmerman 	}
1606197ba5f4SPaul Zimmerman 
1607197ba5f4SPaul Zimmerman 	dwc2_host_complete(hsotg, qtd, -EIO);
1608197ba5f4SPaul Zimmerman 
1609197ba5f4SPaul Zimmerman handle_ahberr_halt:
1610197ba5f4SPaul Zimmerman 	/*
1611197ba5f4SPaul Zimmerman 	 * Force a channel halt. Don't call dwc2_halt_channel because that won't
1612197ba5f4SPaul Zimmerman 	 * write to the HCCHARn register in DMA mode to force the halt.
1613197ba5f4SPaul Zimmerman 	 */
1614197ba5f4SPaul Zimmerman 	dwc2_hc_halt(hsotg, chan, DWC2_HC_XFER_AHB_ERR);
1615197ba5f4SPaul Zimmerman 
1616197ba5f4SPaul Zimmerman handle_ahberr_done:
1617197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_AHBERR);
1618197ba5f4SPaul Zimmerman }
1619197ba5f4SPaul Zimmerman 
1620197ba5f4SPaul Zimmerman /*
1621197ba5f4SPaul Zimmerman  * Handles a host channel transaction error interrupt. This handler may be
1622197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1623197ba5f4SPaul Zimmerman  */
1624197ba5f4SPaul Zimmerman static void dwc2_hc_xacterr_intr(struct dwc2_hsotg *hsotg,
1625197ba5f4SPaul Zimmerman 				 struct dwc2_host_chan *chan, int chnum,
1626197ba5f4SPaul Zimmerman 				 struct dwc2_qtd *qtd)
1627197ba5f4SPaul Zimmerman {
1628197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev,
1629197ba5f4SPaul Zimmerman 		"--Host Channel %d Interrupt: Transaction Error--\n", chnum);
1630197ba5f4SPaul Zimmerman 
1631197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1632197ba5f4SPaul Zimmerman 
163395832c00SJohn Youn 	if (hsotg->params.dma_desc_enable) {
1634197ba5f4SPaul Zimmerman 		dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1635197ba5f4SPaul Zimmerman 					    DWC2_HC_XFER_XACT_ERR);
1636197ba5f4SPaul Zimmerman 		goto handle_xacterr_done;
1637197ba5f4SPaul Zimmerman 	}
1638197ba5f4SPaul Zimmerman 
1639197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1640197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1641197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1642197ba5f4SPaul Zimmerman 		qtd->error_count++;
1643197ba5f4SPaul Zimmerman 		if (!chan->qh->ping_state) {
1644197ba5f4SPaul Zimmerman 			dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1645197ba5f4SPaul Zimmerman 						  qtd, DWC2_HC_XFER_XACT_ERR);
1646197ba5f4SPaul Zimmerman 			dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1647197ba5f4SPaul Zimmerman 			if (!chan->ep_is_in && chan->speed == USB_SPEED_HIGH)
1648197ba5f4SPaul Zimmerman 				chan->qh->ping_state = 1;
1649197ba5f4SPaul Zimmerman 		}
1650197ba5f4SPaul Zimmerman 
1651197ba5f4SPaul Zimmerman 		/*
1652197ba5f4SPaul Zimmerman 		 * Halt the channel so the transfer can be re-started from
1653197ba5f4SPaul Zimmerman 		 * the appropriate point or the PING protocol will start
1654197ba5f4SPaul Zimmerman 		 */
1655197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1656197ba5f4SPaul Zimmerman 		break;
1657197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1658197ba5f4SPaul Zimmerman 		qtd->error_count++;
1659197ba5f4SPaul Zimmerman 		if (chan->do_split && chan->complete_split)
1660197ba5f4SPaul Zimmerman 			qtd->complete_split = 0;
1661197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1662197ba5f4SPaul Zimmerman 		break;
1663197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1664197ba5f4SPaul Zimmerman 		{
1665197ba5f4SPaul Zimmerman 			enum dwc2_halt_status halt_status;
1666197ba5f4SPaul Zimmerman 
1667197ba5f4SPaul Zimmerman 			halt_status = dwc2_update_isoc_urb_state(hsotg, chan,
1668197ba5f4SPaul Zimmerman 					 chnum, qtd, DWC2_HC_XFER_XACT_ERR);
1669197ba5f4SPaul Zimmerman 			dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1670197ba5f4SPaul Zimmerman 		}
1671197ba5f4SPaul Zimmerman 		break;
1672197ba5f4SPaul Zimmerman 	}
1673197ba5f4SPaul Zimmerman 
1674197ba5f4SPaul Zimmerman handle_xacterr_done:
1675197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_XACTERR);
1676197ba5f4SPaul Zimmerman }
1677197ba5f4SPaul Zimmerman 
1678197ba5f4SPaul Zimmerman /*
1679197ba5f4SPaul Zimmerman  * Handles a host channel frame overrun interrupt. This handler may be called
1680197ba5f4SPaul Zimmerman  * in either DMA mode or Slave mode.
1681197ba5f4SPaul Zimmerman  */
1682197ba5f4SPaul Zimmerman static void dwc2_hc_frmovrun_intr(struct dwc2_hsotg *hsotg,
1683197ba5f4SPaul Zimmerman 				  struct dwc2_host_chan *chan, int chnum,
1684197ba5f4SPaul Zimmerman 				  struct dwc2_qtd *qtd)
1685197ba5f4SPaul Zimmerman {
1686197ba5f4SPaul Zimmerman 	enum dwc2_halt_status halt_status;
1687197ba5f4SPaul Zimmerman 
1688197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1689197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev, "--Host Channel %d Interrupt: Frame Overrun--\n",
1690197ba5f4SPaul Zimmerman 			chnum);
1691197ba5f4SPaul Zimmerman 
1692197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1693197ba5f4SPaul Zimmerman 
1694197ba5f4SPaul Zimmerman 	switch (dwc2_hcd_get_pipe_type(&qtd->urb->pipe_info)) {
1695197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_CONTROL:
1696197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_BULK:
1697197ba5f4SPaul Zimmerman 		break;
1698197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_INT:
1699197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1700197ba5f4SPaul Zimmerman 		break;
1701197ba5f4SPaul Zimmerman 	case USB_ENDPOINT_XFER_ISOC:
1702197ba5f4SPaul Zimmerman 		halt_status = dwc2_update_isoc_urb_state(hsotg, chan, chnum,
1703197ba5f4SPaul Zimmerman 					qtd, DWC2_HC_XFER_FRAME_OVERRUN);
1704197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, halt_status);
1705197ba5f4SPaul Zimmerman 		break;
1706197ba5f4SPaul Zimmerman 	}
1707197ba5f4SPaul Zimmerman 
1708197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_FRMOVRUN);
1709197ba5f4SPaul Zimmerman }
1710197ba5f4SPaul Zimmerman 
1711197ba5f4SPaul Zimmerman /*
1712197ba5f4SPaul Zimmerman  * Handles a host channel data toggle error interrupt. This handler may be
1713197ba5f4SPaul Zimmerman  * called in either DMA mode or Slave mode.
1714197ba5f4SPaul Zimmerman  */
1715197ba5f4SPaul Zimmerman static void dwc2_hc_datatglerr_intr(struct dwc2_hsotg *hsotg,
1716197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan, int chnum,
1717197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd)
1718197ba5f4SPaul Zimmerman {
1719197ba5f4SPaul Zimmerman 	dev_dbg(hsotg->dev,
1720197ba5f4SPaul Zimmerman 		"--Host Channel %d Interrupt: Data Toggle Error--\n", chnum);
1721197ba5f4SPaul Zimmerman 
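	/*
	 * A data toggle error on an IN transfer is usually recoverable (the
	 * mismatched packet is dropped and will be retried), so reset the
	 * error count; on an OUT transfer it points at a real problem.
	 */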
1722197ba5f4SPaul Zimmerman 	if (chan->ep_is_in)
1723197ba5f4SPaul Zimmerman 		qtd->error_count = 0;
1724197ba5f4SPaul Zimmerman 	else
1725197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev,
1726197ba5f4SPaul Zimmerman 			"Data Toggle Error on OUT transfer, channel %d\n",
1727197ba5f4SPaul Zimmerman 			chnum);
1728197ba5f4SPaul Zimmerman 
1729197ba5f4SPaul Zimmerman 	dwc2_hc_handle_tt_clear(hsotg, chan, qtd);
1730197ba5f4SPaul Zimmerman 	disable_hc_int(hsotg, chnum, HCINTMSK_DATATGLERR);
1731197ba5f4SPaul Zimmerman }
1732197ba5f4SPaul Zimmerman 
1733197ba5f4SPaul Zimmerman /*
1734197ba5f4SPaul Zimmerman  * For debug only. It checks that a valid halt status is set and that
1735197ba5f4SPaul Zimmerman  * HCCHARn.chdis is clear. If there's a problem, corrective action is
1736197ba5f4SPaul Zimmerman  * taken and a warning is issued.
1737197ba5f4SPaul Zimmerman  *
1738197ba5f4SPaul Zimmerman  * Return: true if halt status is ok, false otherwise
1739197ba5f4SPaul Zimmerman  */
1740197ba5f4SPaul Zimmerman static bool dwc2_halt_status_ok(struct dwc2_hsotg *hsotg,
1741197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1742197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1743197ba5f4SPaul Zimmerman {
1744197ba5f4SPaul Zimmerman #ifdef DEBUG
1745197ba5f4SPaul Zimmerman 	u32 hcchar;
1746197ba5f4SPaul Zimmerman 	u32 hctsiz;
1747197ba5f4SPaul Zimmerman 	u32 hcintmsk;
1748197ba5f4SPaul Zimmerman 	u32 hcsplt;
1749197ba5f4SPaul Zimmerman 
1750197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_NO_HALT_STATUS) {
1751197ba5f4SPaul Zimmerman 		/*
1752197ba5f4SPaul Zimmerman 		 * This code is here only as a check. This condition should
1753197ba5f4SPaul Zimmerman 		 * never happen. Ignore the halt if it does occur.
1754197ba5f4SPaul Zimmerman 		 */
1755f25c42b8SGevorg Sahakyan 		hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1756f25c42b8SGevorg Sahakyan 		hctsiz = dwc2_readl(hsotg, HCTSIZ(chnum));
1757f25c42b8SGevorg Sahakyan 		hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
1758f25c42b8SGevorg Sahakyan 		hcsplt = dwc2_readl(hsotg, HCSPLT(chnum));
1759197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1760197ba5f4SPaul Zimmerman 			"%s: chan->halt_status DWC2_HC_XFER_NO_HALT_STATUS,\n",
1761197ba5f4SPaul Zimmerman 			 __func__);
1762197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1763197ba5f4SPaul Zimmerman 			"channel %d, hcchar 0x%08x, hctsiz 0x%08x,\n",
1764197ba5f4SPaul Zimmerman 			chnum, hcchar, hctsiz);
1765197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
1766197ba5f4SPaul Zimmerman 			"hcint 0x%08x, hcintmsk 0x%08x, hcsplt 0x%08x,\n",
1767197ba5f4SPaul Zimmerman 			chan->hcint, hcintmsk, hcsplt);
1768197ba5f4SPaul Zimmerman 		if (qtd)
1769197ba5f4SPaul Zimmerman 			dev_dbg(hsotg->dev, "qtd->complete_split %d\n",
1770197ba5f4SPaul Zimmerman 				qtd->complete_split);
1771197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev,
1772197ba5f4SPaul Zimmerman 			 "%s: no halt status, channel %d, ignoring interrupt\n",
1773197ba5f4SPaul Zimmerman 			 __func__, chnum);
1774197ba5f4SPaul Zimmerman 		return false;
1775197ba5f4SPaul Zimmerman 	}
1776197ba5f4SPaul Zimmerman 
1777197ba5f4SPaul Zimmerman 	/*
1778197ba5f4SPaul Zimmerman 	 * This code is here only as a check. hcchar.chdis should never be set
1779197ba5f4SPaul Zimmerman 	 * when the halt interrupt occurs. Halt the channel again if it does
1780197ba5f4SPaul Zimmerman 	 * occur.
1781197ba5f4SPaul Zimmerman 	 */
1782f25c42b8SGevorg Sahakyan 	hcchar = dwc2_readl(hsotg, HCCHAR(chnum));
1783197ba5f4SPaul Zimmerman 	if (hcchar & HCCHAR_CHDIS) {
1784197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev,
1785197ba5f4SPaul Zimmerman 			 "%s: hcchar.chdis set unexpectedly, hcchar 0x%08x, trying to halt again\n",
1786197ba5f4SPaul Zimmerman 			 __func__, hcchar);
1787197ba5f4SPaul Zimmerman 		chan->halt_pending = 0;
1788197ba5f4SPaul Zimmerman 		dwc2_halt_channel(hsotg, chan, qtd, chan->halt_status);
1789197ba5f4SPaul Zimmerman 		return false;
1790197ba5f4SPaul Zimmerman 	}
1791197ba5f4SPaul Zimmerman #endif
1792197ba5f4SPaul Zimmerman 
1793197ba5f4SPaul Zimmerman 	return true;
1794197ba5f4SPaul Zimmerman }
1795197ba5f4SPaul Zimmerman 
1796197ba5f4SPaul Zimmerman /*
1797197ba5f4SPaul Zimmerman  * Handles a host Channel Halted interrupt in DMA mode. This handler
1798197ba5f4SPaul Zimmerman  * determines the reason the channel halted and proceeds accordingly.
1799197ba5f4SPaul Zimmerman  */
1800197ba5f4SPaul Zimmerman static void dwc2_hc_chhltd_intr_dma(struct dwc2_hsotg *hsotg,
1801197ba5f4SPaul Zimmerman 				    struct dwc2_host_chan *chan, int chnum,
1802197ba5f4SPaul Zimmerman 				    struct dwc2_qtd *qtd)
1803197ba5f4SPaul Zimmerman {
1804197ba5f4SPaul Zimmerman 	u32 hcintmsk;
1805197ba5f4SPaul Zimmerman 	int out_nak_enh = 0;
1806197ba5f4SPaul Zimmerman 
1807197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1808197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
1809197ba5f4SPaul Zimmerman 			 "--Host Channel %d Interrupt: DMA Channel Halted--\n",
1810197ba5f4SPaul Zimmerman 			 chnum);
1811197ba5f4SPaul Zimmerman 
1812197ba5f4SPaul Zimmerman 	/*
1813197ba5f4SPaul Zimmerman 	 * For cores with the OUT NAK enhancement, the flow for high-speed
1814197ba5f4SPaul Zimmerman 	 * CONTROL/BULK OUT is handled a little differently
1815197ba5f4SPaul Zimmerman 	 */
1816197ba5f4SPaul Zimmerman 	if (hsotg->hw_params.snpsid >= DWC2_CORE_REV_2_71a) {
1817197ba5f4SPaul Zimmerman 		if (chan->speed == USB_SPEED_HIGH && !chan->ep_is_in &&
1818197ba5f4SPaul Zimmerman 		    (chan->ep_type == USB_ENDPOINT_XFER_CONTROL ||
1819197ba5f4SPaul Zimmerman 		     chan->ep_type == USB_ENDPOINT_XFER_BULK)) {
1820197ba5f4SPaul Zimmerman 			out_nak_enh = 1;
1821197ba5f4SPaul Zimmerman 		}
1822197ba5f4SPaul Zimmerman 	}
1823197ba5f4SPaul Zimmerman 
1824197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE ||
1825197ba5f4SPaul Zimmerman 	    (chan->halt_status == DWC2_HC_XFER_AHB_ERR &&
182695832c00SJohn Youn 	     !hsotg->params.dma_desc_enable)) {
182795832c00SJohn Youn 		if (hsotg->params.dma_desc_enable)
1828197ba5f4SPaul Zimmerman 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
1829197ba5f4SPaul Zimmerman 						    chan->halt_status);
1830197ba5f4SPaul Zimmerman 		else
1831197ba5f4SPaul Zimmerman 			/*
1832197ba5f4SPaul Zimmerman 			 * Just release the channel. A dequeue can happen on a
1833197ba5f4SPaul Zimmerman 			 * transfer timeout. In the case of an AHB Error, the
1834197ba5f4SPaul Zimmerman 			 * channel was forced to halt because there's no way to
1835197ba5f4SPaul Zimmerman 			 * gracefully recover.
1836197ba5f4SPaul Zimmerman 			 */
1837197ba5f4SPaul Zimmerman 			dwc2_release_channel(hsotg, chan, qtd,
1838197ba5f4SPaul Zimmerman 					     chan->halt_status);
1839197ba5f4SPaul Zimmerman 		return;
1840197ba5f4SPaul Zimmerman 	}
1841197ba5f4SPaul Zimmerman 
1842f25c42b8SGevorg Sahakyan 	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
1843197ba5f4SPaul Zimmerman 
1844197ba5f4SPaul Zimmerman 	if (chan->hcint & HCINTMSK_XFERCOMPL) {
1845197ba5f4SPaul Zimmerman 		/*
1846197ba5f4SPaul Zimmerman 		 * Todo: This is here because of a possible hardware bug. Spec
1847197ba5f4SPaul Zimmerman 		 * says that on SPLIT-ISOC OUT transfers in DMA mode a HALT
1848197ba5f4SPaul Zimmerman 		 * interrupt w/ACK bit set should occur, but I only see the
1849197ba5f4SPaul Zimmerman 		 * XFERCOMP bit, even with it masked out. This is a workaround
1850197ba5f4SPaul Zimmerman 		 * for that behavior. Should fix this when hardware is fixed.
1851197ba5f4SPaul Zimmerman 		 */
1852197ba5f4SPaul Zimmerman 		if (chan->ep_type == USB_ENDPOINT_XFER_ISOC && !chan->ep_is_in)
1853197ba5f4SPaul Zimmerman 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1854197ba5f4SPaul Zimmerman 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
1855197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_STALL) {
1856197ba5f4SPaul Zimmerman 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
1857197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_XACTERR) &&
185895832c00SJohn Youn 		   !hsotg->params.dma_desc_enable) {
1859197ba5f4SPaul Zimmerman 		if (out_nak_enh) {
1860197ba5f4SPaul Zimmerman 			if (chan->hcint &
1861197ba5f4SPaul Zimmerman 			    (HCINTMSK_NYET | HCINTMSK_NAK | HCINTMSK_ACK)) {
1862197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1863197ba5f4SPaul Zimmerman 					 "XactErr with NYET/NAK/ACK\n");
1864197ba5f4SPaul Zimmerman 				qtd->error_count = 0;
1865197ba5f4SPaul Zimmerman 			} else {
1866197ba5f4SPaul Zimmerman 				dev_vdbg(hsotg->dev,
1867197ba5f4SPaul Zimmerman 					 "XactErr without NYET/NAK/ACK\n");
1868197ba5f4SPaul Zimmerman 			}
1869197ba5f4SPaul Zimmerman 		}
1870197ba5f4SPaul Zimmerman 
1871197ba5f4SPaul Zimmerman 		/*
1872197ba5f4SPaul Zimmerman 		 * Must handle xacterr before nak or ack. Could get a xacterr
1873197ba5f4SPaul Zimmerman 		 * at the same time as either of these on a BULK/CONTROL OUT
1874197ba5f4SPaul Zimmerman 		 * that started with a PING. The xacterr takes precedence.
1875197ba5f4SPaul Zimmerman 		 */
1876197ba5f4SPaul Zimmerman 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1877197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_XCS_XACT) &&
187895832c00SJohn Youn 		   hsotg->params.dma_desc_enable) {
1879197ba5f4SPaul Zimmerman 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
1880197ba5f4SPaul Zimmerman 	} else if ((chan->hcint & HCINTMSK_AHBERR) &&
188195832c00SJohn Youn 		   hsotg->params.dma_desc_enable) {
1882197ba5f4SPaul Zimmerman 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
1883197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_BBLERR) {
1884197ba5f4SPaul Zimmerman 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
1885197ba5f4SPaul Zimmerman 	} else if (chan->hcint & HCINTMSK_FRMOVRUN) {
1886197ba5f4SPaul Zimmerman 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
1887197ba5f4SPaul Zimmerman 	} else if (!out_nak_enh) {
1888197ba5f4SPaul Zimmerman 		if (chan->hcint & HCINTMSK_NYET) {
1889197ba5f4SPaul Zimmerman 			/*
1890197ba5f4SPaul Zimmerman 			 * Must handle nyet before nak or ack. Could get a nyet
1891197ba5f4SPaul Zimmerman 			 * at the same time as either of those on a BULK/CONTROL
1892197ba5f4SPaul Zimmerman 			 * OUT that started with a PING. The nyet takes
1893197ba5f4SPaul Zimmerman 			 * precedence.
1894197ba5f4SPaul Zimmerman 			 */
1895197ba5f4SPaul Zimmerman 			dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
1896197ba5f4SPaul Zimmerman 		} else if ((chan->hcint & HCINTMSK_NAK) &&
1897197ba5f4SPaul Zimmerman 			   !(hcintmsk & HCINTMSK_NAK)) {
1898197ba5f4SPaul Zimmerman 			/*
1899197ba5f4SPaul Zimmerman 			 * If nak is not masked, it's because a non-split IN
1900197ba5f4SPaul Zimmerman 			 * transfer is in an error state. In that case, the nak
1901197ba5f4SPaul Zimmerman 			 * is handled by the nak interrupt handler, not here.
1902197ba5f4SPaul Zimmerman 			 * Handle nak here for BULK/CONTROL OUT transfers, which
1903197ba5f4SPaul Zimmerman 			 * halt on a NAK to allow rewinding the buffer pointer.
1904197ba5f4SPaul Zimmerman 			 */
1905197ba5f4SPaul Zimmerman 			dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
1906197ba5f4SPaul Zimmerman 		} else if ((chan->hcint & HCINTMSK_ACK) &&
1907197ba5f4SPaul Zimmerman 			   !(hcintmsk & HCINTMSK_ACK)) {
1908197ba5f4SPaul Zimmerman 			/*
1909197ba5f4SPaul Zimmerman 			 * If ack is not masked, it's because a non-split IN
1910197ba5f4SPaul Zimmerman 			 * transfer is in an error state. In that case, the ack
1911197ba5f4SPaul Zimmerman 			 * is handled by the ack interrupt handler, not here.
1912197ba5f4SPaul Zimmerman 			 * Handle ack here for split transfers. Start splits
1913197ba5f4SPaul Zimmerman 			 * halt on ACK.
1914197ba5f4SPaul Zimmerman 			 */
1915197ba5f4SPaul Zimmerman 			dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
1916197ba5f4SPaul Zimmerman 		} else {
1917197ba5f4SPaul Zimmerman 			if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
1918197ba5f4SPaul Zimmerman 			    chan->ep_type == USB_ENDPOINT_XFER_ISOC) {
1919197ba5f4SPaul Zimmerman 				/*
1920197ba5f4SPaul Zimmerman 				 * A periodic transfer halted with no other
1921197ba5f4SPaul Zimmerman 				 * channel interrupts set. Assume it was halted
1922197ba5f4SPaul Zimmerman 				 * by the core because it could not be completed
1923197ba5f4SPaul Zimmerman 				 * in its scheduled (micro)frame.
1924197ba5f4SPaul Zimmerman 				 */
1925197ba5f4SPaul Zimmerman 				dev_dbg(hsotg->dev,
1926197ba5f4SPaul Zimmerman 					"%s: Halt channel %d (assume incomplete periodic transfer)\n",
1927197ba5f4SPaul Zimmerman 					__func__, chnum);
1928197ba5f4SPaul Zimmerman 				dwc2_halt_channel(hsotg, chan, qtd,
1929197ba5f4SPaul Zimmerman 					DWC2_HC_XFER_PERIODIC_INCOMPLETE);
1930197ba5f4SPaul Zimmerman 			} else {
1931197ba5f4SPaul Zimmerman 				dev_err(hsotg->dev,
1932197ba5f4SPaul Zimmerman 					"%s: Channel %d - ChHltd set, but reason is unknown\n",
1933197ba5f4SPaul Zimmerman 					__func__, chnum);
1934197ba5f4SPaul Zimmerman 				dev_err(hsotg->dev,
1935197ba5f4SPaul Zimmerman 					"hcint 0x%08x, intsts 0x%08x\n",
1936197ba5f4SPaul Zimmerman 					chan->hcint,
1937f25c42b8SGevorg Sahakyan 					dwc2_readl(hsotg, GINTSTS));
1938151d0cbdSNick Hudson 				goto error;
1939197ba5f4SPaul Zimmerman 			}
1940197ba5f4SPaul Zimmerman 		}
1941197ba5f4SPaul Zimmerman 	} else {
1942197ba5f4SPaul Zimmerman 		dev_info(hsotg->dev,
1943197ba5f4SPaul Zimmerman 			 "NYET/NAK/ACK/other in non-error case, 0x%08x\n",
1944197ba5f4SPaul Zimmerman 			 chan->hcint);
1945151d0cbdSNick Hudson error:
1946151d0cbdSNick Hudson 		/* Fallthrough: use 3-strikes rule */
1947151d0cbdSNick Hudson 		qtd->error_count++;
1948151d0cbdSNick Hudson 		dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
1949151d0cbdSNick Hudson 					  qtd, DWC2_HC_XFER_XACT_ERR);
1950f74b68c6SGuenter Roeck 		/*
1951f74b68c6SGuenter Roeck 		 * We can get here after a completed transaction
1952f74b68c6SGuenter Roeck 		 * (urb->actual_length >= urb->length) which was not reported
1953f74b68c6SGuenter Roeck 		 * as completed. If that is the case, and we do not abort
1954f74b68c6SGuenter Roeck 		 * the transfer, a transfer of size 0 will be enqueued
1955f74b68c6SGuenter Roeck 		 * subsequently. If urb->actual_length is not DMA-aligned,
1956f74b68c6SGuenter Roeck 		 * the buffer will then point to an unaligned address, and
1957f74b68c6SGuenter Roeck 		 * the resulting behavior is undefined. Bail out in that
1958f74b68c6SGuenter Roeck 		 * situation.
1959f74b68c6SGuenter Roeck 		 */
1960f74b68c6SGuenter Roeck 		if (qtd->urb->actual_length >= qtd->urb->length)
1961f74b68c6SGuenter Roeck 			qtd->error_count = 3;
1962151d0cbdSNick Hudson 		dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
1963151d0cbdSNick Hudson 		dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
1964197ba5f4SPaul Zimmerman 	}
1965197ba5f4SPaul Zimmerman }
1966197ba5f4SPaul Zimmerman 
1967197ba5f4SPaul Zimmerman /*
1968197ba5f4SPaul Zimmerman  * Handles a host channel Channel Halted interrupt
1969197ba5f4SPaul Zimmerman  *
1970197ba5f4SPaul Zimmerman  * In slave mode, this handler is called only when the driver specifically
1971197ba5f4SPaul Zimmerman  * requests a halt. This occurs while handling other host channel interrupts
1972197ba5f4SPaul Zimmerman  * (e.g. nak, xacterr, stall, nyet, etc.).
1973197ba5f4SPaul Zimmerman  *
1974197ba5f4SPaul Zimmerman  * In DMA mode, this is the interrupt that occurs when the core has finished
1975197ba5f4SPaul Zimmerman  * processing a transfer on a channel. Other host channel interrupts (except
1976197ba5f4SPaul Zimmerman  * ahberr) are disabled in DMA mode.
1977197ba5f4SPaul Zimmerman  */
1978197ba5f4SPaul Zimmerman static void dwc2_hc_chhltd_intr(struct dwc2_hsotg *hsotg,
1979197ba5f4SPaul Zimmerman 				struct dwc2_host_chan *chan, int chnum,
1980197ba5f4SPaul Zimmerman 				struct dwc2_qtd *qtd)
1981197ba5f4SPaul Zimmerman {
1982197ba5f4SPaul Zimmerman 	if (dbg_hc(chan))
1983197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel %d Interrupt: Channel Halted--\n",
1984197ba5f4SPaul Zimmerman 			 chnum);
1985197ba5f4SPaul Zimmerman 
198695832c00SJohn Youn 	if (hsotg->params.host_dma) {
1987197ba5f4SPaul Zimmerman 		dwc2_hc_chhltd_intr_dma(hsotg, chan, chnum, qtd);
1988197ba5f4SPaul Zimmerman 	} else {
1989197ba5f4SPaul Zimmerman 		if (!dwc2_halt_status_ok(hsotg, chan, chnum, qtd))
1990197ba5f4SPaul Zimmerman 			return;
1991197ba5f4SPaul Zimmerman 		dwc2_release_channel(hsotg, chan, qtd, chan->halt_status);
1992197ba5f4SPaul Zimmerman 	}
1993197ba5f4SPaul Zimmerman }
1994197ba5f4SPaul Zimmerman 
1995dc873084SDoug Anderson /*
1996dc873084SDoug Anderson  * Check if the given qtd is still the top of the list (and thus valid).
1997dc873084SDoug Anderson  *
1998dc873084SDoug Anderson  * If dwc2_hcd_qtd_unlink_and_free() has been called since we grabbed
1999dc873084SDoug Anderson  * the qtd from the top of the list, this will return false (otherwise true).
2000dc873084SDoug Anderson  */
2001dc873084SDoug Anderson static bool dwc2_check_qtd_still_ok(struct dwc2_qtd *qtd, struct dwc2_qh *qh)
2002dc873084SDoug Anderson {
2003dc873084SDoug Anderson 	struct dwc2_qtd *cur_head;
2004dc873084SDoug Anderson 
20059da51974SJohn Youn 	if (!qh)
2006dc873084SDoug Anderson 		return false;
2007dc873084SDoug Anderson 
2008dc873084SDoug Anderson 	cur_head = list_first_entry(&qh->qtd_list, struct dwc2_qtd,
2009dc873084SDoug Anderson 				    qtd_list_entry);
2010dc873084SDoug Anderson 	return (cur_head == qtd);
2011dc873084SDoug Anderson }
2012dc873084SDoug Anderson 
2013197ba5f4SPaul Zimmerman /* Handles interrupt for a specific Host Channel */
2014197ba5f4SPaul Zimmerman static void dwc2_hc_n_intr(struct dwc2_hsotg *hsotg, int chnum)
2015197ba5f4SPaul Zimmerman {
2016197ba5f4SPaul Zimmerman 	struct dwc2_qtd *qtd;
2017197ba5f4SPaul Zimmerman 	struct dwc2_host_chan *chan;
2018*92e44bdbSOliver Neukum 	u32 hcint, hcintraw, hcintmsk;
2019197ba5f4SPaul Zimmerman 
2020197ba5f4SPaul Zimmerman 	chan = hsotg->hc_ptr_array[chnum];
2021197ba5f4SPaul Zimmerman 
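	/*
	 * Read the latched channel interrupts, acknowledge only the bits that
	 * are currently unmasked, and keep the raw value for the handlers and
	 * the debug output below.
	 */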
2022*92e44bdbSOliver Neukum 	hcintraw = dwc2_readl(hsotg, HCINT(chnum));
2023f25c42b8SGevorg Sahakyan 	hcintmsk = dwc2_readl(hsotg, HCINTMSK(chnum));
2024*92e44bdbSOliver Neukum 	hcint = hcintraw & hcintmsk;
2025*92e44bdbSOliver Neukum 	dwc2_writel(hsotg, hcint, HCINT(chnum));
2026*92e44bdbSOliver Neukum 
2027197ba5f4SPaul Zimmerman 	if (!chan) {
2028197ba5f4SPaul Zimmerman 		dev_err(hsotg->dev, "## hc_ptr_array for channel is NULL ##\n");
2029197ba5f4SPaul Zimmerman 		return;
2030197ba5f4SPaul Zimmerman 	}
2031197ba5f4SPaul Zimmerman 
2032197ba5f4SPaul Zimmerman 	if (dbg_hc(chan)) {
2033197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "--Host Channel Interrupt--, Channel %d\n",
2034197ba5f4SPaul Zimmerman 			 chnum);
2035197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev,
2036197ba5f4SPaul Zimmerman 			 "  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2037*92e44bdbSOliver Neukum 			 hcintraw, hcintmsk, hcint);
2038197ba5f4SPaul Zimmerman 	}
2039197ba5f4SPaul Zimmerman 
204016e80218SDouglas Anderson 	/*
204116e80218SDouglas Anderson 	 * If we got an interrupt after someone called
204216e80218SDouglas Anderson 	 * dwc2_hcd_endpoint_disable() we don't want to crash below
204316e80218SDouglas Anderson 	 */
204416e80218SDouglas Anderson 	if (!chan->qh) {
204516e80218SDouglas Anderson 		dev_warn(hsotg->dev, "Interrupt on disabled channel\n");
204616e80218SDouglas Anderson 		return;
204716e80218SDouglas Anderson 	}
204816e80218SDouglas Anderson 
2049*92e44bdbSOliver Neukum 	chan->hcint = hcintraw;
2050197ba5f4SPaul Zimmerman 
2051197ba5f4SPaul Zimmerman 	/*
2052197ba5f4SPaul Zimmerman 	 * If the channel was halted due to a dequeue, the qtd list might
2053197ba5f4SPaul Zimmerman 	 * be empty or at least the first entry will not be the active qtd.
2054197ba5f4SPaul Zimmerman 	 * In this case, take a shortcut and just release the channel.
2055197ba5f4SPaul Zimmerman 	 */
2056197ba5f4SPaul Zimmerman 	if (chan->halt_status == DWC2_HC_XFER_URB_DEQUEUE) {
2057197ba5f4SPaul Zimmerman 		/*
2058197ba5f4SPaul Zimmerman 		 * If the channel was halted, this should be the only
2059197ba5f4SPaul Zimmerman 		 * interrupt unmasked
2060197ba5f4SPaul Zimmerman 		 */
2061197ba5f4SPaul Zimmerman 		WARN_ON(hcint != HCINTMSK_CHHLTD);
206295832c00SJohn Youn 		if (hsotg->params.dma_desc_enable)
2063197ba5f4SPaul Zimmerman 			dwc2_hcd_complete_xfer_ddma(hsotg, chan, chnum,
2064197ba5f4SPaul Zimmerman 						    chan->halt_status);
2065197ba5f4SPaul Zimmerman 		else
2066197ba5f4SPaul Zimmerman 			dwc2_release_channel(hsotg, chan, NULL,
2067197ba5f4SPaul Zimmerman 					     chan->halt_status);
2068197ba5f4SPaul Zimmerman 		return;
2069197ba5f4SPaul Zimmerman 	}
2070197ba5f4SPaul Zimmerman 
2071197ba5f4SPaul Zimmerman 	if (list_empty(&chan->qh->qtd_list)) {
2072197ba5f4SPaul Zimmerman 		/*
2073197ba5f4SPaul Zimmerman 		 * TODO: Will this ever happen with the
2074197ba5f4SPaul Zimmerman 		 * DWC2_HC_XFER_URB_DEQUEUE handling above?
2075197ba5f4SPaul Zimmerman 		 */
2076197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev, "## no QTD queued for channel %d ##\n",
2077197ba5f4SPaul Zimmerman 			chnum);
2078197ba5f4SPaul Zimmerman 		dev_dbg(hsotg->dev,
2079197ba5f4SPaul Zimmerman 			"  hcint 0x%08x, hcintmsk 0x%08x, hcint&hcintmsk 0x%08x\n",
2080197ba5f4SPaul Zimmerman 			chan->hcint, hcintmsk, hcint);
2081197ba5f4SPaul Zimmerman 		chan->halt_status = DWC2_HC_XFER_NO_HALT_STATUS;
2082197ba5f4SPaul Zimmerman 		disable_hc_int(hsotg, chnum, HCINTMSK_CHHLTD);
2083197ba5f4SPaul Zimmerman 		chan->hcint = 0;
2084197ba5f4SPaul Zimmerman 		return;
2085197ba5f4SPaul Zimmerman 	}
2086197ba5f4SPaul Zimmerman 
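	/*
	 * The qtd at the head of the QH's qtd_list is the transfer this
	 * channel is currently servicing; all of the handlers below act on it.
	 */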
2087197ba5f4SPaul Zimmerman 	qtd = list_first_entry(&chan->qh->qtd_list, struct dwc2_qtd,
2088197ba5f4SPaul Zimmerman 			       qtd_list_entry);
2089197ba5f4SPaul Zimmerman 
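	/*
	 * In slave mode, if CHHLTD is latched together with other interrupt
	 * bits, drop it from the working copy so the accompanying condition
	 * (NAK, ACK, XACTERR, ...) is dispatched to its own handler below
	 * instead of the generic Channel Halted path.
	 */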
209095832c00SJohn Youn 	if (!hsotg->params.host_dma) {
2091197ba5f4SPaul Zimmerman 		if ((hcint & HCINTMSK_CHHLTD) && hcint != HCINTMSK_CHHLTD)
2092197ba5f4SPaul Zimmerman 			hcint &= ~HCINTMSK_CHHLTD;
2093197ba5f4SPaul Zimmerman 	}
2094197ba5f4SPaul Zimmerman 
2095197ba5f4SPaul Zimmerman 	if (hcint & HCINTMSK_XFERCOMPL) {
2096197ba5f4SPaul Zimmerman 		dwc2_hc_xfercomp_intr(hsotg, chan, chnum, qtd);
2097197ba5f4SPaul Zimmerman 		/*
2098197ba5f4SPaul Zimmerman 		 * If NYET occurred at same time as Xfer Complete, the NYET is
2099197ba5f4SPaul Zimmerman 		 * handled by the Xfer Complete interrupt handler. Don't want
2100197ba5f4SPaul Zimmerman 		 * to call the NYET interrupt handler in this case.
2101197ba5f4SPaul Zimmerman 		 */
2102197ba5f4SPaul Zimmerman 		hcint &= ~HCINTMSK_NYET;
2103197ba5f4SPaul Zimmerman 	}
2104197ba5f4SPaul Zimmerman 
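	/*
	 * Dispatch each pending interrupt bit to its handler. A handler may
	 * complete the URB and free the qtd it was given, so re-check that
	 * the qtd is still at the head of the list after every call and stop
	 * processing if it is not.
	 */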
2105dc873084SDoug Anderson 	if (hcint & HCINTMSK_CHHLTD) {
2106dc873084SDoug Anderson 		dwc2_hc_chhltd_intr(hsotg, chan, chnum, qtd);
2107dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2108dc873084SDoug Anderson 			goto exit;
2109dc873084SDoug Anderson 	}
2110dc873084SDoug Anderson 	if (hcint & HCINTMSK_AHBERR) {
2111dc873084SDoug Anderson 		dwc2_hc_ahberr_intr(hsotg, chan, chnum, qtd);
2112dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2113dc873084SDoug Anderson 			goto exit;
2114dc873084SDoug Anderson 	}
2115dc873084SDoug Anderson 	if (hcint & HCINTMSK_STALL) {
2116dc873084SDoug Anderson 		dwc2_hc_stall_intr(hsotg, chan, chnum, qtd);
2117dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2118dc873084SDoug Anderson 			goto exit;
2119dc873084SDoug Anderson 	}
2120dc873084SDoug Anderson 	if (hcint & HCINTMSK_NAK) {
2121dc873084SDoug Anderson 		dwc2_hc_nak_intr(hsotg, chan, chnum, qtd);
2122dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2123dc873084SDoug Anderson 			goto exit;
2124dc873084SDoug Anderson 	}
2125dc873084SDoug Anderson 	if (hcint & HCINTMSK_ACK) {
2126dc873084SDoug Anderson 		dwc2_hc_ack_intr(hsotg, chan, chnum, qtd);
2127dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2128dc873084SDoug Anderson 			goto exit;
2129dc873084SDoug Anderson 	}
2130dc873084SDoug Anderson 	if (hcint & HCINTMSK_NYET) {
2131dc873084SDoug Anderson 		dwc2_hc_nyet_intr(hsotg, chan, chnum, qtd);
2132dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2133dc873084SDoug Anderson 			goto exit;
2134dc873084SDoug Anderson 	}
2135dc873084SDoug Anderson 	if (hcint & HCINTMSK_XACTERR) {
2136dc873084SDoug Anderson 		dwc2_hc_xacterr_intr(hsotg, chan, chnum, qtd);
2137dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2138dc873084SDoug Anderson 			goto exit;
2139dc873084SDoug Anderson 	}
2140dc873084SDoug Anderson 	if (hcint & HCINTMSK_BBLERR) {
2141dc873084SDoug Anderson 		dwc2_hc_babble_intr(hsotg, chan, chnum, qtd);
2142dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2143dc873084SDoug Anderson 			goto exit;
2144dc873084SDoug Anderson 	}
2145dc873084SDoug Anderson 	if (hcint & HCINTMSK_FRMOVRUN) {
2146dc873084SDoug Anderson 		dwc2_hc_frmovrun_intr(hsotg, chan, chnum, qtd);
2147dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2148dc873084SDoug Anderson 			goto exit;
2149dc873084SDoug Anderson 	}
2150dc873084SDoug Anderson 	if (hcint & HCINTMSK_DATATGLERR) {
2151dc873084SDoug Anderson 		dwc2_hc_datatglerr_intr(hsotg, chan, chnum, qtd);
2152dc873084SDoug Anderson 		if (!dwc2_check_qtd_still_ok(qtd, chan->qh))
2153dc873084SDoug Anderson 			goto exit;
2154dc873084SDoug Anderson 	}
2155dc873084SDoug Anderson 
2156dc873084SDoug Anderson exit:
2157197ba5f4SPaul Zimmerman 	chan->hcint = 0;
2158197ba5f4SPaul Zimmerman }
2159197ba5f4SPaul Zimmerman 
2160197ba5f4SPaul Zimmerman /*
2161197ba5f4SPaul Zimmerman  * This interrupt indicates that one or more host channels have a pending
2162197ba5f4SPaul Zimmerman  * interrupt. There are multiple conditions that can cause each host channel
2163197ba5f4SPaul Zimmerman  * interrupt. This function determines which conditions have occurred for each
2164197ba5f4SPaul Zimmerman  * host channel interrupt and handles them appropriately.
2165197ba5f4SPaul Zimmerman  */
2166197ba5f4SPaul Zimmerman static void dwc2_hc_intr(struct dwc2_hsotg *hsotg)
2167197ba5f4SPaul Zimmerman {
2168197ba5f4SPaul Zimmerman 	u32 haint;
2169197ba5f4SPaul Zimmerman 	int i;
2170c9c8ac01SDouglas Anderson 	struct dwc2_host_chan *chan, *chan_tmp;
2171197ba5f4SPaul Zimmerman 
2172f25c42b8SGevorg Sahakyan 	haint = dwc2_readl(hsotg, HAINT);
2173197ba5f4SPaul Zimmerman 	if (dbg_perio()) {
2174197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "%s()\n", __func__);
2175197ba5f4SPaul Zimmerman 
2176197ba5f4SPaul Zimmerman 		dev_vdbg(hsotg->dev, "HAINT=%08x\n", haint);
2177197ba5f4SPaul Zimmerman 	}
2178197ba5f4SPaul Zimmerman 
2179c9c8ac01SDouglas Anderson 	/*
2180c9c8ac01SDouglas Anderson 	 * According to USB 2.0 spec section 11.18.8, a host must
2181c9c8ac01SDouglas Anderson 	 * issue complete-split transactions in a microframe for a
2182c9c8ac01SDouglas Anderson 	 * set of full-/low-speed endpoints in the same relative
2183c9c8ac01SDouglas Anderson 	 * order as the start-splits were issued in a microframe.
2184c9c8ac01SDouglas Anderson 	 */
2185c9c8ac01SDouglas Anderson 	list_for_each_entry_safe(chan, chan_tmp, &hsotg->split_order,
2186c9c8ac01SDouglas Anderson 				 split_order_list_entry) {
2187c9c8ac01SDouglas Anderson 		int hc_num = chan->hc_num;
2188c9c8ac01SDouglas Anderson 
2189c9c8ac01SDouglas Anderson 		if (haint & (1 << hc_num)) {
2190c9c8ac01SDouglas Anderson 			dwc2_hc_n_intr(hsotg, hc_num);
2191c9c8ac01SDouglas Anderson 			haint &= ~(1 << hc_num);
2192c9c8ac01SDouglas Anderson 		}
2193c9c8ac01SDouglas Anderson 	}
2194c9c8ac01SDouglas Anderson 
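	/* Service any remaining channels that were not on the split_order list */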
2195bea8e86cSJohn Youn 	for (i = 0; i < hsotg->params.host_channels; i++) {
2196197ba5f4SPaul Zimmerman 		if (haint & (1 << i))
2197197ba5f4SPaul Zimmerman 			dwc2_hc_n_intr(hsotg, i);
2198197ba5f4SPaul Zimmerman 	}
2199197ba5f4SPaul Zimmerman }
2200197ba5f4SPaul Zimmerman 
2201197ba5f4SPaul Zimmerman /* This function handles interrupts for the HCD */
2202197ba5f4SPaul Zimmerman irqreturn_t dwc2_handle_hcd_intr(struct dwc2_hsotg *hsotg)
2203197ba5f4SPaul Zimmerman {
2204197ba5f4SPaul Zimmerman 	u32 gintsts, dbg_gintsts;
22052ccbe854SOliver Neukum 	irqreturn_t retval = IRQ_HANDLED;
2206197ba5f4SPaul Zimmerman 
2207197ba5f4SPaul Zimmerman 	if (!dwc2_is_controller_alive(hsotg)) {
2208197ba5f4SPaul Zimmerman 		dev_warn(hsotg->dev, "Controller is dead\n");
2209197ba5f4SPaul Zimmerman 		return retval;
22102ccbe854SOliver Neukum 	} else {
22112ccbe854SOliver Neukum 		retval = IRQ_NONE;
2212197ba5f4SPaul Zimmerman 	}
2213197ba5f4SPaul Zimmerman 
2214197ba5f4SPaul Zimmerman 	spin_lock(&hsotg->lock);
2215197ba5f4SPaul Zimmerman 
2216197ba5f4SPaul Zimmerman 	/* Check if HOST Mode */
2217197ba5f4SPaul Zimmerman 	if (dwc2_is_host_mode(hsotg)) {
2218197ba5f4SPaul Zimmerman 		gintsts = dwc2_read_core_intr(hsotg);
2219197ba5f4SPaul Zimmerman 		if (!gintsts) {
2220197ba5f4SPaul Zimmerman 			spin_unlock(&hsotg->lock);
2221197ba5f4SPaul Zimmerman 			return retval;
2222197ba5f4SPaul Zimmerman 		}
2223197ba5f4SPaul Zimmerman 
2224197ba5f4SPaul Zimmerman 		retval = IRQ_HANDLED;
2225197ba5f4SPaul Zimmerman 
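		/*
		 * Build a copy of gintsts for logging, with the noisiest bits
		 * (SOF, host channel, RX FIFO level, periodic TX FIFO empty)
		 * masked off unless their debugging is enabled.
		 */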
2226197ba5f4SPaul Zimmerman 		dbg_gintsts = gintsts;
2227197ba5f4SPaul Zimmerman #ifndef DEBUG_SOF
2228197ba5f4SPaul Zimmerman 		dbg_gintsts &= ~GINTSTS_SOF;
2229197ba5f4SPaul Zimmerman #endif
2230197ba5f4SPaul Zimmerman 		if (!dbg_perio())
2231197ba5f4SPaul Zimmerman 			dbg_gintsts &= ~(GINTSTS_HCHINT | GINTSTS_RXFLVL |
2232197ba5f4SPaul Zimmerman 					 GINTSTS_PTXFEMP);
2233197ba5f4SPaul Zimmerman 
2234197ba5f4SPaul Zimmerman 		/* Only print if there are any non-suppressed interrupts left */
2235197ba5f4SPaul Zimmerman 		if (dbg_gintsts)
2236197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2237197ba5f4SPaul Zimmerman 				 "DWC OTG HCD Interrupt Detected gintsts&gintmsk=0x%08x\n",
2238197ba5f4SPaul Zimmerman 				 gintsts);
2239197ba5f4SPaul Zimmerman 
2240197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_SOF)
2241197ba5f4SPaul Zimmerman 			dwc2_sof_intr(hsotg);
2242197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_RXFLVL)
2243197ba5f4SPaul Zimmerman 			dwc2_rx_fifo_level_intr(hsotg);
2244197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_NPTXFEMP)
2245197ba5f4SPaul Zimmerman 			dwc2_np_tx_fifo_empty_intr(hsotg);
2246197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_PRTINT)
2247197ba5f4SPaul Zimmerman 			dwc2_port_intr(hsotg);
2248197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_HCHINT)
2249197ba5f4SPaul Zimmerman 			dwc2_hc_intr(hsotg);
2250197ba5f4SPaul Zimmerman 		if (gintsts & GINTSTS_PTXFEMP)
2251197ba5f4SPaul Zimmerman 			dwc2_perio_tx_fifo_empty_intr(hsotg);
2252197ba5f4SPaul Zimmerman 
2253197ba5f4SPaul Zimmerman 		if (dbg_gintsts) {
2254197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2255197ba5f4SPaul Zimmerman 				 "DWC OTG HCD Finished Servicing Interrupts\n");
2256197ba5f4SPaul Zimmerman 			dev_vdbg(hsotg->dev,
2257197ba5f4SPaul Zimmerman 				 "DWC OTG HCD gintsts=0x%08x gintmsk=0x%08x\n",
2258f25c42b8SGevorg Sahakyan 				 dwc2_readl(hsotg, GINTSTS),
2259f25c42b8SGevorg Sahakyan 				 dwc2_readl(hsotg, GINTMSK));
2260197ba5f4SPaul Zimmerman 		}
2261197ba5f4SPaul Zimmerman 	}
2262197ba5f4SPaul Zimmerman 
2263197ba5f4SPaul Zimmerman 	spin_unlock(&hsotg->lock);
2264197ba5f4SPaul Zimmerman 
2265197ba5f4SPaul Zimmerman 	return retval;
2266197ba5f4SPaul Zimmerman }
2267
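/*
 * A minimal, self-contained sketch (not part of the driver) of the
 * read/mask/acknowledge idiom that dwc2_hc_n_intr() applies to HCINT above:
 * read everything the core latched, intersect it with the interrupt mask,
 * and write the handled bits back to clear them.  The fake_hcint and
 * fake_hcintmsk variables and the reg_read()/reg_ack() helpers below are
 * invented for illustration only; the driver itself uses dwc2_readl() and
 * dwc2_writel() on real hardware registers.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_hcint = 0x00000403;	/* latched status bits */
static uint32_t fake_hcintmsk = 0x00000003;	/* only two bits are unmasked */

static uint32_t reg_read(const uint32_t *reg)
{
	return *reg;
}

/* Writing 1s to a latched status register clears (acknowledges) those bits. */
static void reg_ack(uint32_t *reg, uint32_t bits)
{
	*reg &= ~bits;
}

int main(void)
{
	uint32_t raw = reg_read(&fake_hcint);		   /* everything latched */
	uint32_t pending = raw & reg_read(&fake_hcintmsk); /* bits to handle */

	reg_ack(&fake_hcint, pending);			   /* ack only those bits */

	printf("raw 0x%08" PRIx32 ", handled 0x%08" PRIx32
	       ", still latched 0x%08" PRIx32 "\n",
	       raw, pending, reg_read(&fake_hcint));
	return 0;
}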