xref: /openbmc/linux/drivers/usb/dwc2/hcd_queue.c (revision 3932b9ca)
/*
 * hcd_queue.c - DesignWare HS OTG Controller host queuing routines
 *
 * Copyright (C) 2004-2013 Synopsys, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the above-listed copyright holders may not be used
 *    to endorse or promote products derived from this software without
 *    specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation; either version 2 of the License, or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This file contains the functions to manage Queue Heads and Queue
 * Transfer Descriptors for Host mode
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/usb.h>

#include <linux/usb/hcd.h>
#include <linux/usb/ch11.h>

#include "core.h"
#include "hcd.h"

#define SCHEDULE_SLOP 10

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to init
 * @urb:   Holds the information about the device/endpoint needed to initialize
 *         the QH
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

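		/*
		 * usb_calc_bus_time() gives an approximate worst-case bus
		 * time in nanoseconds; NS_TO_US() converts that to the
		 * microsecond budget the periodic scheduler works with. For
		 * split transfers the high-speed figure is used here, since
		 * the split transaction is what occupies the high-speed root
		 * port (see the Todo above about refining this for splits).
		 */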
		qh->usecs = NS_TO_US(usb_calc_bus_time(qh->do_split ?
				USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
				qh->ep_type == USB_ENDPOINT_XFER_ISOC,
				bytecount));
		/* Start in a slightly future (micro)frame */
		qh->sched_frame = dwc2_frame_num_inc(hsotg->frame_number,
						     SCHEDULE_SLOP);
		qh->interval = urb->interval;
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->interval = 8;
#endif
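		/*
		 * A FS/LS device behind a high-speed hub specifies its
		 * interval in frames, while the HS root port schedules in
		 * microframes, so scale the interval by 8. Setting the low
		 * three bits of sched_frame (|= 0x7) places the start split
		 * in the last microframe of its frame, matching what
		 * dwc2_sched_periodic_split() does for subsequent splits.
		 */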
		hprt = readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->interval *= 8;
			qh->sched_frame |= 0x7;
			qh->start_split_frame = qh->sched_frame;
		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->usecs);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:     The HCD state structure for the DWC OTG controller
 * @urb:       Holds the information about the device/endpoint needed
 *             to initialize the QH
 * @mem_flags: Flags for allocating memory
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
static struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
					  struct dwc2_hcd_urb *urb,
					  gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * The QH should already have been removed from the list. The QTD list should
 * already be empty if called from URB Dequeue.
 *
 * Must NOT be called with interrupts disabled or a spinlock held.
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 buf_size;

	if (hsotg->core_params->dma_desc_enable > 0) {
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	} else if (qh->dw_align_buf) {
		if (qh->ep_type == USB_ENDPOINT_XFER_ISOC)
			buf_size = 4096;
		else
			buf_size = hsotg->core_params->max_transfer_size;
		dma_free_coherent(hsotg->dev, buf_size, qh->dw_align_buf,
				  qh->dw_align_buf_dma);
	}

	kfree(qh);
}

/**
 * dwc2_periodic_channel_available() - Checks that a channel is available for a
 * periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg)
{
	/*
	 * Currently assuming that there is a dedicated host channel for
	 * each periodic transaction plus at least one host channel for
	 * non-periodic transactions
	 */
	int status;
	int num_channels;

	num_channels = hsotg->core_params->host_channels;
	if (hsotg->periodic_channels + hsotg->non_periodic_channels <
							num_channels &&
	    hsotg->periodic_channels < num_channels - 1) {
		status = 0;
	} else {
		dev_dbg(hsotg->dev,
			"%s: Total channels: %d, Periodic: %d, Non-periodic: %d\n",
			__func__, num_channels, hsotg->periodic_channels,
			hsotg->non_periodic_channels);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth
 * for the specified QH in the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH containing periodic bandwidth required
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * For simplicity, this calculation assumes that all the transfers in the
 * periodic schedule may occur in the same (micro)frame
 */
static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg,
					 struct dwc2_qh *qh)
{
	int status;
	s16 max_claimed_usecs;

	status = 0;

	if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) {
		/*
		 * High speed mode
		 * Max periodic usecs is 80% x 125 usec = 100 usec
		 */
		max_claimed_usecs = 100 - qh->usecs;
	} else {
		/*
		 * Full speed mode
		 * Max periodic usecs is 90% x 1000 usec = 900 usec
		 */
		max_claimed_usecs = 900 - qh->usecs;
	}

	if (hsotg->periodic_usecs > max_claimed_usecs) {
		dev_err(hsotg->dev,
			"%s: already claimed usecs %d, required usecs %d\n",
			__func__, hsotg->periodic_usecs, qh->usecs);
		status = -ENOSPC;
	}

	return status;
}

/*
 * Microframe scheduler
 * Tracks the total bus time in use in hsotg->frame_usecs and the time each
 * QH has claimed in qh->frame_usecs. When a QH is released, its claimed
 * time is donated back to the per-microframe totals.
 */
static const unsigned short max_uframe_usecs[] = {
	100, 100, 100, 100, 100, 100, 30, 0
};
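
/*
 * Each microframe is 125 us, of which 80% (100 us) may be claimed for
 * periodic traffic, hence the 100 us budget in the first six entries. The
 * reduced budgets in the last two entries appear to leave headroom at the
 * end of the frame for non-periodic traffic; the exact split is a tuning
 * choice of this scheduler rather than a USB 2.0 requirement.
 */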

void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg)
{
	int i;

	for (i = 0; i < 8; i++)
		hsotg->frame_usecs[i] = max_uframe_usecs[i];
}

static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	int i;

	for (i = 0; i < 8; i++) {
		/* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */
		if (utime <= hsotg->frame_usecs[i]) {
			hsotg->frame_usecs[i] -= utime;
			qh->frame_usecs[i] += utime;
			return i;
		}
	}
	return -ENOSPC;
}

/*
 * Use this for FS transfers that may span multiple (consecutive) uframes
 */
static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	unsigned short utime = qh->usecs;
	unsigned short xtime;
	int t_left;
	int i;
	int j;
	int k;

	for (i = 0; i < 8; i++) {
		if (hsotg->frame_usecs[i] <= 0)
			continue;

		/*
		 * We need a run of consecutive slots, so treat i as the start
		 * slot and accumulate the remaining time of the following
		 * slots in xtime until there is enough for this QH
		 */
		xtime = hsotg->frame_usecs[i];
		for (j = i + 1; j < 8; j++) {
			/*
			 * If adding this frame's remaining time to xtime is
			 * still not enough, only accumulate frame j when it
			 * is completely free; otherwise skip it
			 */
			if (xtime + hsotg->frame_usecs[j] < utime) {
				if (hsotg->frame_usecs[j] <
							max_uframe_usecs[j])
					continue;
			}
			if (xtime >= utime) {
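				/*
				 * Enough time has accumulated from slot i
				 * onward: walk forward claiming each slot's
				 * remaining time until the requirement is
				 * covered, leaving any surplus (-t_left) in
				 * the final slot
				 */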
				t_left = utime;
				for (k = i; k < 8; k++) {
					t_left -= hsotg->frame_usecs[k];
					if (t_left <= 0) {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k]
								+ t_left;
						hsotg->frame_usecs[k] = -t_left;
						return i;
					} else {
						qh->frame_usecs[k] +=
							hsotg->frame_usecs[k];
						hsotg->frame_usecs[k] = 0;
					}
				}
			}
			/* Add this frame's remaining time to xtime */
			xtime += hsotg->frame_usecs[j];
			/* We must have a fully available next frame or break */
			if (xtime < utime &&
			   hsotg->frame_usecs[j] == max_uframe_usecs[j])
				continue;
		}
	}
	return -ENOSPC;
}

static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int ret;

	if (qh->dev_speed == USB_SPEED_HIGH) {
		/* A HS transaction needs a single microframe with enough room */
		ret = dwc2_find_single_uframe(hsotg, qh);
	} else {
		/*
		 * A FS transaction may need a sequence of consecutive
		 * microframes
		 */
		ret = dwc2_find_multi_uframe(hsotg, qh);
	}
	return ret;
}

/**
 * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a
 * host channel is large enough to handle the maximum data transfer in a single
 * (micro)frame for a periodic transfer
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for a periodic endpoint
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh)
{
	u32 max_xfer_size;
	u32 max_channel_xfer_size;
	int status = 0;

	max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp);
	max_channel_xfer_size = hsotg->core_params->max_transfer_size;

	if (max_xfer_size > max_channel_xfer_size) {
		dev_err(hsotg->dev,
			"%s: Periodic xfer length %d > max xfer length for channel %d\n",
			__func__, max_xfer_size, max_channel_xfer_size);
		status = -ENOSPC;
	}

	return status;
}

/**
 * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in
 * the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    QH for the periodic transfer. The QH should already contain the
 *         scheduling information.
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	if (hsotg->core_params->uframe_sched > 0) {
		int frame = -1;

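		/*
		 * dwc2_find_uframe() returns the index of the first claimed
		 * microframe (0..7) on success, or a negative error code.
		 * Note the mapping below: a return of 0 selects microframe 7
		 * and a return of i > 0 selects microframe i - 1.
		 */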
		status = dwc2_find_uframe(hsotg, qh);
		if (status == 0)
			frame = 7;
		else if (status > 0)
			frame = status - 1;

		/* Set the new frame up */
		if (frame >= 0) {
			qh->sched_frame &= ~0x7;
			qh->sched_frame |= (frame & 7);
		}

		if (status > 0)
			status = 0;
	} else {
		status = dwc2_periodic_channel_available(hsotg);
		if (status) {
			dev_info(hsotg->dev,
				 "%s: No host channel available for periodic transfer\n",
				 __func__);
			return status;
		}

		status = dwc2_check_periodic_bandwidth(hsotg, qh);
	}

	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Insufficient periodic bandwidth for periodic transfer\n",
			__func__);
		return status;
	}

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	if (hsotg->core_params->uframe_sched <= 0)
		/* Reserve periodic channel */
		hsotg->periodic_channels++;

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs += qh->usecs;

	return status;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:	   QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	int i;

	list_del_init(&qh->qh_list_entry);

	/* Update claimed usecs per (micro)frame */
	hsotg->periodic_usecs -= qh->usecs;

	if (hsotg->core_params->uframe_sched > 0) {
		for (i = 0; i < 8; i++) {
			hsotg->frame_usecs[i] += qh->frame_usecs[i];
			qh->frame_usecs[i] = 0;
		}
	} else {
		/* Release periodic channel reservation */
		hsotg->periodic_channels--;
	}
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non-periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
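
	/*
	 * This is the first periodic QH in the schedule, so unmask the SOF
	 * interrupt, which is used to advance the periodic schedule
	 */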
	if (!hsotg->periodic_qh_count) {
		intr_mask = readl(hsotg->regs + GINTMSK);
		intr_mask |= GINTSTS_SOF;
		writel(intr_mask, hsotg->regs + GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	if (!hsotg->periodic_qh_count) {
		/* No periodic QHs remain, so mask the SOF interrupt again */
		intr_mask = readl(hsotg->regs + GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		writel(intr_mask, hsotg->regs + GINTMSK);
	}
}

/*
 * Schedule the next continuing periodic split transfer
 */
static void dwc2_sched_periodic_split(struct dwc2_hsotg *hsotg,
				      struct dwc2_qh *qh, u16 frame_number,
				      int sched_next_periodic_split)
{
	u16 incr;

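	/*
	 * If sched_next_periodic_split is set, the current transaction still
	 * needs its complete split(s), so reschedule at the current frame
	 * number (possibly advanced by one, see below). Otherwise schedule
	 * the next start split one interval after the previous start split,
	 * or as soon as possible if that time has already passed.
	 */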
	if (sched_next_periodic_split) {
		qh->sched_frame = frame_number;
		incr = dwc2_frame_num_inc(qh->start_split_frame, 1);
		if (dwc2_frame_num_le(frame_number, incr)) {
			/*
			 * Allow one frame to elapse after start split
			 * microframe before scheduling complete split, but
			 * DON'T if we are doing the next start split in the
			 * same frame for an ISOC out
			 */
			if (qh->ep_type != USB_ENDPOINT_XFER_ISOC ||
			    qh->ep_is_in != 0) {
				qh->sched_frame =
					dwc2_frame_num_inc(qh->sched_frame, 1);
			}
		}
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->start_split_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
		qh->sched_frame |= 0x7;
		qh->start_split_frame = qh->sched_frame;
	}
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 frame_number;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (qh->do_split) {
		dwc2_sched_periodic_split(hsotg, qh, frame_number,
					  sched_next_periodic_split);
	} else {
		qh->sched_frame = dwc2_frame_num_inc(qh->sched_frame,
						     qh->interval);
		if (dwc2_frame_num_le(qh->sched_frame, frame_number))
			qh->sched_frame = frame_number;
	}

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}
	/*
	 * Remove from periodic_sched_queued and move to the appropriate
	 * queue: ready if the scheduled (micro)frame has been reached
	 * (exactly matched when the microframe scheduler is disabled),
	 * otherwise inactive
	 */
	if ((hsotg->core_params->uframe_sched > 0 &&
	     dwc2_frame_num_le(qh->sched_frame, frame_number)) ||
	    (hsotg->core_params->uframe_sched <= 0 &&
	     qh->sched_frame == frame_number))
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		list_move(&qh->qh_list_entry, &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *
 * @hsotg:     The DWC HCD structure
 * @qtd:       The QTD to add
 * @qh:        Out parameter to return the queue head
 * @mem_flags: Flags for allocating memory
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * Finds the correct QH to place the QTD into. If it does not find a QH, it
 * will create a new QH. If the QH to which the QTD is added is not currently
 * scheduled, it is placed into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh **qh, gfp_t mem_flags)
{
	struct dwc2_hcd_urb *urb = qtd->urb;
	unsigned long flags;
	int allocated = 0;
	int retval;

	/*
	 * Get the QH which holds the QTD-list to insert to. Create QH if it
	 * doesn't exist.
	 */
	if (*qh == NULL) {
		*qh = dwc2_hcd_qh_create(hsotg, urb, mem_flags);
		if (*qh == NULL)
			return -ENOMEM;
		allocated = 1;
	}

	spin_lock_irqsave(&hsotg->lock, flags);

	retval = dwc2_hcd_qh_add(hsotg, *qh);
	if (retval)
		goto fail;

	qtd->qh = *qh;
	list_add_tail(&qtd->qtd_list_entry, &(*qh)->qtd_list);
	spin_unlock_irqrestore(&hsotg->lock, flags);

	return 0;

fail:
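	/*
	 * If the QH was created in this call, tear it down completely:
	 * unlink it, free any QTDs already on its list, then drop the lock
	 * before freeing the QH itself (dwc2_hcd_qh_free() must not be
	 * called with the spinlock held)
	 */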
	if (allocated) {
		struct dwc2_qtd *qtd2, *qtd2_tmp;
		struct dwc2_qh *qh_tmp = *qh;

		*qh = NULL;
		dwc2_hcd_qh_unlink(hsotg, qh_tmp);

		/* Free each QTD in the QH's QTD list */
		list_for_each_entry_safe(qtd2, qtd2_tmp, &qh_tmp->qtd_list,
					 qtd_list_entry)
			dwc2_hcd_qtd_unlink_and_free(hsotg, qtd2, qh_tmp);

		spin_unlock_irqrestore(&hsotg->lock, flags);
		dwc2_hcd_qh_free(hsotg, qh_tmp);
	} else {
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	return retval;
}
836