1197ba5f4SPaul Zimmerman /* 2197ba5f4SPaul Zimmerman * hcd_queue.c - DesignWare HS OTG Controller host queuing routines 3197ba5f4SPaul Zimmerman * 4197ba5f4SPaul Zimmerman * Copyright (C) 2004-2013 Synopsys, Inc. 5197ba5f4SPaul Zimmerman * 6197ba5f4SPaul Zimmerman * Redistribution and use in source and binary forms, with or without 7197ba5f4SPaul Zimmerman * modification, are permitted provided that the following conditions 8197ba5f4SPaul Zimmerman * are met: 9197ba5f4SPaul Zimmerman * 1. Redistributions of source code must retain the above copyright 10197ba5f4SPaul Zimmerman * notice, this list of conditions, and the following disclaimer, 11197ba5f4SPaul Zimmerman * without modification. 12197ba5f4SPaul Zimmerman * 2. Redistributions in binary form must reproduce the above copyright 13197ba5f4SPaul Zimmerman * notice, this list of conditions and the following disclaimer in the 14197ba5f4SPaul Zimmerman * documentation and/or other materials provided with the distribution. 15197ba5f4SPaul Zimmerman * 3. The names of the above-listed copyright holders may not be used 16197ba5f4SPaul Zimmerman * to endorse or promote products derived from this software without 17197ba5f4SPaul Zimmerman * specific prior written permission. 18197ba5f4SPaul Zimmerman * 19197ba5f4SPaul Zimmerman * ALTERNATIVELY, this software may be distributed under the terms of the 20197ba5f4SPaul Zimmerman * GNU General Public License ("GPL") as published by the Free Software 21197ba5f4SPaul Zimmerman * Foundation; either version 2 of the License, or (at your option) any 22197ba5f4SPaul Zimmerman * later version. 23197ba5f4SPaul Zimmerman * 24197ba5f4SPaul Zimmerman * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 25197ba5f4SPaul Zimmerman * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 26197ba5f4SPaul Zimmerman * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27197ba5f4SPaul Zimmerman * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR 28197ba5f4SPaul Zimmerman * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 29197ba5f4SPaul Zimmerman * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 30197ba5f4SPaul Zimmerman * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 31197ba5f4SPaul Zimmerman * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 32197ba5f4SPaul Zimmerman * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 33197ba5f4SPaul Zimmerman * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 34197ba5f4SPaul Zimmerman * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 35197ba5f4SPaul Zimmerman */ 36197ba5f4SPaul Zimmerman 37197ba5f4SPaul Zimmerman /* 38197ba5f4SPaul Zimmerman * This file contains the functions to manage Queue Heads and Queue 39197ba5f4SPaul Zimmerman * Transfer Descriptors for Host mode 40197ba5f4SPaul Zimmerman */ 41fb616e3fSDouglas Anderson #include <linux/gcd.h> 42197ba5f4SPaul Zimmerman #include <linux/kernel.h> 43197ba5f4SPaul Zimmerman #include <linux/module.h> 44197ba5f4SPaul Zimmerman #include <linux/spinlock.h> 45197ba5f4SPaul Zimmerman #include <linux/interrupt.h> 46197ba5f4SPaul Zimmerman #include <linux/dma-mapping.h> 47197ba5f4SPaul Zimmerman #include <linux/io.h> 48197ba5f4SPaul Zimmerman #include <linux/slab.h> 49197ba5f4SPaul Zimmerman #include <linux/usb.h> 50197ba5f4SPaul Zimmerman 51197ba5f4SPaul Zimmerman #include <linux/usb/hcd.h> 52197ba5f4SPaul Zimmerman #include <linux/usb/ch11.h> 53197ba5f4SPaul Zimmerman 54197ba5f4SPaul Zimmerman #include "core.h" 55197ba5f4SPaul Zimmerman #include "hcd.h" 56197ba5f4SPaul Zimmerman 5717dd5b64SDouglas Anderson /* Wait this long before releasing periodic reservation */ 5817dd5b64SDouglas Anderson #define DWC2_UNRESERVE_DELAY (msecs_to_jiffies(5)) 5917dd5b64SDouglas Anderson 6017dd5b64SDouglas Anderson /** 61b951c6c7SDouglas Anderson * 
dwc2_periodic_channel_available() - Checks that a channel is available for a 62b951c6c7SDouglas Anderson * periodic transfer 63b951c6c7SDouglas Anderson * 64b951c6c7SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 65b951c6c7SDouglas Anderson * 66b951c6c7SDouglas Anderson * Return: 0 if successful, negative error code otherwise 67b951c6c7SDouglas Anderson */ 68b951c6c7SDouglas Anderson static int dwc2_periodic_channel_available(struct dwc2_hsotg *hsotg) 69b951c6c7SDouglas Anderson { 70b951c6c7SDouglas Anderson /* 71b951c6c7SDouglas Anderson * Currently assuming that there is a dedicated host channel for 72b951c6c7SDouglas Anderson * each periodic transaction plus at least one host channel for 73b951c6c7SDouglas Anderson * non-periodic transactions 74b951c6c7SDouglas Anderson */ 75b951c6c7SDouglas Anderson int status; 76b951c6c7SDouglas Anderson int num_channels; 77b951c6c7SDouglas Anderson 78b951c6c7SDouglas Anderson num_channels = hsotg->core_params->host_channels; 79b951c6c7SDouglas Anderson if (hsotg->periodic_channels + hsotg->non_periodic_channels < 80b951c6c7SDouglas Anderson num_channels 81b951c6c7SDouglas Anderson && hsotg->periodic_channels < num_channels - 1) { 82b951c6c7SDouglas Anderson status = 0; 83b951c6c7SDouglas Anderson } else { 84b951c6c7SDouglas Anderson dev_dbg(hsotg->dev, 85b951c6c7SDouglas Anderson "%s: Total channels: %d, Periodic: %d, " 86b951c6c7SDouglas Anderson "Non-periodic: %d\n", __func__, num_channels, 87b951c6c7SDouglas Anderson hsotg->periodic_channels, hsotg->non_periodic_channels); 88b951c6c7SDouglas Anderson status = -ENOSPC; 89b951c6c7SDouglas Anderson } 90b951c6c7SDouglas Anderson 91b951c6c7SDouglas Anderson return status; 92b951c6c7SDouglas Anderson } 93b951c6c7SDouglas Anderson 94b951c6c7SDouglas Anderson /** 95b951c6c7SDouglas Anderson * dwc2_check_periodic_bandwidth() - Checks that there is sufficient bandwidth 96b951c6c7SDouglas Anderson * for the specified QH in the periodic schedule 
97b951c6c7SDouglas Anderson * 98b951c6c7SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 99b951c6c7SDouglas Anderson * @qh: QH containing periodic bandwidth required 100b951c6c7SDouglas Anderson * 101b951c6c7SDouglas Anderson * Return: 0 if successful, negative error code otherwise 102b951c6c7SDouglas Anderson * 103b951c6c7SDouglas Anderson * For simplicity, this calculation assumes that all the transfers in the 104b951c6c7SDouglas Anderson * periodic schedule may occur in the same (micro)frame 105b951c6c7SDouglas Anderson */ 106b951c6c7SDouglas Anderson static int dwc2_check_periodic_bandwidth(struct dwc2_hsotg *hsotg, 107b951c6c7SDouglas Anderson struct dwc2_qh *qh) 108b951c6c7SDouglas Anderson { 109b951c6c7SDouglas Anderson int status; 110b951c6c7SDouglas Anderson s16 max_claimed_usecs; 111b951c6c7SDouglas Anderson 112b951c6c7SDouglas Anderson status = 0; 113b951c6c7SDouglas Anderson 114b951c6c7SDouglas Anderson if (qh->dev_speed == USB_SPEED_HIGH || qh->do_split) { 115b951c6c7SDouglas Anderson /* 116b951c6c7SDouglas Anderson * High speed mode 117b951c6c7SDouglas Anderson * Max periodic usecs is 80% x 125 usec = 100 usec 118b951c6c7SDouglas Anderson */ 119b951c6c7SDouglas Anderson max_claimed_usecs = 100 - qh->host_us; 120b951c6c7SDouglas Anderson } else { 121b951c6c7SDouglas Anderson /* 122b951c6c7SDouglas Anderson * Full speed mode 123b951c6c7SDouglas Anderson * Max periodic usecs is 90% x 1000 usec = 900 usec 124b951c6c7SDouglas Anderson */ 125b951c6c7SDouglas Anderson max_claimed_usecs = 900 - qh->host_us; 126b951c6c7SDouglas Anderson } 127b951c6c7SDouglas Anderson 128b951c6c7SDouglas Anderson if (hsotg->periodic_usecs > max_claimed_usecs) { 129b951c6c7SDouglas Anderson dev_err(hsotg->dev, 130b951c6c7SDouglas Anderson "%s: already claimed usecs %d, required usecs %d\n", 131b951c6c7SDouglas Anderson __func__, hsotg->periodic_usecs, qh->host_us); 132b951c6c7SDouglas Anderson status = -ENOSPC; 133b951c6c7SDouglas Anderson } 
134b951c6c7SDouglas Anderson 135b951c6c7SDouglas Anderson return status; 136b951c6c7SDouglas Anderson } 137b951c6c7SDouglas Anderson 138b951c6c7SDouglas Anderson /** 139b951c6c7SDouglas Anderson * Microframe scheduler 140b951c6c7SDouglas Anderson * track the total use in hsotg->frame_usecs 141b951c6c7SDouglas Anderson * keep each qh use in qh->frame_usecs 142b951c6c7SDouglas Anderson * when surrendering the qh then donate the time back 143b951c6c7SDouglas Anderson */ 144b951c6c7SDouglas Anderson static const unsigned short max_uframe_usecs[] = { 145b951c6c7SDouglas Anderson 100, 100, 100, 100, 100, 100, 30, 0 146b951c6c7SDouglas Anderson }; 147b951c6c7SDouglas Anderson 148b951c6c7SDouglas Anderson void dwc2_hcd_init_usecs(struct dwc2_hsotg *hsotg) 149b951c6c7SDouglas Anderson { 150b951c6c7SDouglas Anderson int i; 151b951c6c7SDouglas Anderson 152b951c6c7SDouglas Anderson for (i = 0; i < 8; i++) 153b951c6c7SDouglas Anderson hsotg->frame_usecs[i] = max_uframe_usecs[i]; 154b951c6c7SDouglas Anderson } 155b951c6c7SDouglas Anderson 156b951c6c7SDouglas Anderson static int dwc2_find_single_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 157b951c6c7SDouglas Anderson { 158b951c6c7SDouglas Anderson unsigned short utime = qh->host_us; 159b951c6c7SDouglas Anderson int i; 160b951c6c7SDouglas Anderson 161b951c6c7SDouglas Anderson for (i = 0; i < 8; i++) { 162b951c6c7SDouglas Anderson /* At the start hsotg->frame_usecs[i] = max_uframe_usecs[i] */ 163b951c6c7SDouglas Anderson if (utime <= hsotg->frame_usecs[i]) { 164b951c6c7SDouglas Anderson hsotg->frame_usecs[i] -= utime; 165b951c6c7SDouglas Anderson qh->frame_usecs[i] += utime; 166b951c6c7SDouglas Anderson return i; 167b951c6c7SDouglas Anderson } 168b951c6c7SDouglas Anderson } 169b951c6c7SDouglas Anderson return -ENOSPC; 170b951c6c7SDouglas Anderson } 171b951c6c7SDouglas Anderson 172b951c6c7SDouglas Anderson /* 173b951c6c7SDouglas Anderson * use this for FS apps that can span multiple uframes 174b951c6c7SDouglas Anderson */ 
175b951c6c7SDouglas Anderson static int dwc2_find_multi_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 176b951c6c7SDouglas Anderson { 177b951c6c7SDouglas Anderson unsigned short utime = qh->host_us; 178b951c6c7SDouglas Anderson unsigned short xtime; 179b951c6c7SDouglas Anderson int t_left; 180b951c6c7SDouglas Anderson int i; 181b951c6c7SDouglas Anderson int j; 182b951c6c7SDouglas Anderson int k; 183b951c6c7SDouglas Anderson 184b951c6c7SDouglas Anderson for (i = 0; i < 8; i++) { 185b951c6c7SDouglas Anderson if (hsotg->frame_usecs[i] <= 0) 186b951c6c7SDouglas Anderson continue; 187b951c6c7SDouglas Anderson 188b951c6c7SDouglas Anderson /* 189b951c6c7SDouglas Anderson * we need n consecutive slots so use j as a start slot 190b951c6c7SDouglas Anderson * j plus j+1 must be enough time (for now) 191b951c6c7SDouglas Anderson */ 192b951c6c7SDouglas Anderson xtime = hsotg->frame_usecs[i]; 193b951c6c7SDouglas Anderson for (j = i + 1; j < 8; j++) { 194b951c6c7SDouglas Anderson /* 195b951c6c7SDouglas Anderson * if we add this frame remaining time to xtime we may 196b951c6c7SDouglas Anderson * be OK, if not we need to test j for a complete frame 197b951c6c7SDouglas Anderson */ 198b951c6c7SDouglas Anderson if (xtime + hsotg->frame_usecs[j] < utime) { 199b951c6c7SDouglas Anderson if (hsotg->frame_usecs[j] < 200b951c6c7SDouglas Anderson max_uframe_usecs[j]) 201b951c6c7SDouglas Anderson continue; 202b951c6c7SDouglas Anderson } 203b951c6c7SDouglas Anderson if (xtime >= utime) { 204b951c6c7SDouglas Anderson t_left = utime; 205b951c6c7SDouglas Anderson for (k = i; k < 8; k++) { 206b951c6c7SDouglas Anderson t_left -= hsotg->frame_usecs[k]; 207b951c6c7SDouglas Anderson if (t_left <= 0) { 208b951c6c7SDouglas Anderson qh->frame_usecs[k] += 209b951c6c7SDouglas Anderson hsotg->frame_usecs[k] 210b951c6c7SDouglas Anderson + t_left; 211b951c6c7SDouglas Anderson hsotg->frame_usecs[k] = -t_left; 212b951c6c7SDouglas Anderson return i; 213b951c6c7SDouglas Anderson } else { 214b951c6c7SDouglas 
Anderson qh->frame_usecs[k] += 215b951c6c7SDouglas Anderson hsotg->frame_usecs[k]; 216b951c6c7SDouglas Anderson hsotg->frame_usecs[k] = 0; 217b951c6c7SDouglas Anderson } 218b951c6c7SDouglas Anderson } 219b951c6c7SDouglas Anderson } 220b951c6c7SDouglas Anderson /* add the frame time to x time */ 221b951c6c7SDouglas Anderson xtime += hsotg->frame_usecs[j]; 222b951c6c7SDouglas Anderson /* we must have a fully available next frame or break */ 223b951c6c7SDouglas Anderson if (xtime < utime && 224b951c6c7SDouglas Anderson hsotg->frame_usecs[j] == max_uframe_usecs[j]) 225b951c6c7SDouglas Anderson continue; 226b951c6c7SDouglas Anderson } 227b951c6c7SDouglas Anderson } 228b951c6c7SDouglas Anderson return -ENOSPC; 229b951c6c7SDouglas Anderson } 230b951c6c7SDouglas Anderson 231b951c6c7SDouglas Anderson static int dwc2_find_uframe(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 232b951c6c7SDouglas Anderson { 233b951c6c7SDouglas Anderson int ret; 234b951c6c7SDouglas Anderson 235b951c6c7SDouglas Anderson if (qh->dev_speed == USB_SPEED_HIGH) { 236b951c6c7SDouglas Anderson /* if this is a hs transaction we need a full frame */ 237b951c6c7SDouglas Anderson ret = dwc2_find_single_uframe(hsotg, qh); 238b951c6c7SDouglas Anderson } else { 239b951c6c7SDouglas Anderson /* 240b951c6c7SDouglas Anderson * if this is a fs transaction we may need a sequence 241b951c6c7SDouglas Anderson * of frames 242b951c6c7SDouglas Anderson */ 243b951c6c7SDouglas Anderson ret = dwc2_find_multi_uframe(hsotg, qh); 244b951c6c7SDouglas Anderson } 245b951c6c7SDouglas Anderson return ret; 246b951c6c7SDouglas Anderson } 247b951c6c7SDouglas Anderson 248b951c6c7SDouglas Anderson /** 249fb616e3fSDouglas Anderson * dwc2_pick_first_frame() - Choose 1st frame for qh that's already scheduled 250fb616e3fSDouglas Anderson * 251fb616e3fSDouglas Anderson * Takes a qh that has already been scheduled (which means we know we have the 252fb616e3fSDouglas Anderson * bandwdith reserved for us) and set the next_active_frame and the 
253fb616e3fSDouglas Anderson * start_active_frame. 254fb616e3fSDouglas Anderson * 255fb616e3fSDouglas Anderson * This is expected to be called on qh's that weren't previously actively 256fb616e3fSDouglas Anderson * running. It just picks the next frame that we can fit into without any 257fb616e3fSDouglas Anderson * thought about the past. 258fb616e3fSDouglas Anderson * 259fb616e3fSDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 260fb616e3fSDouglas Anderson * @qh: QH for a periodic endpoint 261fb616e3fSDouglas Anderson * 262fb616e3fSDouglas Anderson */ 263fb616e3fSDouglas Anderson static void dwc2_pick_first_frame(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 264fb616e3fSDouglas Anderson { 265fb616e3fSDouglas Anderson u16 frame_number; 266fb616e3fSDouglas Anderson u16 earliest_frame; 267fb616e3fSDouglas Anderson u16 next_active_frame; 268fb616e3fSDouglas Anderson u16 interval; 269fb616e3fSDouglas Anderson 270fb616e3fSDouglas Anderson /* 271fb616e3fSDouglas Anderson * Use the real frame number rather than the cached value as of the 272fb616e3fSDouglas Anderson * last SOF to give us a little extra slop. 273fb616e3fSDouglas Anderson */ 274fb616e3fSDouglas Anderson frame_number = dwc2_hcd_get_frame_number(hsotg); 275fb616e3fSDouglas Anderson 276fb616e3fSDouglas Anderson /* 277fb616e3fSDouglas Anderson * We wouldn't want to start any earlier than the next frame just in 278fb616e3fSDouglas Anderson * case the frame number ticks as we're doing this calculation. 279fb616e3fSDouglas Anderson * 280fb616e3fSDouglas Anderson * NOTE: if we could quantify how long till we actually get scheduled 281fb616e3fSDouglas Anderson * we might be able to avoid the "+ 1" by looking at the upper part of 282fb616e3fSDouglas Anderson * HFNUM (the FRREM field). For now we'll just use the + 1 though. 
283fb616e3fSDouglas Anderson */ 284fb616e3fSDouglas Anderson earliest_frame = dwc2_frame_num_inc(frame_number, 1); 285fb616e3fSDouglas Anderson next_active_frame = earliest_frame; 286fb616e3fSDouglas Anderson 287fb616e3fSDouglas Anderson /* Get the "no microframe schduler" out of the way... */ 288fb616e3fSDouglas Anderson if (hsotg->core_params->uframe_sched <= 0) { 289fb616e3fSDouglas Anderson if (qh->do_split) 290fb616e3fSDouglas Anderson /* Splits are active at microframe 0 minus 1 */ 291fb616e3fSDouglas Anderson next_active_frame |= 0x7; 292fb616e3fSDouglas Anderson goto exit; 293fb616e3fSDouglas Anderson } 294fb616e3fSDouglas Anderson 295fb616e3fSDouglas Anderson /* Adjust interval as per high speed schedule which has 8 uFrame */ 296fb616e3fSDouglas Anderson interval = gcd(qh->host_interval, 8); 297fb616e3fSDouglas Anderson 298fb616e3fSDouglas Anderson /* 299fb616e3fSDouglas Anderson * We know interval must divide (HFNUM_MAX_FRNUM + 1) now that we've 300fb616e3fSDouglas Anderson * done the gcd(), so it's safe to move to the beginning of the current 301fb616e3fSDouglas Anderson * interval like this. 302fb616e3fSDouglas Anderson * 303fb616e3fSDouglas Anderson * After this we might be before earliest_frame, but don't worry, 304fb616e3fSDouglas Anderson * we'll fix it... 305fb616e3fSDouglas Anderson */ 306fb616e3fSDouglas Anderson next_active_frame = (next_active_frame / interval) * interval; 307fb616e3fSDouglas Anderson 308fb616e3fSDouglas Anderson /* 309fb616e3fSDouglas Anderson * Actually choose to start at the frame number we've been 310fb616e3fSDouglas Anderson * scheduled for. 
311fb616e3fSDouglas Anderson */ 312fb616e3fSDouglas Anderson next_active_frame = dwc2_frame_num_inc(next_active_frame, 313fb616e3fSDouglas Anderson qh->assigned_uframe); 314fb616e3fSDouglas Anderson 315fb616e3fSDouglas Anderson /* 316fb616e3fSDouglas Anderson * We actually need 1 frame before since the next_active_frame is 317fb616e3fSDouglas Anderson * the frame number we'll be put on the ready list and we won't be on 318fb616e3fSDouglas Anderson * the bus until 1 frame later. 319fb616e3fSDouglas Anderson */ 320fb616e3fSDouglas Anderson next_active_frame = dwc2_frame_num_dec(next_active_frame, 1); 321fb616e3fSDouglas Anderson 322fb616e3fSDouglas Anderson /* 323fb616e3fSDouglas Anderson * By now we might actually be before the earliest_frame. Let's move 324fb616e3fSDouglas Anderson * up intervals until we're not. 325fb616e3fSDouglas Anderson */ 326fb616e3fSDouglas Anderson while (dwc2_frame_num_gt(earliest_frame, next_active_frame)) 327fb616e3fSDouglas Anderson next_active_frame = dwc2_frame_num_inc(next_active_frame, 328fb616e3fSDouglas Anderson interval); 329fb616e3fSDouglas Anderson 330fb616e3fSDouglas Anderson exit: 331fb616e3fSDouglas Anderson qh->next_active_frame = next_active_frame; 332fb616e3fSDouglas Anderson qh->start_active_frame = next_active_frame; 333fb616e3fSDouglas Anderson 334fb616e3fSDouglas Anderson dwc2_sch_vdbg(hsotg, "QH=%p First fn=%04x nxt=%04x\n", 335fb616e3fSDouglas Anderson qh, frame_number, qh->next_active_frame); 336fb616e3fSDouglas Anderson } 337fb616e3fSDouglas Anderson 338fb616e3fSDouglas Anderson /** 3392d3f1398SDouglas Anderson * dwc2_do_reserve() - Make a periodic reservation 3402d3f1398SDouglas Anderson * 3412d3f1398SDouglas Anderson * Try to allocate space in the periodic schedule. Depending on parameters 3422d3f1398SDouglas Anderson * this might use the microframe scheduler or the dumb scheduler. 
3432d3f1398SDouglas Anderson * 3442d3f1398SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 3452d3f1398SDouglas Anderson * @qh: QH for the periodic transfer. 3462d3f1398SDouglas Anderson * 3472d3f1398SDouglas Anderson * Returns: 0 upon success; error upon failure. 3482d3f1398SDouglas Anderson */ 3492d3f1398SDouglas Anderson static int dwc2_do_reserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 3502d3f1398SDouglas Anderson { 3512d3f1398SDouglas Anderson int status; 3522d3f1398SDouglas Anderson 3532d3f1398SDouglas Anderson if (hsotg->core_params->uframe_sched > 0) { 3542d3f1398SDouglas Anderson status = dwc2_find_uframe(hsotg, qh); 355fb616e3fSDouglas Anderson if (status >= 0) 356fb616e3fSDouglas Anderson qh->assigned_uframe = status; 3572d3f1398SDouglas Anderson } else { 3582d3f1398SDouglas Anderson status = dwc2_periodic_channel_available(hsotg); 3592d3f1398SDouglas Anderson if (status) { 3602d3f1398SDouglas Anderson dev_info(hsotg->dev, 3612d3f1398SDouglas Anderson "%s: No host channel available for periodic transfer\n", 3622d3f1398SDouglas Anderson __func__); 3632d3f1398SDouglas Anderson return status; 3642d3f1398SDouglas Anderson } 3652d3f1398SDouglas Anderson 3662d3f1398SDouglas Anderson status = dwc2_check_periodic_bandwidth(hsotg, qh); 3672d3f1398SDouglas Anderson } 3682d3f1398SDouglas Anderson 3692d3f1398SDouglas Anderson if (status) { 3702d3f1398SDouglas Anderson dev_dbg(hsotg->dev, 3712d3f1398SDouglas Anderson "%s: Insufficient periodic bandwidth for periodic transfer\n", 3722d3f1398SDouglas Anderson __func__); 3732d3f1398SDouglas Anderson return status; 3742d3f1398SDouglas Anderson } 3752d3f1398SDouglas Anderson 3762d3f1398SDouglas Anderson if (hsotg->core_params->uframe_sched <= 0) 3772d3f1398SDouglas Anderson /* Reserve periodic channel */ 3782d3f1398SDouglas Anderson hsotg->periodic_channels++; 3792d3f1398SDouglas Anderson 3802d3f1398SDouglas Anderson /* Update claimed usecs per (micro)frame */ 3812d3f1398SDouglas 
Anderson hsotg->periodic_usecs += qh->host_us; 3822d3f1398SDouglas Anderson 383fb616e3fSDouglas Anderson dwc2_pick_first_frame(hsotg, qh); 384fb616e3fSDouglas Anderson 3852d3f1398SDouglas Anderson return 0; 3862d3f1398SDouglas Anderson } 3872d3f1398SDouglas Anderson 3882d3f1398SDouglas Anderson /** 38917dd5b64SDouglas Anderson * dwc2_do_unreserve() - Actually release the periodic reservation 39017dd5b64SDouglas Anderson * 39117dd5b64SDouglas Anderson * This function actually releases the periodic bandwidth that was reserved 39217dd5b64SDouglas Anderson * by the given qh. 39317dd5b64SDouglas Anderson * 39417dd5b64SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 39517dd5b64SDouglas Anderson * @qh: QH for the periodic transfer. 39617dd5b64SDouglas Anderson */ 39717dd5b64SDouglas Anderson static void dwc2_do_unreserve(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh) 39817dd5b64SDouglas Anderson { 39917dd5b64SDouglas Anderson assert_spin_locked(&hsotg->lock); 40017dd5b64SDouglas Anderson 40117dd5b64SDouglas Anderson WARN_ON(!qh->unreserve_pending); 40217dd5b64SDouglas Anderson 40317dd5b64SDouglas Anderson /* No more unreserve pending--we're doing it */ 40417dd5b64SDouglas Anderson qh->unreserve_pending = false; 40517dd5b64SDouglas Anderson 40617dd5b64SDouglas Anderson if (WARN_ON(!list_empty(&qh->qh_list_entry))) 40717dd5b64SDouglas Anderson list_del_init(&qh->qh_list_entry); 40817dd5b64SDouglas Anderson 40917dd5b64SDouglas Anderson /* Update claimed usecs per (micro)frame */ 410ced9eee1SDouglas Anderson hsotg->periodic_usecs -= qh->host_us; 41117dd5b64SDouglas Anderson 41217dd5b64SDouglas Anderson if (hsotg->core_params->uframe_sched > 0) { 41317dd5b64SDouglas Anderson int i; 41417dd5b64SDouglas Anderson 41517dd5b64SDouglas Anderson for (i = 0; i < 8; i++) { 41617dd5b64SDouglas Anderson hsotg->frame_usecs[i] += qh->frame_usecs[i]; 41717dd5b64SDouglas Anderson qh->frame_usecs[i] = 0; 41817dd5b64SDouglas Anderson } 41917dd5b64SDouglas Anderson 
} else { 42017dd5b64SDouglas Anderson /* Release periodic channel reservation */ 42117dd5b64SDouglas Anderson hsotg->periodic_channels--; 42217dd5b64SDouglas Anderson } 42317dd5b64SDouglas Anderson } 42417dd5b64SDouglas Anderson 42517dd5b64SDouglas Anderson /** 42617dd5b64SDouglas Anderson * dwc2_unreserve_timer_fn() - Timer function to release periodic reservation 42717dd5b64SDouglas Anderson * 42817dd5b64SDouglas Anderson * According to the kernel doc for usb_submit_urb() (specifically the part about 42917dd5b64SDouglas Anderson * "Reserved Bandwidth Transfers"), we need to keep a reservation active as 43017dd5b64SDouglas Anderson * long as a device driver keeps submitting. Since we're using HCD_BH to give 43117dd5b64SDouglas Anderson * back the URB we need to give the driver a little bit of time before we 43217dd5b64SDouglas Anderson * release the reservation. This worker is called after the appropriate 43317dd5b64SDouglas Anderson * delay. 43417dd5b64SDouglas Anderson * 43517dd5b64SDouglas Anderson * @work: Pointer to a qh unreserve_work. 43617dd5b64SDouglas Anderson */ 43717dd5b64SDouglas Anderson static void dwc2_unreserve_timer_fn(unsigned long data) 43817dd5b64SDouglas Anderson { 43917dd5b64SDouglas Anderson struct dwc2_qh *qh = (struct dwc2_qh *)data; 44017dd5b64SDouglas Anderson struct dwc2_hsotg *hsotg = qh->hsotg; 44117dd5b64SDouglas Anderson unsigned long flags; 44217dd5b64SDouglas Anderson 44317dd5b64SDouglas Anderson /* 44417dd5b64SDouglas Anderson * Wait for the lock, or for us to be scheduled again. We 44517dd5b64SDouglas Anderson * could be scheduled again if: 44617dd5b64SDouglas Anderson * - We started executing but didn't get the lock yet. 44717dd5b64SDouglas Anderson * - A new reservation came in, but cancel didn't take effect 44817dd5b64SDouglas Anderson * because we already started executing. 44917dd5b64SDouglas Anderson * - The timer has been kicked again. 45017dd5b64SDouglas Anderson * In that case cancel and wait for the next call. 
45117dd5b64SDouglas Anderson */ 45217dd5b64SDouglas Anderson while (!spin_trylock_irqsave(&hsotg->lock, flags)) { 45317dd5b64SDouglas Anderson if (timer_pending(&qh->unreserve_timer)) 45417dd5b64SDouglas Anderson return; 45517dd5b64SDouglas Anderson } 45617dd5b64SDouglas Anderson 45717dd5b64SDouglas Anderson /* 45817dd5b64SDouglas Anderson * Might be no more unreserve pending if: 45917dd5b64SDouglas Anderson * - We started executing but didn't get the lock yet. 46017dd5b64SDouglas Anderson * - A new reservation came in, but cancel didn't take effect 46117dd5b64SDouglas Anderson * because we already started executing. 46217dd5b64SDouglas Anderson * 46317dd5b64SDouglas Anderson * We can't put this in the loop above because unreserve_pending needs 46417dd5b64SDouglas Anderson * to be accessed under lock, so we can only check it once we got the 46517dd5b64SDouglas Anderson * lock. 46617dd5b64SDouglas Anderson */ 46717dd5b64SDouglas Anderson if (qh->unreserve_pending) 46817dd5b64SDouglas Anderson dwc2_do_unreserve(hsotg, qh); 46917dd5b64SDouglas Anderson 47017dd5b64SDouglas Anderson spin_unlock_irqrestore(&hsotg->lock, flags); 47117dd5b64SDouglas Anderson } 47217dd5b64SDouglas Anderson 473197ba5f4SPaul Zimmerman /** 474b951c6c7SDouglas Anderson * dwc2_check_max_xfer_size() - Checks that the max transfer size allowed in a 475b951c6c7SDouglas Anderson * host channel is large enough to handle the maximum data transfer in a single 476b951c6c7SDouglas Anderson * (micro)frame for a periodic transfer 477b951c6c7SDouglas Anderson * 478b951c6c7SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 479b951c6c7SDouglas Anderson * @qh: QH for a periodic endpoint 480b951c6c7SDouglas Anderson * 481b951c6c7SDouglas Anderson * Return: 0 if successful, negative error code otherwise 482b951c6c7SDouglas Anderson */ 483b951c6c7SDouglas Anderson static int dwc2_check_max_xfer_size(struct dwc2_hsotg *hsotg, 484b951c6c7SDouglas Anderson struct dwc2_qh *qh) 
485b951c6c7SDouglas Anderson { 486b951c6c7SDouglas Anderson u32 max_xfer_size; 487b951c6c7SDouglas Anderson u32 max_channel_xfer_size; 488b951c6c7SDouglas Anderson int status = 0; 489b951c6c7SDouglas Anderson 490b951c6c7SDouglas Anderson max_xfer_size = dwc2_max_packet(qh->maxp) * dwc2_hb_mult(qh->maxp); 491b951c6c7SDouglas Anderson max_channel_xfer_size = hsotg->core_params->max_transfer_size; 492b951c6c7SDouglas Anderson 493b951c6c7SDouglas Anderson if (max_xfer_size > max_channel_xfer_size) { 494b951c6c7SDouglas Anderson dev_err(hsotg->dev, 495b951c6c7SDouglas Anderson "%s: Periodic xfer length %d > max xfer length for channel %d\n", 496b951c6c7SDouglas Anderson __func__, max_xfer_size, max_channel_xfer_size); 497b951c6c7SDouglas Anderson status = -ENOSPC; 498b951c6c7SDouglas Anderson } 499b951c6c7SDouglas Anderson 500b951c6c7SDouglas Anderson return status; 501b951c6c7SDouglas Anderson } 502b951c6c7SDouglas Anderson 503b951c6c7SDouglas Anderson /** 504b951c6c7SDouglas Anderson * dwc2_schedule_periodic() - Schedules an interrupt or isochronous transfer in 505b951c6c7SDouglas Anderson * the periodic schedule 506b951c6c7SDouglas Anderson * 507b951c6c7SDouglas Anderson * @hsotg: The HCD state structure for the DWC OTG controller 508b951c6c7SDouglas Anderson * @qh: QH for the periodic transfer. The QH should already contain the 509b951c6c7SDouglas Anderson * scheduling information. 
 *
 * Return: 0 if successful, negative error code otherwise
 */
static int dwc2_schedule_periodic(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;

	status = dwc2_check_max_xfer_size(hsotg, qh);
	if (status) {
		dev_dbg(hsotg->dev,
			"%s: Channel max transfer size too small for periodic transfer\n",
			__func__);
		return status;
	}

	/* Cancel pending unreserve; if canceled OK, unreserve was pending */
	if (del_timer(&qh->unreserve_timer))
		WARN_ON(!qh->unreserve_pending);

	/*
	 * Only need to reserve if there's not an unreserve pending, since if an
	 * unreserve is pending then by definition our old reservation is still
	 * valid. Unreserve might still be pending even if we didn't cancel if
	 * dwc2_unreserve_timer_fn() already started. Code in the timer handles
	 * that case.
	 */
	if (!qh->unreserve_pending) {
		status = dwc2_do_reserve(hsotg, qh);
		if (status)
			return status;
	} else {
		/*
		 * It might have been a while, so make sure that frame_number
		 * is still good. Note: we could also try to use the similar
		 * dwc2_next_periodic_start() but that schedules much more
		 * tightly and we might need to hurry and queue things up.
		 */
		if (dwc2_frame_num_le(qh->next_active_frame,
				      hsotg->frame_number))
			dwc2_pick_first_frame(hsotg, qh);
	}

	qh->unreserve_pending = 0;

	if (hsotg->core_params->dma_desc_enable > 0)
		/* Don't rely on SOF and start in ready schedule */
		list_add_tail(&qh->qh_list_entry, &hsotg->periodic_sched_ready);
	else
		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->periodic_sched_inactive);

	return 0;
}

/**
 * dwc2_deschedule_periodic() - Removes an interrupt or isochronous transfer
 * from the periodic schedule
 *
 * The bandwidth reservation is not dropped immediately; instead the
 * unreserve timer is armed so a quickly re-queued transfer keeps its slot.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: QH for the periodic transfer
 */
static void dwc2_deschedule_periodic(struct dwc2_hsotg *hsotg,
				     struct dwc2_qh *qh)
{
	bool did_modify;

	assert_spin_locked(&hsotg->lock);

	/*
	 * Schedule the unreserve to happen in a little bit. Cases here:
	 * - Unreserve worker might be sitting there waiting to grab the lock.
	 *   In this case it will notice it's been scheduled again and will
	 *   quit.
	 * - Unreserve worker might not be scheduled.
	 *
	 * We should never already be scheduled since dwc2_schedule_periodic()
	 * should have canceled the scheduled unreserve timer (hence the
	 * warning on did_modify).
	 *
	 * We add + 1 to the timer to guarantee that at least 1 jiffy has
	 * passed (otherwise if the jiffy counter might tick right after we
	 * read it and we'll get no delay).
	 */
	did_modify = mod_timer(&qh->unreserve_timer,
			       jiffies + DWC2_UNRESERVE_DELAY + 1);
	WARN_ON(did_modify);
	qh->unreserve_pending = 1;

	list_del_init(&qh->qh_list_entry);
}

/**
 * dwc2_qh_init() - Initializes a QH structure
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh: The QH to init
 * @urb: Holds the information about the device/endpoint needed to initialize
 *       the QH
 */
static void dwc2_qh_init(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			 struct dwc2_hcd_urb *urb)
{
	int dev_speed, hub_addr, hub_port;
	char *speed, *type;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	/* Initialize QH */
	qh->hsotg = hsotg;
	setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
		    (unsigned long)qh);
	qh->ep_type = dwc2_hcd_get_pipe_type(&urb->pipe_info);
	qh->ep_is_in = dwc2_hcd_is_pipe_in(&urb->pipe_info) ? 1 : 0;

	qh->data_toggle = DWC2_HC_PID_DATA0;
	qh->maxp = dwc2_hcd_get_mps(&urb->pipe_info);
	INIT_LIST_HEAD(&qh->qtd_list);
	INIT_LIST_HEAD(&qh->qh_list_entry);

	/* FS/LS Endpoint on HS Hub, NOT virtual root hub */
	dev_speed = dwc2_host_get_speed(hsotg, urb->priv);

	dwc2_host_hub_info(hsotg, urb->priv, &hub_addr, &hub_port);

	/*
	 * Hub addresses 0 and 1 are the (virtual) root hub, which has no
	 * transaction translator, so only mark do_split for a LS/FS device
	 * behind a real hub.
	 */
	if ((dev_speed == USB_SPEED_LOW || dev_speed == USB_SPEED_FULL) &&
	    hub_addr != 0 && hub_addr != 1) {
		dev_vdbg(hsotg->dev,
			 "QH init: EP %d: TT found at hub addr %d, for port %d\n",
			 dwc2_hcd_get_ep_num(&urb->pipe_info), hub_addr,
			 hub_port);
		qh->do_split = 1;
	}

	if (qh->ep_type == USB_ENDPOINT_XFER_INT ||
	    qh->ep_type == USB_ENDPOINT_XFER_ISOC) {
		/* Compute scheduling parameters once and save them */
		u32 hprt, prtspd;

		/* Todo: Account for split transfers in the bus time */
		int bytecount =
			dwc2_hb_mult(qh->maxp) * dwc2_max_packet(qh->maxp);

		qh->host_us = NS_TO_US(usb_calc_bus_time(qh->do_split ?
			       USB_SPEED_HIGH : dev_speed, qh->ep_is_in,
			       qh->ep_type == USB_ENDPOINT_XFER_ISOC,
			       bytecount));

		qh->host_interval = urb->interval;
		dwc2_sch_dbg(hsotg, "QH=%p init nxt=%04x, fn=%04x, int=%#x\n",
			     qh, qh->next_active_frame, hsotg->frame_number,
			     qh->host_interval);
#if 0
		/* Increase interrupt polling rate for debugging */
		if (qh->ep_type == USB_ENDPOINT_XFER_INT)
			qh->host_interval = 8;
#endif
		hprt = dwc2_readl(hsotg->regs + HPRT0);
		prtspd = (hprt & HPRT0_SPD_MASK) >> HPRT0_SPD_SHIFT;
		/*
		 * LS/FS device on a HS host: the device interval is in 1 ms
		 * frames but we schedule in 125 us microframes, so scale by
		 * the 8 microframes per frame.
		 */
		if (prtspd == HPRT0_SPD_HIGH_SPEED &&
		    (dev_speed == USB_SPEED_LOW ||
		     dev_speed == USB_SPEED_FULL)) {
			qh->host_interval *= 8;
			dwc2_sch_dbg(hsotg,
				     "QH=%p init*8 nxt=%04x, fn=%04x, int=%#x\n",
				     qh, qh->next_active_frame,
				     hsotg->frame_number, qh->host_interval);

		}
		dev_dbg(hsotg->dev, "interval=%d\n", qh->host_interval);
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH Initialized\n");
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - qh = %p\n", qh);
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Device Address = %d\n",
		 dwc2_hcd_get_dev_addr(&urb->pipe_info));
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Endpoint %d, %s\n",
		 dwc2_hcd_get_ep_num(&urb->pipe_info),
		 dwc2_hcd_is_pipe_in(&urb->pipe_info) ? "IN" : "OUT");

	qh->dev_speed = dev_speed;

	switch (dev_speed) {
	case USB_SPEED_LOW:
		speed = "low";
		break;
	case USB_SPEED_FULL:
		speed = "full";
		break;
	case USB_SPEED_HIGH:
		speed = "high";
		break;
	default:
		speed = "?";
		break;
	}
	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Speed = %s\n", speed);

	switch (qh->ep_type) {
	case USB_ENDPOINT_XFER_ISOC:
		type = "isochronous";
		break;
	case USB_ENDPOINT_XFER_INT:
		type = "interrupt";
		break;
	case USB_ENDPOINT_XFER_CONTROL:
		type = "control";
		break;
	case USB_ENDPOINT_XFER_BULK:
		type = "bulk";
		break;
	default:
		type = "?";
		break;
	}

	dev_vdbg(hsotg->dev, "DWC OTG HCD QH - Type = %s\n", type);

	if (qh->ep_type == USB_ENDPOINT_XFER_INT) {
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - usecs = %d\n",
			 qh->host_us);
		dev_vdbg(hsotg->dev, "DWC OTG HCD QH - interval = %d\n",
			 qh->host_interval);
	}
}

/**
 * dwc2_hcd_qh_create() - Allocates and initializes a QH
 *
 * @hsotg:        The HCD state structure for the DWC OTG controller
 * @urb:          Holds the information about the device/endpoint needed
 *                to initialize the QH
 * @atomic_alloc: Flag to do atomic allocation if needed
 *
 * Return: Pointer to the newly allocated QH, or NULL on error
 */
struct dwc2_qh *dwc2_hcd_qh_create(struct dwc2_hsotg *hsotg,
					  struct dwc2_hcd_urb *urb,
					  gfp_t mem_flags)
{
	struct dwc2_qh *qh;

	if (!urb->priv)
		return NULL;

	/* Allocate memory */
	qh = kzalloc(sizeof(*qh), mem_flags);
	if (!qh)
		return NULL;

	dwc2_qh_init(hsotg, qh, urb);

	if (hsotg->core_params->dma_desc_enable > 0 &&
	    dwc2_hcd_qh_init_ddma(hsotg, qh, mem_flags) < 0) {
		dwc2_hcd_qh_free(hsotg, qh);
		return NULL;
	}

	return qh;
}

/**
 * dwc2_hcd_qh_free() - Frees the QH
 *
 * @hsotg: HCD instance
 * @qh:    The QH to free
 *
 * QH should already be removed from the list. QTD list should already be empty
 * if called from URB Dequeue.
 *
 * Must NOT be called with interrupt disabled or spinlock held
 */
void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	/* Make sure any unreserve work is finished. */
	if (del_timer_sync(&qh->unreserve_timer)) {
		unsigned long flags;

		/* Timer was still pending: do the unreserve ourselves */
		spin_lock_irqsave(&hsotg->lock, flags);
		dwc2_do_unreserve(hsotg, qh);
		spin_unlock_irqrestore(&hsotg->lock, flags);
	}

	if (qh->desc_list)
		dwc2_hcd_qh_free_ddma(hsotg, qh);
	kfree(qh);
}

/**
 * dwc2_hcd_qh_add() - Adds a QH to either the non periodic or periodic
 * schedule if it is not already in the schedule. If the QH is already in
 * the schedule, no action is taken.
 *
 * @hsotg: The HCD state structure for the DWC OTG controller
 * @qh:    The QH to add
 *
 * Return: 0 if successful, negative error code otherwise
 */
int dwc2_hcd_qh_add(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	int status;
	u32 intr_mask;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (!list_empty(&qh->qh_list_entry))
		/* QH already in a schedule */
		return 0;

	/* Add the new QH to the appropriate schedule */
	if (dwc2_qh_is_non_per(qh)) {
		/* Schedule right away */
		qh->start_active_frame = hsotg->frame_number;
		qh->next_active_frame = qh->start_active_frame;

		/* Always start in inactive schedule */
		list_add_tail(&qh->qh_list_entry,
			      &hsotg->non_periodic_sched_inactive);
		return 0;
	}

	status = dwc2_schedule_periodic(hsotg, qh);
	if (status)
		return status;
	/* First periodic QH: start taking SOF interrupts */
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask |= GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
	hsotg->periodic_qh_count++;

	return 0;
}

/**
 * dwc2_hcd_qh_unlink() - Removes a QH from either the non-periodic or periodic
 * schedule. Memory is not freed.
 *
 * @hsotg: The HCD state structure
 * @qh:    QH to remove from schedule
 */
void dwc2_hcd_qh_unlink(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
{
	u32 intr_mask;

	dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (list_empty(&qh->qh_list_entry))
		/* QH is not in a schedule */
		return;

	if (dwc2_qh_is_non_per(qh)) {
		if (hsotg->non_periodic_qh_ptr == &qh->qh_list_entry)
			hsotg->non_periodic_qh_ptr =
					hsotg->non_periodic_qh_ptr->next;
		list_del_init(&qh->qh_list_entry);
		return;
	}

	dwc2_deschedule_periodic(hsotg, qh);
	hsotg->periodic_qh_count--;
	/* Last periodic QH removed: stop taking SOF interrupts */
	if (!hsotg->periodic_qh_count) {
		intr_mask = dwc2_readl(hsotg->regs + GINTMSK);
		intr_mask &= ~GINTSTS_SOF;
		dwc2_writel(intr_mask, hsotg->regs + GINTMSK);
	}
}

/**
 * dwc2_next_for_periodic_split() - Set next_active_frame midway thru a split.
 *
 * This is called for setting next_active_frame for periodic splits for all but
 * the first packet of the split. Confusing? I thought so...
 *
 * Periodic splits are single low/full speed transfers that we end up splitting
 * up into several high speed transfers. They always fit into one full (1 ms)
 * frame but might be split over several microframes (125 us each). We need to
 * put each of the parts on a very specific high speed frame.
 *
 * This function figures out where the next active uFrame needs to be.
 *
 * @hsotg:        The HCD state structure
 * @qh:           QH for the periodic transfer.
 * @frame_number: The current frame number.
 *
 * Return: number missed by (or 0 if we didn't miss).
 */
static int dwc2_next_for_periodic_split(struct dwc2_hsotg *hsotg,
					struct dwc2_qh *qh, u16 frame_number)
{
	u16 old_frame = qh->next_active_frame;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);
	int missed = 0;
	u16 incr;

	/*
	 * Basically: increment 1 normally, but 2 right after the start split
	 * (except for ISOC out).
	 */
	if (old_frame == qh->start_active_frame &&
	    !(qh->ep_type == USB_ENDPOINT_XFER_ISOC && !qh->ep_is_in))
		incr = 2;
	else
		incr = 1;

	qh->next_active_frame = dwc2_frame_num_inc(old_frame, incr);

	/*
	 * Note that it's OK for frame_number to be 1 frame past
	 * next_active_frame. Remember that next_active_frame is supposed to
	 * be 1 frame _before_ when we want to be scheduled. If we're 1 frame
	 * past it just means schedule ASAP.
	 *
	 * It's _not_ OK, however, if we're more than one frame past.
	 */
	if (dwc2_frame_num_gt(prev_frame_number, qh->next_active_frame)) {
		/*
		 * OOPS, we missed. That's actually pretty bad since
		 * the hub will be unhappy; try ASAP I guess.
		 */
		missed = dwc2_frame_num_dec(prev_frame_number,
					    qh->next_active_frame);
		qh->next_active_frame = frame_number;
	}

	return missed;
}

/**
 * dwc2_next_periodic_start() - Set next_active_frame for next transfer start
 *
 * This is called for setting next_active_frame for a periodic transfer for
 * all cases other than midway through a periodic split. This will also update
 * start_active_frame.
 *
 * Since we _always_ keep start_active_frame as the start of the previous
 * transfer this is normally pretty easy: we just add our interval to
 * start_active_frame and we've got our answer.
 *
 * The tricks come into play if we miss. In that case we'll look for the next
 * slot we can fit into.
 *
 * @hsotg:        The HCD state structure
 * @qh:           QH for the periodic transfer.
 * @frame_number: The current frame number.
 *
 * Return: number missed by (or 0 if we didn't miss).
 */
static int dwc2_next_periodic_start(struct dwc2_hsotg *hsotg,
				    struct dwc2_qh *qh, u16 frame_number)
{
	int missed = 0;
	u16 interval = qh->host_interval;
	u16 prev_frame_number = dwc2_frame_num_dec(frame_number, 1);

	qh->start_active_frame = dwc2_frame_num_inc(qh->start_active_frame,
						    interval);

	/*
	 * The dwc2_frame_num_gt() function used below won't work terribly well
	 * if we just incremented by a really large interval since the
	 * frame counter only goes to 0x3fff. It's terribly unlikely that we
	 * will have missed in this case anyway. Just go to exit. If we want
	 * to try to do better we'll need to keep track of a bigger counter
	 * somewhere in the driver and handle overflows.
	 */
	if (interval >= 0x1000)
		goto exit;

	/*
	 * Test for misses, which is when it's too late to schedule.
	 *
	 * A few things to note:
	 * - We compare against prev_frame_number since start_active_frame
	 *   and next_active_frame are always 1 frame before we want things
	 *   to be active and we assume we can still get scheduled in the
	 *   current frame number.
	 * - Some misses are expected. Specifically, in order to work
	 *   perfectly dwc2 really needs quite spectacular interrupt latency
	 *   requirements. It needs to be able to handle its interrupts
	 *   completely within 125 us of them being asserted. That not only
	 *   means that the dwc2 interrupt handler needs to be fast but it
	 *   means that nothing else in the system has to block dwc2 for a long
	 *   time. We can help with the dwc2 parts of this, but it's hard to
	 *   guarantee that a system will have interrupt latency < 125 us, so
	 *   we have to be robust to some misses.
	 */
	if (dwc2_frame_num_gt(prev_frame_number, qh->start_active_frame)) {
		u16 ideal_start = qh->start_active_frame;

		/*
		 * Adjust interval as per gcd with plan length.
		 * NOTE(review): stepping by gcd(interval, 8) appears intended
		 * to keep the microframe alignment within the 8-uframe plan
		 * while searching for the next usable slot -- confirm against
		 * the scheduler's plan-length assumptions.
		 */
		interval = gcd(interval, 8);

		do {
			qh->start_active_frame = dwc2_frame_num_inc(
				qh->start_active_frame, interval);
		} while (dwc2_frame_num_gt(prev_frame_number,
					   qh->start_active_frame));

		missed = dwc2_frame_num_dec(qh->start_active_frame,
					    ideal_start);
	}

exit:
	qh->next_active_frame = qh->start_active_frame;

	return missed;
}

/*
 * Deactivates a QH. For non-periodic QHs, removes the QH from the active
 * non-periodic schedule. The QH is added to the inactive non-periodic
 * schedule if any QTDs are still attached to the QH.
 *
 * For periodic QHs, the QH is removed from the periodic queued schedule. If
 * there are any QTDs still attached to the QH, the QH is added to either the
 * periodic inactive schedule or the periodic ready schedule and its next
 * scheduled frame is calculated. The QH is placed in the ready schedule if
 * the scheduled frame has been reached already. Otherwise it's placed in the
 * inactive schedule. If there are no QTDs attached to the QH, the QH is
 * completely removed from the periodic schedule.
 */
void dwc2_hcd_qh_deactivate(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh,
			    int sched_next_periodic_split)
{
	u16 old_frame = qh->next_active_frame;
	u16 frame_number;
	int missed;

	if (dbg_qh(qh))
		dev_vdbg(hsotg->dev, "%s()\n", __func__);

	if (dwc2_qh_is_non_per(qh)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		if (!list_empty(&qh->qtd_list))
			/* Add back to inactive non-periodic schedule */
			dwc2_hcd_qh_add(hsotg, qh);
		return;
	}

	/*
	 * Use the real frame number rather than the cached value as of the
	 * last SOF just to get us a little closer to reality. Note that
	 * means we don't actually know if we've already handled the SOF
	 * interrupt for this frame.
	 */
	frame_number = dwc2_hcd_get_frame_number(hsotg);

	if (sched_next_periodic_split)
		missed = dwc2_next_for_periodic_split(hsotg, qh, frame_number);
	else
		missed = dwc2_next_periodic_start(hsotg, qh, frame_number);

	dwc2_sch_vdbg(hsotg,
		      "QH=%p next(%d) fn=%04x, sch=%04x=>%04x (%+d) miss=%d %s\n",
		      qh, sched_next_periodic_split, frame_number, old_frame,
		      qh->next_active_frame,
		      dwc2_frame_num_dec(qh->next_active_frame, old_frame),
		      missed, missed ? "MISS" : "");

	if (list_empty(&qh->qtd_list)) {
		dwc2_hcd_qh_unlink(hsotg, qh);
		return;
	}

	/*
	 * Remove from periodic_sched_queued and move to
	 * appropriate queue
	 *
	 * Note: we purposely use the frame_number from the "hsotg" structure
	 * since we know SOF interrupt will handle future frames.
	 */
	if (dwc2_frame_num_le(qh->next_active_frame, hsotg->frame_number))
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_ready);
	else
		list_move_tail(&qh->qh_list_entry,
			       &hsotg->periodic_sched_inactive);
}

/**
 * dwc2_hcd_qtd_init() - Initializes a QTD structure
 *
 * @qtd: The QTD to initialize
 * @urb: The associated URB
 */
void dwc2_hcd_qtd_init(struct dwc2_qtd *qtd, struct dwc2_hcd_urb *urb)
{
	qtd->urb = urb;
	if (dwc2_hcd_get_pipe_type(&urb->pipe_info) ==
			USB_ENDPOINT_XFER_CONTROL) {
		/*
		 * The only time the QTD data toggle is used is on the data
		 * phase of control transfers. This phase always starts with
		 * DATA1.
		 */
		qtd->data_toggle = DWC2_HC_PID_DATA1;
		qtd->control_phase = DWC2_CONTROL_SETUP;
	}

	/* Start split */
	qtd->complete_split = 0;
	qtd->isoc_split_pos = DWC2_HCSPLT_XACTPOS_ALL;
	qtd->isoc_split_offset = 0;
	qtd->in_process = 0;

	/* Store the qtd ptr in the urb to reference the QTD */
	urb->qtd = qtd;
}

/**
 * dwc2_hcd_qtd_add() - Adds a QTD to the QTD-list of a QH
 *			Caller must hold driver lock.
 *
 * @hsotg: The DWC HCD structure
 * @qtd:   The QTD to add
 * @qh:    Queue head to add qtd to
 *
 * Return: 0 if successful, negative error code otherwise
 *
 * If the QH to which the QTD is added is not currently scheduled, it is placed
 * into the proper schedule based on its EP type.
 */
int dwc2_hcd_qtd_add(struct dwc2_hsotg *hsotg, struct dwc2_qtd *qtd,
		     struct dwc2_qh *qh)
{
	int retval;

	if (unlikely(!qh)) {
		dev_err(hsotg->dev, "%s: Invalid QH\n", __func__);
		retval = -EINVAL;
		goto fail;
	}

	/* Make sure the QH is in a schedule before attaching the QTD */
	retval = dwc2_hcd_qh_add(hsotg, qh);
	if (retval)
		goto fail;

	qtd->qh = qh;
	list_add_tail(&qtd->qtd_list_entry, &qh->qtd_list);

	return 0;
fail:
	return retval;
}