/*
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/ratelimit.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>

#include <sound/core.h>
#include <sound/pcm.h>

#include "usbaudio.h"
#include "helper.h"
#include "card.h"
#include "endpoint.h"
#include "pcm.h"

/*
 * convert a sampling rate into our full speed format (fs/1000 in Q16.16)
 * this will overflow at approx 524 kHz
 */
static inline unsigned get_usb_full_speed_rate(unsigned int rate)
{
	return ((rate << 13) + 62) / 125;
}

/*
 * convert a sampling rate into USB high speed format (fs/8000 in Q16.16)
 * this will overflow at approx 4 MHz
 */
static inline unsigned get_usb_high_speed_rate(unsigned int rate)
{
	return ((rate << 10) + 62) / 125;
}
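
/*
 * For example, at 48 kHz the full speed value is 48000 * 8192 / 125 =
 * 0x300000 (48.0 audio frames per 1 ms frame), and the high speed value is
 * 48000 * 1024 / 125 = 0x60000 (6.0 audio frames per 125 us microframe).
 */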

/*
 * unlink active urbs.
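 * data urbs are tracked in the low bits of active_mask/unlink_mask,
 * sync urbs from bit 16 upwards, hence the "i + 16" offsets below.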
 */
static int deactivate_urbs(struct snd_usb_substream *subs, int force, int can_sleep)
{
	struct snd_usb_audio *chip = subs->stream->chip;
	unsigned int i;
	int async;

	subs->running = 0;

	if (!force && subs->stream->chip->shutdown) /* to be sure... */
		return -EBADFD;

	async = !can_sleep && chip->async_unlink;

	if (!async && in_interrupt())
		return 0;

	for (i = 0; i < subs->nurbs; i++) {
		if (test_bit(i, &subs->active_mask)) {
			if (!test_and_set_bit(i, &subs->unlink_mask)) {
				struct urb *u = subs->dataurb[i].urb;
				if (async)
					usb_unlink_urb(u);
				else
					usb_kill_urb(u);
			}
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (test_bit(i+16, &subs->active_mask)) {
				if (!test_and_set_bit(i+16, &subs->unlink_mask)) {
					struct urb *u = subs->syncurb[i].urb;
					if (async)
						usb_unlink_urb(u);
					else
						usb_kill_urb(u);
				}
			}
		}
	}
	return 0;
}


/*
 * release an urb context (its transfer buffer and the urb itself)
 */
static void release_urb_ctx(struct snd_urb_ctx *u)
{
	if (u->urb) {
		if (u->buffer_size)
			usb_free_coherent(u->subs->dev, u->buffer_size,
					u->urb->transfer_buffer,
					u->urb->transfer_dma);
		usb_free_urb(u->urb);
		u->urb = NULL;
	}
}

/*
 *  wait until all urbs are processed.
 */
static int wait_clear_urbs(struct snd_usb_substream *subs)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(1000);
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = 0; i < subs->nurbs; i++) {
			if (test_bit(i, &subs->active_mask))
				alive++;
		}
		if (subs->syncpipe) {
			for (i = 0; i < SYNC_URBS; i++) {
				if (test_bit(i + 16, &subs->active_mask))
					alive++;
			}
		}
		if (! alive)
			break;
		schedule_timeout_uninterruptible(1);
	} while (time_before(jiffies, end_time));
	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs..\n", alive);
	return 0;
}

/*
 * release a substream
 */
void snd_usb_release_substream_urbs(struct snd_usb_substream *subs, int force)
{
	int i;

	/* stop urbs (to be sure) */
	deactivate_urbs(subs, force, 1);
	wait_clear_urbs(subs);

	for (i = 0; i < MAX_URBS; i++)
		release_urb_ctx(&subs->dataurb[i]);
	for (i = 0; i < SYNC_URBS; i++)
		release_urb_ctx(&subs->syncurb[i]);
	usb_free_coherent(subs->dev, SYNC_URBS * 4,
			subs->syncbuf, subs->sync_dma);
	subs->syncbuf = NULL;
	subs->nurbs = 0;
}

/*
 * complete callback from data urb
 */
static void snd_complete_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}


/*
 * complete callback from sync urb
 */
static void snd_complete_sync_urb(struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;
	struct snd_usb_substream *subs = ctx->subs;
	struct snd_pcm_substream *substream = ctx->subs->pcm_substream;
	int err = 0;

	if ((subs->running && subs->ops.retire_sync(subs, substream->runtime, urb)) ||
	    !subs->running || /* can be stopped during retire callback */
	    (err = subs->ops.prepare_sync(subs, substream->runtime, urb)) < 0 ||
	    (err = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
		clear_bit(ctx->index + 16, &subs->active_mask);
		if (err < 0) {
			snd_printd(KERN_ERR "cannot submit sync urb (err = %d)\n", err);
			snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
		}
	}
}


/*
 * initialize a substream for playback/capture
 */
int snd_usb_init_substream_urbs(struct snd_usb_substream *subs,
				unsigned int period_bytes,
				unsigned int rate,
				unsigned int frame_bits)
{
	unsigned int maxsize, i;
	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
	unsigned int urb_packs, total_packs, packs_per_ms;
	struct snd_usb_audio *chip = subs->stream->chip;

	/* calculate the frequency in 16.16 format */
	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
		subs->freqn = get_usb_full_speed_rate(rate);
	else
		subs->freqn = get_usb_high_speed_rate(rate);
	subs->freqm = subs->freqn;
	subs->freqshift = INT_MIN;
	/* calculate max. frequency */
	if (subs->maxpacksize) {
		/* whatever fits into a max. size packet */
		maxsize = subs->maxpacksize;
		subs->freqmax = (maxsize / (frame_bits >> 3))
				<< (16 - subs->datainterval);
	} else {
		/* no max. packet size: just take 25% higher than nominal */
		subs->freqmax = subs->freqn + (subs->freqn >> 2);
		maxsize = ((subs->freqmax + 0xffff) * (frame_bits >> 3))
				>> (16 - subs->datainterval);
	}
	subs->phase = 0;

	if (subs->fill_max)
		subs->curpacksize = subs->maxpacksize;
	else
		subs->curpacksize = maxsize;
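	/* freqn, freqm and freqmax are audio frames per USB (micro)frame in
	 * Q16.16; maxsize and curpacksize are byte counts per iso packet */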

	if (snd_usb_get_speed(subs->dev) != USB_SPEED_FULL)
		packs_per_ms = 8 >> subs->datainterval;
	else
		packs_per_ms = 1;

	if (is_playback) {
		urb_packs = max(chip->nrpacks, 1);
		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
	} else
		urb_packs = 1;
	urb_packs *= packs_per_ms;
	if (subs->syncpipe)
		urb_packs = min(urb_packs, 1U << subs->syncinterval);

	/* decide how many packets to use */
	if (is_playback) {
		unsigned int minsize, maxpacks;
		/* determine how small a packet can be */
		minsize = (subs->freqn >> (16 - subs->datainterval))
			  * (frame_bits >> 3);
		/* with sync from device, assume it can be 12.5% lower */
		if (subs->syncpipe)
			minsize -= minsize >> 3;
		minsize = max(minsize, 1u);
		total_packs = (period_bytes + minsize - 1) / minsize;
		/* we need at least two URBs for queueing */
		if (total_packs < 2) {
			total_packs = 2;
		} else {
			/* and we don't want too long a queue either */
			maxpacks = max(MAX_QUEUE * packs_per_ms, urb_packs * 2);
			total_packs = min(total_packs, maxpacks);
		}
	} else {
		while (urb_packs > 1 && urb_packs * maxsize >= period_bytes)
			urb_packs >>= 1;
		total_packs = MAX_URBS * urb_packs;
	}
	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
	if (subs->nurbs > MAX_URBS) {
		/* too many... */
		subs->nurbs = MAX_URBS;
		total_packs = MAX_URBS * urb_packs;
	} else if (subs->nurbs < 2) {
		/* too few - we need at least two URBs
		 * to ensure contiguous playback/capture
		 */
		subs->nurbs = 2;
	}

	/* allocate and initialize data urbs */
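	/* the per-packet offsets and lengths are filled in later by the
	 * prepare callbacks, right before each URB is (re)submitted */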
	for (i = 0; i < subs->nurbs; i++) {
		struct snd_urb_ctx *u = &subs->dataurb[i];
		u->index = i;
		u->subs = subs;
		u->packets = (i + 1) * total_packs / subs->nurbs
			- i * total_packs / subs->nurbs;
		u->buffer_size = maxsize * u->packets;
		if (subs->fmt_type == UAC_FORMAT_TYPE_II)
			u->packets++; /* for transfer delimiter */
		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
		if (!u->urb)
			goto out_of_memory;
		u->urb->transfer_buffer =
			usb_alloc_coherent(subs->dev, u->buffer_size,
					   GFP_KERNEL, &u->urb->transfer_dma);
		if (!u->urb->transfer_buffer)
			goto out_of_memory;
		u->urb->pipe = subs->datapipe;
		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		u->urb->interval = 1 << subs->datainterval;
		u->urb->context = u;
		u->urb->complete = snd_complete_urb;
	}

	if (subs->syncpipe) {
		/* allocate and initialize sync urbs */
		subs->syncbuf = usb_alloc_coherent(subs->dev, SYNC_URBS * 4,
						 GFP_KERNEL, &subs->sync_dma);
		if (!subs->syncbuf)
			goto out_of_memory;
		for (i = 0; i < SYNC_URBS; i++) {
			struct snd_urb_ctx *u = &subs->syncurb[i];
			u->index = i;
			u->subs = subs;
			u->packets = 1;
			u->urb = usb_alloc_urb(1, GFP_KERNEL);
			if (!u->urb)
				goto out_of_memory;
			u->urb->transfer_buffer = subs->syncbuf + i * 4;
			u->urb->transfer_dma = subs->sync_dma + i * 4;
			u->urb->transfer_buffer_length = 4;
			u->urb->pipe = subs->syncpipe;
			u->urb->transfer_flags = URB_ISO_ASAP |
						 URB_NO_TRANSFER_DMA_MAP;
			u->urb->number_of_packets = 1;
			u->urb->interval = 1 << subs->syncinterval;
			u->urb->context = u;
			u->urb->complete = snd_complete_sync_urb;
		}
	}
	return 0;

out_of_memory:
	snd_usb_release_substream_urbs(subs, 0);
	return -ENOMEM;
}

/*
 * prepare urb for full speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 10.14 frequency is passed through the pipe.
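 * (freqn is Q16.16, so freqn >> 2, written as three little-endian bytes
 * below, is the corresponding 10.14 value)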
 */
static int prepare_capture_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->iso_frame_desc[0].length = 3;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn >> 2;
	cp[1] = subs->freqn >> 10;
	cp[2] = subs->freqn >> 18;
	return 0;
}

/*
 * prepare urb for high speed capture sync pipe
 *
 * fill the length and offset of each urb descriptor.
 * the fixed 12.13 frequency is passed as 16.16 through the pipe.
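 * (freqn already holds frames per microframe in Q16.16, so it is written
 * out unchanged as four little-endian bytes)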
 */
static int prepare_capture_sync_urb_hs(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned char *cp = urb->transfer_buffer;
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->iso_frame_desc[0].length = 4;
	urb->iso_frame_desc[0].offset = 0;
	cp[0] = subs->freqn;
	cp[1] = subs->freqn >> 8;
	cp[2] = subs->freqn >> 16;
	cp[3] = subs->freqn >> 24;
	return 0;
}

/*
 * process after capture sync complete
 * - nothing to do
 */
static int retire_capture_sync_urb(struct snd_usb_substream *subs,
				   struct snd_pcm_runtime *runtime,
				   struct urb *urb)
{
	return 0;
}

/*
 * prepare urb for capture data pipe
 *
 * fill the offset and length of each descriptor.
 *
 * we use a temporary buffer to write the captured data.
 * since the length of each received packet is only known after the
 * transfer completes, we cannot write into the pcm buffer directly;
 * the data is copied to the pcm ring buffer in the complete callback.
 */
static int prepare_capture_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	int i, offs;
	struct snd_urb_ctx *ctx = urb->context;

	offs = 0;
	urb->dev = ctx->subs->dev; /* we need to set this each time */
	for (i = 0; i < ctx->packets; i++) {
		urb->iso_frame_desc[i].offset = offs;
		urb->iso_frame_desc[i].length = subs->curpacksize;
		offs += subs->curpacksize;
	}
	urb->transfer_buffer_length = offs;
	urb->number_of_packets = ctx->packets;
	return 0;
}

/*
 * process after capture complete
 *
 * copy the data from each descriptor to the pcm buffer, and
 * update the current position.
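 * (the memcpy below handles wrap-around at the end of the pcm ring buffer)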
 */
static int retire_capture_urb(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime,
			      struct urb *urb)
{
	unsigned long flags;
	unsigned char *cp;
	int i;
	unsigned int stride, frames, bytes, oldptr;
	int period_elapsed = 0;

	stride = runtime->frame_bits >> 3;

	for (i = 0; i < urb->number_of_packets; i++) {
		cp = (unsigned char *)urb->transfer_buffer + urb->iso_frame_desc[i].offset;
		if (urb->iso_frame_desc[i].status && printk_ratelimit()) {
			snd_printdd("frame %d active: %d\n", i, urb->iso_frame_desc[i].status);
			// continue;
		}
		bytes = urb->iso_frame_desc[i].actual_length;
		frames = bytes / stride;
		if (!subs->txfr_quirk)
			bytes = frames * stride;
		if (bytes % (runtime->sample_bits >> 3) != 0) {
#ifdef CONFIG_SND_DEBUG_VERBOSE
			int oldbytes = bytes;
#endif
			bytes = frames * stride;
			snd_printdd(KERN_ERR "Corrected urb data len. %d->%d\n",
							oldbytes, bytes);
		}
		/* update the current pointer */
		spin_lock_irqsave(&subs->lock, flags);
		oldptr = subs->hwptr_done;
		subs->hwptr_done += bytes;
		if (subs->hwptr_done >= runtime->buffer_size * stride)
			subs->hwptr_done -= runtime->buffer_size * stride;
		frames = (bytes + (oldptr % stride)) / stride;
		subs->transfer_done += frames;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
		}
		spin_unlock_irqrestore(&subs->lock, flags);
		/* copy a data chunk */
		if (oldptr + bytes > runtime->buffer_size * stride) {
			unsigned int bytes1 =
					runtime->buffer_size * stride - oldptr;
			memcpy(runtime->dma_area + oldptr, cp, bytes1);
			memcpy(runtime->dma_area, cp + bytes1, bytes - bytes1);
		} else {
			memcpy(runtime->dma_area + oldptr, cp, bytes);
		}
	}
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}

/*
 * Process after capture complete when paused.  Nothing to do.
 */
static int retire_paused_capture_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	return 0;
}


/*
 * prepare urb for playback sync pipe
 *
 * set up the offset and length to receive the current frequency.
 */
static int prepare_playback_sync_urb(struct snd_usb_substream *subs,
				     struct snd_pcm_runtime *runtime,
				     struct urb *urb)
{
	struct snd_urb_ctx *ctx = urb->context;

	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->iso_frame_desc[0].length = min(4u, ctx->subs->syncmaxsize);
	urb->iso_frame_desc[0].offset = 0;
	return 0;
}

/*
 * process after playback sync complete
 *
 * Full speed devices report feedback values in 10.14 format as samples per
 * frame, high speed devices in 16.16 format as samples per microframe.
 * Because the Audio Class 1 spec was written before USB 2.0, many high speed
 * devices use a wrong interpretation, some others use an entirely different
 * format.  Therefore, we cannot predict what format any particular device uses
 * and must detect it automatically.
 */
static int retire_playback_sync_urb(struct snd_usb_substream *subs,
				    struct snd_pcm_runtime *runtime,
				    struct urb *urb)
{
	unsigned int f;
	int shift;
	unsigned long flags;

	if (urb->iso_frame_desc[0].status != 0 ||
	    urb->iso_frame_desc[0].actual_length < 3)
		return 0;

	f = le32_to_cpup(urb->transfer_buffer);
	if (urb->iso_frame_desc[0].actual_length == 3)
		f &= 0x00ffffff;
	else
		f &= 0x0fffffff;
	if (f == 0)
		return 0;

	if (unlikely(subs->freqshift == INT_MIN)) {
		/*
		 * The first time we see a feedback value, determine its format
		 * by shifting it left or right until it matches the nominal
		 * frequency value.  This assumes that the feedback does not
		 * differ from the nominal value more than +50% or -25%.
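		 * For example, a correct 10.14 reply from a full speed device
		 * is a quarter of the Q16.16 nominal value and ends up with
		 * shift == 2.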
		 */
		shift = 0;
		while (f < subs->freqn - subs->freqn / 4) {
			f <<= 1;
			shift++;
		}
		while (f > subs->freqn + subs->freqn / 2) {
			f >>= 1;
			shift--;
		}
		subs->freqshift = shift;
	}
	else if (subs->freqshift >= 0)
		f <<= subs->freqshift;
	else
		f >>= -subs->freqshift;

	if (likely(f >= subs->freqn - subs->freqn / 8 && f <= subs->freqmax)) {
		/*
		 * If the frequency looks valid, set it.
		 * This value is referred to in prepare_playback_urb().
		 */
		spin_lock_irqsave(&subs->lock, flags);
		subs->freqm = f;
		spin_unlock_irqrestore(&subs->lock, flags);
	} else {
		/*
		 * Out of range; maybe the shift value is wrong.
		 * Reset it so that we autodetect again the next time.
		 */
		subs->freqshift = INT_MIN;
	}

	return 0;
}

/* determine the number of frames in the next packet */
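/*
 * subs->phase is a Q16.16 accumulator; its fractional part is carried over
 * from one packet to the next so that the long-term average packet size
 * follows the (possibly feedback-adjusted) rate in subs->freqm.
 */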
static int snd_usb_audio_next_packet_size(struct snd_usb_substream *subs)
{
	if (subs->fill_max)
		return subs->maxframesize;
	else {
		subs->phase = (subs->phase & 0xffff)
			+ (subs->freqm << subs->datainterval);
		return min(subs->phase >> 16, subs->maxframesize);
	}
}

/*
 * Prepare urb for streaming before playback starts or when paused.
 *
 * We don't have any data, so we send silence.
 */
static int prepare_nodata_playback_urb(struct snd_usb_substream *subs,
				       struct snd_pcm_runtime *runtime,
				       struct urb *urb)
{
	unsigned int i, offs, counts;
	struct snd_urb_ctx *ctx = urb->context;
	int stride = runtime->frame_bits >> 3;

	offs = 0;
	urb->dev = ctx->subs->dev;
	for (i = 0; i < ctx->packets; ++i) {
		counts = snd_usb_audio_next_packet_size(subs);
		urb->iso_frame_desc[i].offset = offs * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		offs += counts;
	}
	urb->number_of_packets = ctx->packets;
	urb->transfer_buffer_length = offs * stride;
	memset(urb->transfer_buffer,
	       runtime->format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0,
	       offs * stride);
	return 0;
}

/*
 * prepare urb for playback data pipe
 *
 * Since a URB can handle only a single linear buffer, we must use double
 * buffering when the data to be transferred overflows the buffer boundary.
 * To avoid inconsistencies when updating hwptr_done, we use double buffering
 * for all URBs.
 */
static int prepare_playback_urb(struct snd_usb_substream *subs,
				struct snd_pcm_runtime *runtime,
				struct urb *urb)
{
	int i, stride;
	unsigned int counts, frames, bytes;
	unsigned long flags;
	int period_elapsed = 0;
	struct snd_urb_ctx *ctx = urb->context;

	stride = runtime->frame_bits >> 3;

	frames = 0;
	urb->dev = ctx->subs->dev; /* we need to set this each time */
	urb->number_of_packets = 0;
	spin_lock_irqsave(&subs->lock, flags);
	for (i = 0; i < ctx->packets; i++) {
		counts = snd_usb_audio_next_packet_size(subs);
		/* set up descriptor */
		urb->iso_frame_desc[i].offset = frames * stride;
		urb->iso_frame_desc[i].length = counts * stride;
		frames += counts;
		urb->number_of_packets++;
		subs->transfer_done += counts;
		if (subs->transfer_done >= runtime->period_size) {
			subs->transfer_done -= runtime->period_size;
			period_elapsed = 1;
			if (subs->fmt_type == UAC_FORMAT_TYPE_II) {
				if (subs->transfer_done > 0) {
					/* FIXME: fill-max mode is not
					 * supported yet */
					frames -= subs->transfer_done;
					counts -= subs->transfer_done;
					urb->iso_frame_desc[i].length =
						counts * stride;
					subs->transfer_done = 0;
				}
				i++;
				if (i < ctx->packets) {
					/* add a transfer delimiter */
					urb->iso_frame_desc[i].offset =
						frames * stride;
					urb->iso_frame_desc[i].length = 0;
					urb->number_of_packets++;
				}
				break;
			}
		}
		if (period_elapsed) /* finish at the period boundary */
			break;
	}
	bytes = frames * stride;
	if (subs->hwptr_done + bytes > runtime->buffer_size * stride) {
		/* the transferred chunk wraps around the end of the ring buffer */
		unsigned int bytes1 =
			runtime->buffer_size * stride - subs->hwptr_done;
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes1);
		memcpy(urb->transfer_buffer + bytes1,
		       runtime->dma_area, bytes - bytes1);
	} else {
		memcpy(urb->transfer_buffer,
		       runtime->dma_area + subs->hwptr_done, bytes);
	}
	subs->hwptr_done += bytes;
	if (subs->hwptr_done >= runtime->buffer_size * stride)
		subs->hwptr_done -= runtime->buffer_size * stride;

	/* update delay with exact number of samples queued */
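	/* (retire_playback_urb() subtracts these frames again when the URB
	 * completes) */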
	runtime->delay = subs->last_delay;
	runtime->delay += frames;
	subs->last_delay = runtime->delay;

	/* realign last_frame_number */
	subs->last_frame_number = usb_get_current_frame_number(subs->dev);
	subs->last_frame_number &= 0xFF; /* keep 8 LSBs */

	spin_unlock_irqrestore(&subs->lock, flags);
	urb->transfer_buffer_length = bytes;
	if (period_elapsed)
		snd_pcm_period_elapsed(subs->pcm_substream);
	return 0;
}

/*
 * process after playback data complete
 * - decrease the delay count again
 */
static int retire_playback_urb(struct snd_usb_substream *subs,
			       struct snd_pcm_runtime *runtime,
			       struct urb *urb)
{
	unsigned long flags;
	int stride = runtime->frame_bits >> 3;
	int processed = urb->transfer_buffer_length / stride;
	int est_delay;

	spin_lock_irqsave(&subs->lock, flags);

	est_delay = snd_usb_pcm_delay(subs, runtime->rate);
	/* update delay with exact number of samples played */
	if (processed > subs->last_delay)
		subs->last_delay = 0;
	else
		subs->last_delay -= processed;
	runtime->delay = subs->last_delay;

	/*
	 * Report when delay estimate is off by more than 2ms.
	 * The error should be lower than 2ms since the estimate relies
	 * on two reads of a counter updated every ms.
	 */
	if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2)
		snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n",
			est_delay, subs->last_delay);

	spin_unlock_irqrestore(&subs->lock, flags);
	return 0;
}

static const char *usb_error_string(int err)
{
	switch (err) {
	case -ENODEV:
		return "no device";
	case -ENOENT:
		return "endpoint not enabled";
	case -EPIPE:
		return "endpoint stalled";
	case -ENOSPC:
		return "not enough bandwidth";
	case -ESHUTDOWN:
		return "device disabled";
	case -EHOSTUNREACH:
		return "device suspended";
	case -EINVAL:
	case -EAGAIN:
	case -EFBIG:
	case -EMSGSIZE:
		return "internal error";
	default:
		return "unknown error";
	}
}

/*
 * set up and start data/sync urbs
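 *
 * all URBs are prepared before any of them is submitted, so a failure in
 * either phase can be unwound by the single deactivate_urbs() call at the
 * error label.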
 */
static int start_urbs(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime)
{
	unsigned int i;
	int err;

	if (subs->stream->chip->shutdown)
		return -EBADFD;

	for (i = 0; i < subs->nurbs; i++) {
		if (snd_BUG_ON(!subs->dataurb[i].urb))
			return -EINVAL;
		if (subs->ops.prepare(subs, runtime, subs->dataurb[i].urb) < 0) {
			snd_printk(KERN_ERR "cannot prepare datapipe for urb %d\n", i);
			goto __error;
		}
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			if (snd_BUG_ON(!subs->syncurb[i].urb))
				return -EINVAL;
			if (subs->ops.prepare_sync(subs, runtime, subs->syncurb[i].urb) < 0) {
				snd_printk(KERN_ERR "cannot prepare syncpipe for urb %d\n", i);
				goto __error;
			}
		}
	}

	subs->active_mask = 0;
	subs->unlink_mask = 0;
	subs->running = 1;
	for (i = 0; i < subs->nurbs; i++) {
		err = usb_submit_urb(subs->dataurb[i].urb, GFP_ATOMIC);
		if (err < 0) {
			snd_printk(KERN_ERR "cannot submit datapipe "
				   "for urb %d, error %d: %s\n",
				   i, err, usb_error_string(err));
			goto __error;
		}
		set_bit(i, &subs->active_mask);
	}
	if (subs->syncpipe) {
		for (i = 0; i < SYNC_URBS; i++) {
			err = usb_submit_urb(subs->syncurb[i].urb, GFP_ATOMIC);
			if (err < 0) {
				snd_printk(KERN_ERR "cannot submit syncpipe "
					   "for urb %d, error %d: %s\n",
					   i, err, usb_error_string(err));
				goto __error;
			}
			set_bit(i + 16, &subs->active_mask);
		}
	}
	return 0;

 __error:
	// snd_pcm_stop(subs->pcm_substream, SNDRV_PCM_STATE_XRUN);
	deactivate_urbs(subs, 0, 0);
	return -EPIPE;
}


/*
 * per-direction URB operation tables: [0] = playback, [1] = capture
 */
static struct snd_urb_ops audio_urb_ops[2] = {
	{
		.prepare =	prepare_nodata_playback_urb,
		.retire =	retire_playback_urb,
		.prepare_sync =	prepare_playback_sync_urb,
		.retire_sync =	retire_playback_sync_urb,
	},
	{
		.prepare =	prepare_capture_urb,
		.retire =	retire_capture_urb,
		.prepare_sync =	prepare_capture_sync_urb,
		.retire_sync =	retire_capture_sync_urb,
	},
};

/*
 * initialize the substream instance.
 */

void snd_usb_init_substream(struct snd_usb_stream *as,
			    int stream, struct audioformat *fp)
{
	struct snd_usb_substream *subs = &as->substream[stream];

	INIT_LIST_HEAD(&subs->fmt_list);
	spin_lock_init(&subs->lock);

	subs->stream = as;
	subs->direction = stream;
	subs->dev = as->chip->dev;
	subs->txfr_quirk = as->chip->txfr_quirk;
	subs->ops = audio_urb_ops[stream];
	if (snd_usb_get_speed(subs->dev) >= USB_SPEED_HIGH)
		subs->ops.prepare_sync = prepare_capture_sync_urb_hs;

	snd_usb_set_pcm_ops(as->pcm, stream);

	list_add_tail(&fp->list, &subs->fmt_list);
	subs->formats |= fp->formats;
	subs->endpoint = fp->endpoint;
	subs->num_formats++;
	subs->fmt_type = fp->fmt_type;
}

int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.prepare = prepare_playback_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		subs->ops.prepare = prepare_nodata_playback_urb;
		return 0;
	}

	return -EINVAL;
}

int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct snd_usb_substream *subs = substream->runtime->private_data;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		subs->ops.retire = retire_capture_urb;
		return start_urbs(subs, substream->runtime);
	case SNDRV_PCM_TRIGGER_STOP:
		return deactivate_urbs(subs, 0, 0);
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		subs->ops.retire = retire_paused_capture_urb;
		return 0;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		subs->ops.retire = retire_capture_urb;
		return 0;
	}

	return -EINVAL;
}

int snd_usb_substream_prepare(struct snd_usb_substream *subs,
			      struct snd_pcm_runtime *runtime)
{
	/* clear urbs (to be sure) */
	deactivate_urbs(subs, 0, 1);
	wait_clear_urbs(subs);

	/* for playback, submit the URBs now; otherwise, the first hwptr_done
	 * updates for all URBs would happen at the same time when starting */
	if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) {
		subs->ops.prepare = prepare_nodata_playback_urb;
		return start_urbs(subs, runtime);
	}

	return 0;
}
967