// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 */

#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);

#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS


#ifdef SANITY_CHECK_BUFFERS
#define BUFFER_CHECK(bp) do { \
	if ((bp)->signature != BUFFER_SIG) { \
		pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
		"Buffer %p is bad at %s:%d", \
		(bp), __FILE__, __LINE__); \
		pvr2_buffer_describe(bp, "BadSig"); \
		BUG(); \
	} \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while (0)
#endif

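/*
 * A stream owns a pool of buffers.  Each buffer in circulation sits on
 * exactly one of three lists: idle (available for use), queued (URB
 * submitted, awaiting data) or ready (transfer finished, data waiting to
 * be consumed).  The *_count/*_bcount pairs below track the number of
 * buffers on each list and their total byte capacity (or, for the ready
 * list, payload).
 */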
struct pvr2_stream {
	/* Buffers queued for reading */
	struct list_head queued_list;
	unsigned int q_count;
	unsigned int q_bcount;
	/* Buffers with retrieved data */
	struct list_head ready_list;
	unsigned int r_count;
	unsigned int r_bcount;
	/* Buffers available for use */
	struct list_head idle_list;
	unsigned int i_count;
	unsigned int i_bcount;
	/* Pointers to all buffers */
	struct pvr2_buffer **buffers;
	/* Array size of buffers */
	unsigned int buffer_slot_count;
	/* Total buffers actually in circulation */
	unsigned int buffer_total_count;
	/* Desired number of buffers to be in circulation */
	unsigned int buffer_target_count;
	/* Executed when the ready list becomes non-empty */
	pvr2_stream_callback callback_func;
	void *callback_data;
	/* Context for transfer endpoint */
	struct usb_device *dev;
	int endpoint;
	/* Locking: list_lock protects the buffer lists and their counters
	   (it is also taken from URB completion); mutex serializes
	   stream-level operations such as setup, resizing and queueing */
	spinlock_t list_lock;
	struct mutex mutex;
	/* Tracking state for tolerating errors */
	unsigned int fail_count;
	unsigned int fail_tolerance;

	unsigned int buffers_processed;
	unsigned int buffers_failed;
	unsigned int bytes_processed;
};

struct pvr2_buffer {
	int id;
	int signature;
	enum pvr2_buffer_state state;
	void *ptr;               /* Pointer to storage area */
	unsigned int max_count;  /* Size of storage area */
	unsigned int used_count; /* Amount of valid data in storage area */
	int status;              /* Transfer result status */
	struct pvr2_stream *stream;
	struct list_head list_overhead;
	struct urb *purb;
};

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
{
	switch (st) {
	case pvr2_buffer_state_none: return "none";
	case pvr2_buffer_state_idle: return "idle";
	case pvr2_buffer_state_queued: return "queued";
	case pvr2_buffer_state_ready: return "ready";
	}
	return "unknown";
}

#ifdef SANITY_CHECK_BUFFERS
static void pvr2_buffer_describe(struct pvr2_buffer *bp, const char *msg)
{
	pvr2_trace(PVR2_TRACE_INFO,
		   "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
		   (msg ? " " : ""),
		   (msg ? msg : ""),
		   bp,
		   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
		   (bp ? bp->id : 0),
		   (bp ? bp->status : 0),
		   (bp ? bp->stream : NULL),
		   (bp ? bp->purb : NULL),
		   (bp ? bp->signature : 0));
}
#endif  /*  SANITY_CHECK_BUFFERS  */

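/*
 * Detach a buffer from whichever list it is currently on and update that
 * list's element and byte counters.  All callers hold sp->list_lock.
 */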
static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
	unsigned int *cnt;
	unsigned int *bcnt;
	unsigned int ccnt;
	struct pvr2_stream *sp = bp->stream;
	switch (bp->state) {
	case pvr2_buffer_state_idle:
		cnt = &sp->i_count;
		bcnt = &sp->i_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_queued:
		cnt = &sp->q_count;
		bcnt = &sp->q_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_ready:
		cnt = &sp->r_count;
		bcnt = &sp->r_bcount;
		ccnt = bp->used_count;
		break;
	default:
		return;
	}
	list_del_init(&bp->list_overhead);
	(*cnt)--;
	(*bcnt) -= ccnt;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s dec cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state), *bcnt, *cnt);
	bp->state = pvr2_buffer_state_none;
}

static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_none));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
	int fl;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	fl = (sp->r_count == 0);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->ready_list);
	bp->state = pvr2_buffer_state_ready;
	(sp->r_count)++;
	sp->r_bcount += bp->used_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->r_bcount, sp->r_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	return fl;
}

static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->idle_list);
	bp->state = pvr2_buffer_state_idle;
	(sp->i_count)++;
	sp->i_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->i_bcount, sp->i_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->queued_list);
	bp->state = pvr2_buffer_state_queued;
	(sp->q_count)++;
	sp->q_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->q_bcount, sp->q_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

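/*
 * Cancel the buffer's URB if one is currently in flight.  usb_kill_urb()
 * does not return until any completion handler has finished, so the URB
 * is idle once this returns.
 */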
static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
{
	if (bp->state == pvr2_buffer_state_queued) {
		usb_kill_urb(bp->purb);
	}
}

static int pvr2_buffer_init(struct pvr2_buffer *bp,
			    struct pvr2_stream *sp,
			    unsigned int id)
{
	memset(bp, 0, sizeof(*bp));
	bp->signature = BUFFER_SIG;
	bp->id = id;
	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ bufferInit     %p stream=%p", bp, sp);
	bp->stream = sp;
	bp->state = pvr2_buffer_state_none;
	INIT_LIST_HEAD(&bp->list_overhead);
	bp->purb = usb_alloc_urb(0, GFP_KERNEL);
	if (!bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "create");
#endif
	return 0;
}

static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "delete");
#endif
	pvr2_buffer_wipe(bp);
	pvr2_buffer_set_none(bp);
	bp->signature = 0;
	bp->stream = NULL;
	usb_free_urb(bp->purb);
	pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone     %p",
		   bp);
}

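/*
 * Resize the pool to hold exactly cnt buffers, creating or destroying
 * individual buffers and growing or shrinking the pointer array as
 * needed.  Callers hold sp->mutex.
 */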
static int pvr2_stream_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	unsigned int scnt;

	/* Allocate buffers pointer array in multiples of 32 entries */
	if (cnt == sp->buffer_total_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolResize	stream=%p cur=%d adj=%+d",
		   sp,
		   sp->buffer_total_count,
		   cnt - sp->buffer_total_count);

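	/*
	 * Round the requested count up to the next multiple of 32 to size
	 * the pointer array: e.g. cnt == 33 gives scnt == 64, while
	 * cnt == 32 leaves scnt at 32.
	 */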
	scnt = cnt & ~0x1f;
	if (cnt > scnt) scnt += 0x20;

	if (cnt > sp->buffer_total_count) {
		if (scnt > sp->buffer_slot_count) {
			struct pvr2_buffer **nb;

			nb = kmalloc_array(scnt, sizeof(*nb), GFP_KERNEL);
			if (!nb) return -ENOMEM;
			if (sp->buffer_slot_count) {
				memcpy(nb, sp->buffers,
				       sp->buffer_slot_count * sizeof(*nb));
				kfree(sp->buffers);
			}
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
		while (sp->buffer_total_count < cnt) {
			struct pvr2_buffer *bp;
			bp = kmalloc(sizeof(*bp), GFP_KERNEL);
			if (!bp) return -ENOMEM;
			ret = pvr2_buffer_init(bp, sp, sp->buffer_total_count);
			if (ret) {
				kfree(bp);
				return ret;
			}
			sp->buffers[sp->buffer_total_count] = bp;
			(sp->buffer_total_count)++;
			pvr2_buffer_set_idle(bp);
		}
	} else {
		while (sp->buffer_total_count > cnt) {
			struct pvr2_buffer *bp;
			bp = sp->buffers[sp->buffer_total_count - 1];
			/* Paranoia */
			sp->buffers[sp->buffer_total_count - 1] = NULL;
			(sp->buffer_total_count)--;
			pvr2_buffer_done(bp);
			kfree(bp);
		}
		if (scnt < sp->buffer_slot_count) {
			struct pvr2_buffer **nb = NULL;
			if (scnt) {
				nb = kmemdup(sp->buffers, scnt * sizeof(*nb),
					     GFP_KERNEL);
				if (!nb) return -ENOMEM;
			}
			kfree(sp->buffers);
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
	}
	return 0;
}

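/*
 * Move the pool toward buffer_target_count.  Growing is delegated to
 * pvr2_stream_buffer_count(); when shrinking, only idle buffers at the
 * tail of the array are released, so the pool may remain above the
 * target until more buffers become idle.
 */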
static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	unsigned int cnt;

	if (sp->buffer_total_count == sp->buffer_target_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolCheck	stream=%p cur=%d tgt=%d",
		   sp, sp->buffer_total_count, sp->buffer_target_count);

	if (sp->buffer_total_count < sp->buffer_target_count) {
		return pvr2_stream_buffer_count(sp, sp->buffer_target_count);
	}

	cnt = 0;
	while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
		bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
		if (bp->state != pvr2_buffer_state_idle) break;
		cnt++;
	}
	if (cnt) {
		pvr2_stream_buffer_count(sp, sp->buffer_total_count - cnt);
	}

	return 0;
}

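/*
 * Cancel all queued URBs and return their buffers to the idle list, then
 * retry any pending pool resize.  A buffer can complete between being
 * observed on the queued list and being wiped, in which case it is
 * already on the ready list and is left alone.  Callers hold sp->mutex.
 */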
static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
	struct list_head *lp;
	struct pvr2_buffer *bp1;
	while ((lp = sp->queued_list.next) != &sp->queued_list) {
		bp1 = list_entry(lp, struct pvr2_buffer, list_overhead);
		pvr2_buffer_wipe(bp1);
		/* At this point, we should be guaranteed that no
		   completion callback may happen on this buffer.  But it's
		   possible that it might have completed after we noticed
		   it but before we wiped it.  So double check its status
		   here first. */
		if (bp1->state != pvr2_buffer_state_queued) continue;
		pvr2_buffer_set_idle(bp1);
	}
	if (sp->buffer_total_count != sp->buffer_target_count) {
		pvr2_stream_achieve_buffer_count(sp);
	}
}

static void pvr2_stream_init(struct pvr2_stream *sp)
{
	spin_lock_init(&sp->list_lock);
	mutex_init(&sp->mutex);
	INIT_LIST_HEAD(&sp->queued_list);
	INIT_LIST_HEAD(&sp->ready_list);
	INIT_LIST_HEAD(&sp->idle_list);
}

static void pvr2_stream_done(struct pvr2_stream *sp)
{
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		pvr2_stream_buffer_count(sp, 0);
	} while (0);
	mutex_unlock(&sp->mutex);
}

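/*
 * URB completion handler.  This runs in atomic context, so only the
 * list_lock spinlock is taken here, never the mutex.  The buffer is
 * moved to the ready list regardless of the transfer outcome; consumers
 * inspect bp->status to distinguish success from failure.
 */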
static void buffer_complete(struct urb *urb)
{
	struct pvr2_buffer *bp = urb->context;
	struct pvr2_stream *sp;
	unsigned long irq_flags;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	bp->used_count = 0;
	bp->status = 0;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
		   bp, urb->status, urb->actual_length);
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if ((!(urb->status)) ||
	    (urb->status == -ENOENT) ||
	    (urb->status == -ECONNRESET) ||
	    (urb->status == -ESHUTDOWN)) {
		(sp->buffers_processed)++;
		sp->bytes_processed += urb->actual_length;
		bp->used_count = urb->actual_length;
		if (sp->fail_count) {
			pvr2_trace(PVR2_TRACE_TOLERANCE,
				   "stream %p transfer ok - fail count reset",
				   sp);
			sp->fail_count = 0;
		}
	} else if (sp->fail_count < sp->fail_tolerance) {
		// We can tolerate this error, because we're below the
		// threshold...
		(sp->fail_count)++;
		(sp->buffers_failed)++;
		pvr2_trace(PVR2_TRACE_TOLERANCE,
			   "stream %p ignoring error %d - fail count increased to %u",
			   sp, urb->status, sp->fail_count);
	} else {
		(sp->buffers_failed)++;
		bp->status = urb->status;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	pvr2_buffer_set_ready(bp);
	if (sp->callback_func) {
		sp->callback_func(sp->callback_data);
	}
}

struct pvr2_stream *pvr2_stream_create(void)
{
	struct pvr2_stream *sp;
	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp) return sp;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_create: sp=%p", sp);
	pvr2_stream_init(sp);
	return sp;
}

void pvr2_stream_destroy(struct pvr2_stream *sp)
{
	if (!sp) return;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_destroy: sp=%p", sp);
	pvr2_stream_done(sp);
	kfree(sp);
}

void pvr2_stream_setup(struct pvr2_stream *sp,
		       struct usb_device *dev,
		       int endpoint,
		       unsigned int tolerance)
{
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		sp->dev = dev;
		sp->endpoint = endpoint;
		sp->fail_tolerance = tolerance;
	} while (0);
	mutex_unlock(&sp->mutex);
}

void pvr2_stream_set_callback(struct pvr2_stream *sp,
			      pvr2_stream_callback func,
			      void *data)
{
	unsigned long irq_flags;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		sp->callback_data = data;
		sp->callback_func = func;
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
}

void pvr2_stream_get_stats(struct pvr2_stream *sp,
			   struct pvr2_stream_stats *stats,
			   int zero_counts)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if (stats) {
		stats->buffers_in_queue = sp->q_count;
		stats->buffers_in_idle = sp->i_count;
		stats->buffers_in_ready = sp->r_count;
		stats->buffers_processed = sp->buffers_processed;
		stats->buffers_failed = sp->buffers_failed;
		stats->bytes_processed = sp->bytes_processed;
	}
	if (zero_counts) {
		sp->buffers_processed = 0;
		sp->buffers_failed = 0;
		sp->bytes_processed = 0;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

/* Query / set the nominal buffer count */
int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
{
	return sp->buffer_target_count;
}

int pvr2_stream_set_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	if (sp->buffer_target_count == cnt) return 0;
	mutex_lock(&sp->mutex);
	do {
		sp->buffer_target_count = cnt;
		ret = pvr2_stream_achieve_buffer_count(sp);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->idle_list.next;
	if (lp == &sp->idle_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->ready_list.next;
	if (lp == &sp->ready_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp, int id)
{
	if (id < 0) return NULL;
	if (id >= sp->buffer_total_count) return NULL;
	return sp->buffers[id];
}

int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
{
	return sp->r_count;
}

void pvr2_stream_kill(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
			pvr2_buffer_set_idle(bp);
		}
		if (sp->buffer_total_count != sp->buffer_target_count) {
			pvr2_stream_achieve_buffer_count(sp);
		}
	} while (0);
	mutex_unlock(&sp->mutex);
}

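/*
 * (Re)submit a buffer as a bulk-in transfer on the stream's endpoint.
 * Any URB still pending for this buffer is killed first, and the buffer
 * is moved onto the queued list before the URB is submitted.
 */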
int pvr2_buffer_queue(struct pvr2_buffer *bp)
{
#undef SEED_BUFFER
#ifdef SEED_BUFFER
	unsigned int idx;
	unsigned int val;
#endif
	int ret = 0;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		pvr2_buffer_wipe(bp);
		if (!sp->dev) {
			ret = -EIO;
			break;
		}
		pvr2_buffer_set_queued(bp);
#ifdef SEED_BUFFER
		for (idx = 0; idx < (bp->max_count) / 4; idx++) {
			val = bp->id << 24;
			val |= idx;
			((unsigned int *)(bp->ptr))[idx] = val;
		}
#endif
		bp->status = -EINPROGRESS;
		usb_fill_bulk_urb(bp->purb,      // struct urb *urb
				  sp->dev,       // struct usb_device *dev
				  // endpoint (below)
				  usb_rcvbulkpipe(sp->dev, sp->endpoint),
				  bp->ptr,       // void *transfer_buffer
				  bp->max_count, // int buffer_length
				  buffer_complete,
				  bp);
		/* Propagate any submission failure to the caller; the
		   buffer stays on the queued list in that case and is
		   reclaimed by the next flush. */
		ret = usb_submit_urb(bp->purb, GFP_KERNEL);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

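/*
 * Attach (or replace) the storage area used by a buffer.  This is only
 * permitted while the buffer is idle; otherwise -EPERM is returned and
 * the buffer is left untouched.
 */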
int pvr2_buffer_set_buffer(struct pvr2_buffer *bp, void *ptr, unsigned int cnt)
{
	int ret = 0;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		if (bp->state != pvr2_buffer_state_idle) {
			ret = -EPERM;
		} else {
			bp->ptr = ptr;
			bp->stream->i_bcount -= bp->max_count;
			bp->max_count = cnt;
			bp->stream->i_bcount += bp->max_count;
			pvr2_trace(PVR2_TRACE_BUF_FLOW,
				   "/*---TRACE_FLOW---*/ bufferPool	%8s cap cap=%07d cnt=%02d",
				   pvr2_buffer_state_decode(
					   pvr2_buffer_state_idle),
				   bp->stream->i_bcount, bp->stream->i_count);
		}
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
{
	return bp->used_count;
}

int pvr2_buffer_get_status(struct pvr2_buffer *bp)
{
	return bp->status;
}

int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
	return bp->id;
}