/*
 *
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 */

#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);

#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS


#ifdef SANITY_CHECK_BUFFERS
#define BUFFER_CHECK(bp) do { \
	if ((bp)->signature != BUFFER_SIG) { \
		pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
		"Buffer %p is bad at %s:%d", \
		(bp), __FILE__, __LINE__); \
		pvr2_buffer_describe(bp, "BadSig"); \
		BUG(); \
	} \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while (0)
#endif

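/* Each stream maintains three lists of buffers: idle (available for use),
   queued (URB submitted, awaiting completion) and ready (data retrieved,
   waiting to be consumed).  A buffer normally cycles idle -> queued ->
   ready -> idle; per-list element and byte counts are kept alongside each
   list head. */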
struct pvr2_stream {
	/* Buffers queued for reading */
	struct list_head queued_list;
	unsigned int q_count;
	unsigned int q_bcount;
	/* Buffers with retrieved data */
	struct list_head ready_list;
	unsigned int r_count;
	unsigned int r_bcount;
	/* Buffers available for use */
	struct list_head idle_list;
	unsigned int i_count;
	unsigned int i_bcount;
	/* Pointers to all buffers */
	struct pvr2_buffer **buffers;
	/* Allocated size of the buffers pointer array */
	unsigned int buffer_slot_count;
	/* Total buffers actually in circulation */
	unsigned int buffer_total_count;
	/* Desired number of buffers to be in circulation */
	unsigned int buffer_target_count;
	/* Executed when ready list becomes non-empty */
	pvr2_stream_callback callback_func;
	void *callback_data;
	/* Context for transfer endpoint */
	struct usb_device *dev;
	int endpoint;
	/* Locking to protect list and state manipulation */
	spinlock_t list_lock;
	struct mutex mutex;
	/* Tracking state for tolerating errors */
	unsigned int fail_count;
	unsigned int fail_tolerance;

	unsigned int buffers_processed;
	unsigned int buffers_failed;
	unsigned int bytes_processed;
};

struct pvr2_buffer {
	int id;
	int signature;
	enum pvr2_buffer_state state;
	void *ptr;               /* Pointer to storage area */
	unsigned int max_count;  /* Size of storage area */
	unsigned int used_count; /* Amount of valid data in storage area */
	int status;              /* Transfer result status */
	struct pvr2_stream *stream;
	struct list_head list_overhead;
	struct urb *purb;
};

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
{
	switch (st) {
	case pvr2_buffer_state_none: return "none";
	case pvr2_buffer_state_idle: return "idle";
	case pvr2_buffer_state_queued: return "queued";
	case pvr2_buffer_state_ready: return "ready";
	}
	return "unknown";
}

#ifdef SANITY_CHECK_BUFFERS
static void pvr2_buffer_describe(struct pvr2_buffer *bp, const char *msg)
{
	pvr2_trace(PVR2_TRACE_INFO,
		   "buffer%s%s %p state=%s id=%d status=%d stream=%p purb=%p sig=0x%x",
		   (msg ? " " : ""),
		   (msg ? msg : ""),
		   bp,
		   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
		   (bp ? bp->id : 0),
		   (bp ? bp->status : 0),
		   (bp ? bp->stream : NULL),
		   (bp ? bp->purb : NULL),
		   (bp ? bp->signature : 0));
}
#endif  /*  SANITY_CHECK_BUFFERS  */

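/* Detach a buffer from whichever list it currently occupies and adjust that
   list's element/byte counters.  Callers are expected to hold list_lock.
   Buffers in state "none" are left alone. */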
static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
	unsigned int *cnt;
	unsigned int *bcnt;
	unsigned int ccnt;
	struct pvr2_stream *sp = bp->stream;
	switch (bp->state) {
	case pvr2_buffer_state_idle:
		cnt = &sp->i_count;
		bcnt = &sp->i_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_queued:
		cnt = &sp->q_count;
		bcnt = &sp->q_bcount;
		ccnt = bp->max_count;
		break;
	case pvr2_buffer_state_ready:
		cnt = &sp->r_count;
		bcnt = &sp->r_bcount;
		ccnt = bp->used_count;
		break;
	default:
		return;
	}
	list_del_init(&bp->list_overhead);
	(*cnt)--;
	(*bcnt) -= ccnt;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s dec cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state), *bcnt, *cnt);
	bp->state = pvr2_buffer_state_none;
}

static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_none));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

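/* Move a buffer onto the ready list.  Returns nonzero if the ready list was
   empty beforehand, i.e. this buffer is the one that made data available. */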
static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
	int fl;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	fl = (sp->r_count == 0);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->ready_list);
	bp->state = pvr2_buffer_state_ready;
	(sp->r_count)++;
	sp->r_bcount += bp->used_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->r_bcount, sp->r_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	return fl;
}

static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->idle_list);
	bp->state = pvr2_buffer_state_idle;
	(sp->i_count)++;
	sp->i_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->i_bcount, sp->i_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferState    %p %6s --> %6s",
		   bp,
		   pvr2_buffer_state_decode(bp->state),
		   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	pvr2_buffer_remove(bp);
	list_add_tail(&bp->list_overhead, &sp->queued_list);
	bp->state = pvr2_buffer_state_queued;
	(sp->q_count)++;
	sp->q_bcount += bp->max_count;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferPool	%8s inc cap=%07d cnt=%02d",
		   pvr2_buffer_state_decode(bp->state),
		   sp->q_bcount, sp->q_count);
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

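/* Cancel any in-flight transfer for this buffer; usb_kill_urb() does not
   return until the URB's completion handler has finished running. */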
static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
{
	if (bp->state == pvr2_buffer_state_queued) {
		usb_kill_urb(bp->purb);
	}
}

static int pvr2_buffer_init(struct pvr2_buffer *bp,
			    struct pvr2_stream *sp,
			    unsigned int id)
{
	memset(bp, 0, sizeof(*bp));
	bp->signature = BUFFER_SIG;
	bp->id = id;
	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ bufferInit     %p stream=%p", bp, sp);
	bp->stream = sp;
	bp->state = pvr2_buffer_state_none;
	INIT_LIST_HEAD(&bp->list_overhead);
	bp->purb = usb_alloc_urb(0, GFP_KERNEL);
	if (!bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "create");
#endif
	return 0;
}

static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
	pvr2_buffer_describe(bp, "delete");
#endif
	pvr2_buffer_wipe(bp);
	pvr2_buffer_set_none(bp);
	bp->signature = 0;
	bp->stream = NULL;
	usb_free_urb(bp->purb);
	pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/ bufferDone     %p",
		   bp);
}

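/* Resize the buffer pool to exactly cnt buffers.  The pointer array itself
   is sized in multiples of 32 slots so that small adjustments do not force
   a reallocation every time. */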
static int pvr2_stream_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	unsigned int scnt;

	if (cnt == sp->buffer_total_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolResize	stream=%p cur=%d adj=%+d",
		   sp,
		   sp->buffer_total_count,
		   cnt-sp->buffer_total_count);

	/* Allocate buffers pointer array in multiples of 32 entries */
	scnt = cnt & ~0x1f;
	if (cnt > scnt) scnt += 0x20;

	if (cnt > sp->buffer_total_count) {
		if (scnt > sp->buffer_slot_count) {
			struct pvr2_buffer **nb;

			nb = kmalloc_array(scnt, sizeof(*nb), GFP_KERNEL);
			if (!nb) return -ENOMEM;
			if (sp->buffer_slot_count) {
				memcpy(nb, sp->buffers,
				       sp->buffer_slot_count * sizeof(*nb));
				kfree(sp->buffers);
			}
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
		while (sp->buffer_total_count < cnt) {
			struct pvr2_buffer *bp;
			bp = kmalloc(sizeof(*bp), GFP_KERNEL);
			if (!bp) return -ENOMEM;
			ret = pvr2_buffer_init(bp, sp, sp->buffer_total_count);
			if (ret) {
				kfree(bp);
				return ret;
			}
			sp->buffers[sp->buffer_total_count] = bp;
			(sp->buffer_total_count)++;
			pvr2_buffer_set_idle(bp);
		}
	} else {
		while (sp->buffer_total_count > cnt) {
			struct pvr2_buffer *bp;
			bp = sp->buffers[sp->buffer_total_count - 1];
			/* Paranoia */
			sp->buffers[sp->buffer_total_count - 1] = NULL;
			(sp->buffer_total_count)--;
			pvr2_buffer_done(bp);
			kfree(bp);
		}
		if (scnt < sp->buffer_slot_count) {
			struct pvr2_buffer **nb = NULL;
			if (scnt) {
				nb = kmemdup(sp->buffers, scnt * sizeof(*nb),
					     GFP_KERNEL);
				if (!nb) return -ENOMEM;
			}
			kfree(sp->buffers);
			sp->buffers = nb;
			sp->buffer_slot_count = scnt;
		}
	}
	return 0;
}

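/* Try to bring the pool to the target size.  Growth happens immediately;
   shrinking only trims buffers at the tail of the array that are currently
   idle, so buffers still queued or holding data are never freed here. */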
static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	unsigned int cnt;

	if (sp->buffer_total_count == sp->buffer_target_count) return 0;

	pvr2_trace(PVR2_TRACE_BUF_POOL,
		   "/*---TRACE_FLOW---*/ poolCheck	stream=%p cur=%d tgt=%d",
		   sp, sp->buffer_total_count, sp->buffer_target_count);

	if (sp->buffer_total_count < sp->buffer_target_count) {
		return pvr2_stream_buffer_count(sp, sp->buffer_target_count);
	}

	cnt = 0;
	while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
		bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
		if (bp->state != pvr2_buffer_state_idle) break;
		cnt++;
	}
	if (cnt) {
		pvr2_stream_buffer_count(sp, sp->buffer_total_count - cnt);
	}

	return 0;
}

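/* Cancel all outstanding transfers, return queued buffers to the idle list,
   then re-check the pool size.  The caller must hold sp->mutex. */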
static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
	struct list_head *lp;
	struct pvr2_buffer *bp1;
	while ((lp = sp->queued_list.next) != &sp->queued_list) {
		bp1 = list_entry(lp, struct pvr2_buffer, list_overhead);
		pvr2_buffer_wipe(bp1);
		/* At this point, we should be guaranteed that no
		   completion callback may happen on this buffer.  But it's
		   possible that it might have completed after we noticed
		   it but before we wiped it.  So double check its status
		   here first. */
		if (bp1->state != pvr2_buffer_state_queued) continue;
		pvr2_buffer_set_idle(bp1);
	}
	if (sp->buffer_total_count != sp->buffer_target_count) {
		pvr2_stream_achieve_buffer_count(sp);
	}
}

static void pvr2_stream_init(struct pvr2_stream *sp)
{
	spin_lock_init(&sp->list_lock);
	mutex_init(&sp->mutex);
	INIT_LIST_HEAD(&sp->queued_list);
	INIT_LIST_HEAD(&sp->ready_list);
	INIT_LIST_HEAD(&sp->idle_list);
}

static void pvr2_stream_done(struct pvr2_stream *sp)
{
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		pvr2_stream_buffer_count(sp, 0);
	} while (0);
	mutex_unlock(&sp->mutex);
}

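/* URB completion handler; may be called in interrupt context, hence the
   irqsave spinlock.  Cancellation statuses (-ENOENT, -ECONNRESET,
   -ESHUTDOWN) are treated like success so that whatever data did arrive is
   still delivered; other errors are tolerated until fail_tolerance is
   exceeded, after which the error is reported in the buffer's status. */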
static void buffer_complete(struct urb *urb)
{
	struct pvr2_buffer *bp = urb->context;
	struct pvr2_stream *sp;
	unsigned long irq_flags;
	BUFFER_CHECK(bp);
	sp = bp->stream;
	bp->used_count = 0;
	bp->status = 0;
	pvr2_trace(PVR2_TRACE_BUF_FLOW,
		   "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
		   bp, urb->status, urb->actual_length);
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if ((!(urb->status)) ||
	    (urb->status == -ENOENT) ||
	    (urb->status == -ECONNRESET) ||
	    (urb->status == -ESHUTDOWN)) {
		(sp->buffers_processed)++;
		sp->bytes_processed += urb->actual_length;
		bp->used_count = urb->actual_length;
		if (sp->fail_count) {
			pvr2_trace(PVR2_TRACE_TOLERANCE,
				   "stream %p transfer ok - fail count reset",
				   sp);
			sp->fail_count = 0;
		}
	} else if (sp->fail_count < sp->fail_tolerance) {
		// We can tolerate this error, because we're below the
		// threshold...
		(sp->fail_count)++;
		(sp->buffers_failed)++;
		pvr2_trace(PVR2_TRACE_TOLERANCE,
			   "stream %p ignoring error %d - fail count increased to %u",
			   sp, urb->status, sp->fail_count);
	} else {
		(sp->buffers_failed)++;
		bp->status = urb->status;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	pvr2_buffer_set_ready(bp);
	if (sp->callback_func) {
		sp->callback_func(sp->callback_data);
	}
}

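/* Typical use of this interface, as a rough sketch; the caller-side names
   below (usb_dev, my_callback, my_data, n, bp, ptr, cnt) are illustrative
   only:

	struct pvr2_stream *sp = pvr2_stream_create();
	pvr2_stream_setup(sp, usb_dev, endpoint, tolerance);
	pvr2_stream_set_callback(sp, my_callback, my_data);
	pvr2_stream_set_buffer_count(sp, n);
	// For each buffer: attach storage and arm it.
	//   bp = pvr2_stream_get_idle_buffer(sp);
	//   pvr2_buffer_set_buffer(bp, ptr, cnt);
	//   pvr2_buffer_queue(bp);
	// The callback then consumes pvr2_stream_get_ready_buffer(sp),
	// checks pvr2_buffer_get_count() / pvr2_buffer_get_status(), and
	// re-queues the buffer.
	pvr2_stream_kill(sp);
	pvr2_stream_destroy(sp);
*/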
struct pvr2_stream *pvr2_stream_create(void)
{
	struct pvr2_stream *sp;
	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp) return sp;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_create: sp=%p", sp);
	pvr2_stream_init(sp);
	return sp;
}

void pvr2_stream_destroy(struct pvr2_stream *sp)
{
	if (!sp) return;
	pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_destroy: sp=%p", sp);
	pvr2_stream_done(sp);
	kfree(sp);
}

void pvr2_stream_setup(struct pvr2_stream *sp,
		       struct usb_device *dev,
		       int endpoint,
		       unsigned int tolerance)
{
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		sp->dev = dev;
		sp->endpoint = endpoint;
		sp->fail_tolerance = tolerance;
	} while (0);
	mutex_unlock(&sp->mutex);
}

void pvr2_stream_set_callback(struct pvr2_stream *sp,
			      pvr2_stream_callback func,
			      void *data)
{
	unsigned long irq_flags;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		sp->callback_data = data;
		sp->callback_func = func;
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
}

void pvr2_stream_get_stats(struct pvr2_stream *sp,
			   struct pvr2_stream_stats *stats,
			   int zero_counts)
{
	unsigned long irq_flags;
	spin_lock_irqsave(&sp->list_lock, irq_flags);
	if (stats) {
		stats->buffers_in_queue = sp->q_count;
		stats->buffers_in_idle = sp->i_count;
		stats->buffers_in_ready = sp->r_count;
		stats->buffers_processed = sp->buffers_processed;
		stats->buffers_failed = sp->buffers_failed;
		stats->bytes_processed = sp->bytes_processed;
	}
	if (zero_counts) {
		sp->buffers_processed = 0;
		sp->buffers_failed = 0;
		sp->bytes_processed = 0;
	}
	spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

/* Query / set the nominal buffer count */
int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
{
	return sp->buffer_target_count;
}

int pvr2_stream_set_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
	int ret;
	if (sp->buffer_target_count == cnt) return 0;
	mutex_lock(&sp->mutex);
	do {
		sp->buffer_target_count = cnt;
		ret = pvr2_stream_achieve_buffer_count(sp);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->idle_list.next;
	if (lp == &sp->idle_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
	struct list_head *lp = sp->ready_list.next;
	if (lp == &sp->ready_list) return NULL;
	return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp, int id)
{
	if (id < 0) return NULL;
	if (id >= sp->buffer_total_count) return NULL;
	return sp->buffers[id];
}

int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
{
	return sp->r_count;
}

void pvr2_stream_kill(struct pvr2_stream *sp)
{
	struct pvr2_buffer *bp;
	mutex_lock(&sp->mutex);
	do {
		pvr2_stream_internal_flush(sp);
		while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
			pvr2_buffer_set_idle(bp);
		}
		if (sp->buffer_total_count != sp->buffer_target_count) {
			pvr2_stream_achieve_buffer_count(sp);
		}
	} while (0);
	mutex_unlock(&sp->mutex);
}

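/* Arm a buffer: move it onto the queued list and submit a bulk-in URB for
   it.  buffer_complete() will move it to the ready list when the transfer
   finishes (or fails). */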
int pvr2_buffer_queue(struct pvr2_buffer *bp)
{
#undef SEED_BUFFER
#ifdef SEED_BUFFER
	unsigned int idx;
	unsigned int val;
#endif
	int ret = 0;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		pvr2_buffer_wipe(bp);
		if (!sp->dev) {
			ret = -EIO;
			break;
		}
		pvr2_buffer_set_queued(bp);
#ifdef SEED_BUFFER
		for (idx = 0; idx < (bp->max_count) / 4; idx++) {
			val = bp->id << 24;
			val |= idx;
			((unsigned int *)(bp->ptr))[idx] = val;
		}
#endif
		bp->status = -EINPROGRESS;
		usb_fill_bulk_urb(bp->purb,      // struct urb *urb
				  sp->dev,       // struct usb_device *dev
				  // endpoint (below)
				  usb_rcvbulkpipe(sp->dev, sp->endpoint),
				  bp->ptr,       // void *transfer_buffer
				  bp->max_count, // int buffer_length
				  buffer_complete,
				  bp);
		usb_submit_urb(bp->purb, GFP_KERNEL);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

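/* Attach caller-provided storage to a buffer.  This is only legal while the
   buffer is idle; otherwise -EPERM is returned. */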
int pvr2_buffer_set_buffer(struct pvr2_buffer *bp, void *ptr, unsigned int cnt)
{
	int ret = 0;
	unsigned long irq_flags;
	struct pvr2_stream *sp;
	if (!bp) return -EINVAL;
	sp = bp->stream;
	mutex_lock(&sp->mutex);
	do {
		spin_lock_irqsave(&sp->list_lock, irq_flags);
		if (bp->state != pvr2_buffer_state_idle) {
			ret = -EPERM;
		} else {
			bp->ptr = ptr;
			bp->stream->i_bcount -= bp->max_count;
			bp->max_count = cnt;
			bp->stream->i_bcount += bp->max_count;
			pvr2_trace(PVR2_TRACE_BUF_FLOW,
				   "/*---TRACE_FLOW---*/ bufferPool	%8s cap cap=%07d cnt=%02d",
				   pvr2_buffer_state_decode(
					   pvr2_buffer_state_idle),
				   bp->stream->i_bcount, bp->stream->i_count);
		}
		spin_unlock_irqrestore(&sp->list_lock, irq_flags);
	} while (0);
	mutex_unlock(&sp->mutex);
	return ret;
}

unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
{
	return bp->used_count;
}

int pvr2_buffer_get_status(struct pvr2_buffer *bp)
{
	return bp->status;
}

int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
	return bp->id;
}