/*
 *
 *
 *  Copyright (C) 2005 Mike Isely <isely@pobox.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include "pvrusb2-io.h"
#include "pvrusb2-debug.h"
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mutex.h>

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state);

#define BUFFER_SIG 0x47653271

// #define SANITY_CHECK_BUFFERS


#ifdef SANITY_CHECK_BUFFERS
#define BUFFER_CHECK(bp) do { \
        if ((bp)->signature != BUFFER_SIG) { \
                pvr2_trace(PVR2_TRACE_ERROR_LEGS, \
                           "Buffer %p is bad at %s:%d", \
                           (bp), __FILE__, __LINE__); \
                pvr2_buffer_describe(bp, "BadSig"); \
                BUG(); \
        } \
} while (0)
#else
#define BUFFER_CHECK(bp) do {} while (0)
#endif

struct pvr2_stream {
        /* Buffers queued for reading */
        struct list_head queued_list;
        unsigned int q_count;
        unsigned int q_bcount;
        /* Buffers with retrieved data */
        struct list_head ready_list;
        unsigned int r_count;
        unsigned int r_bcount;
        /* Buffers available for use */
        struct list_head idle_list;
        unsigned int i_count;
        unsigned int i_bcount;
        /* Pointers to all buffers */
        struct pvr2_buffer **buffers;
        /* Allocated size of the buffers pointer array */
        unsigned int buffer_slot_count;
        /* Total buffers actually in circulation */
        unsigned int buffer_total_count;
        /* Desired number of buffers to be in circulation */
        unsigned int buffer_target_count;
        /* Executed when the ready list becomes non-empty */
        pvr2_stream_callback callback_func;
        void *callback_data;
        /* Context for transfer endpoint */
        struct usb_device *dev;
        int endpoint;
        /* Locking: list_lock protects the buffer lists and counters;
           mutex serializes the higher-level stream operations */
        spinlock_t list_lock;
        struct mutex mutex;
        /* Tracking state for tolerating errors */
        unsigned int fail_count;
        unsigned int fail_tolerance;

        unsigned int buffers_processed;
        unsigned int buffers_failed;
        unsigned int bytes_processed;
};

struct pvr2_buffer {
        int id;
        int signature;
        enum pvr2_buffer_state state;
        void *ptr;               /* Pointer to storage area */
        unsigned int max_count;  /* Size of storage area */
        unsigned int used_count; /* Amount of valid data in storage area */
        int status;              /* Transfer result status */
        struct pvr2_stream *stream;
        struct list_head list_overhead;
        struct urb *purb;
};

static const char *pvr2_buffer_state_decode(enum pvr2_buffer_state st)
{
        switch (st) {
        case pvr2_buffer_state_none: return "none";
        case pvr2_buffer_state_idle: return "idle";
        case pvr2_buffer_state_queued: return "queued";
        case pvr2_buffer_state_ready: return "ready";
        }
        return "unknown";
}
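
/* Debugging aid: dump a buffer's fields to the trace log.  Only
   compiled in when SANITY_CHECK_BUFFERS is defined above. */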
#ifdef SANITY_CHECK_BUFFERS
static void pvr2_buffer_describe(struct pvr2_buffer *bp, const char *msg)
{
        pvr2_trace(PVR2_TRACE_INFO,
                   "buffer%s%s %p state=%s id=%d status=%d"
                   " stream=%p purb=%p sig=0x%x",
                   (msg ? " " : ""),
                   (msg ? msg : ""),
                   bp,
                   (bp ? pvr2_buffer_state_decode(bp->state) : "(invalid)"),
                   (bp ? bp->id : 0),
                   (bp ? bp->status : 0),
                   (bp ? bp->stream : NULL),
                   (bp ? bp->purb : NULL),
                   (bp ? bp->signature : 0));
}
#endif /* SANITY_CHECK_BUFFERS */
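
/* A buffer in any state other than "none" sits on exactly one of the
   stream's three lists (idle, queued or ready).  The helpers below move
   buffers between those lists while keeping the per-list element and
   byte counters consistent; pvr2_buffer_remove() expects the caller to
   hold list_lock, while the pvr2_buffer_set_*() helpers take it
   themselves. */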
static void pvr2_buffer_remove(struct pvr2_buffer *bp)
{
        unsigned int *cnt;
        unsigned int *bcnt;
        unsigned int ccnt;
        struct pvr2_stream *sp = bp->stream;
        switch (bp->state) {
        case pvr2_buffer_state_idle:
                cnt = &sp->i_count;
                bcnt = &sp->i_bcount;
                ccnt = bp->max_count;
                break;
        case pvr2_buffer_state_queued:
                cnt = &sp->q_count;
                bcnt = &sp->q_bcount;
                ccnt = bp->max_count;
                break;
        case pvr2_buffer_state_ready:
                cnt = &sp->r_count;
                bcnt = &sp->r_bcount;
                ccnt = bp->used_count;
                break;
        default:
                return;
        }
        list_del_init(&bp->list_overhead);
        (*cnt)--;
        (*bcnt) -= ccnt;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s dec cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state), *bcnt, *cnt);
        bp->state = pvr2_buffer_state_none;
}

static void pvr2_buffer_set_none(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_none));
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        pvr2_buffer_remove(bp);
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static int pvr2_buffer_set_ready(struct pvr2_buffer *bp)
{
        int fl;
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_ready));
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        fl = (sp->r_count == 0);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead, &sp->ready_list);
        bp->state = pvr2_buffer_state_ready;
        (sp->r_count)++;
        sp->r_bcount += bp->used_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->r_bcount, sp->r_count);
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
        return fl;
}

static void pvr2_buffer_set_idle(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_idle));
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead, &sp->idle_list);
        bp->state = pvr2_buffer_state_idle;
        (sp->i_count)++;
        sp->i_bcount += bp->max_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->i_bcount, sp->i_count);
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_set_queued(struct pvr2_buffer *bp)
{
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferState %p %6s --> %6s",
                   bp,
                   pvr2_buffer_state_decode(bp->state),
                   pvr2_buffer_state_decode(pvr2_buffer_state_queued));
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        pvr2_buffer_remove(bp);
        list_add_tail(&bp->list_overhead, &sp->queued_list);
        bp->state = pvr2_buffer_state_queued;
        (sp->q_count)++;
        sp->q_bcount += bp->max_count;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/"
                   " bufferPool %8s inc cap=%07d cnt=%02d",
                   pvr2_buffer_state_decode(bp->state),
                   sp->q_bcount, sp->q_count);
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

static void pvr2_buffer_wipe(struct pvr2_buffer *bp)
{
        if (bp->state == pvr2_buffer_state_queued) {
                usb_kill_urb(bp->purb);
        }
}

static int pvr2_buffer_init(struct pvr2_buffer *bp,
                            struct pvr2_stream *sp,
                            unsigned int id)
{
        memset(bp, 0, sizeof(*bp));
        bp->signature = BUFFER_SIG;
        bp->id = id;
        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/ bufferInit %p stream=%p", bp, sp);
        bp->stream = sp;
        bp->state = pvr2_buffer_state_none;
        INIT_LIST_HEAD(&bp->list_overhead);
        bp->purb = usb_alloc_urb(0, GFP_KERNEL);
        if (!bp->purb) return -ENOMEM;
#ifdef SANITY_CHECK_BUFFERS
        pvr2_buffer_describe(bp, "create");
#endif
        return 0;
}

static void pvr2_buffer_done(struct pvr2_buffer *bp)
{
#ifdef SANITY_CHECK_BUFFERS
        pvr2_buffer_describe(bp, "delete");
#endif
        pvr2_buffer_wipe(bp);
        pvr2_buffer_set_none(bp);
        bp->signature = 0;
        bp->stream = NULL;
        usb_free_urb(bp->purb);
        pvr2_trace(PVR2_TRACE_BUF_POOL, "/*---TRACE_FLOW---*/"
                   " bufferDone %p", bp);
}
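
/* Resize the buffer pool so that exactly cnt buffers are in circulation.
   The pointer array is sized in multiples of 32 slots; when shrinking,
   buffers are torn down starting from the end of the array. */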
static int pvr2_stream_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
        int ret;
        unsigned int scnt;

        /* Allocate buffers pointer array in multiples of 32 entries */
        if (cnt == sp->buffer_total_count) return 0;

        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/ poolResize "
                   " stream=%p cur=%d adj=%+d",
                   sp,
                   sp->buffer_total_count,
                   cnt - sp->buffer_total_count);

        scnt = cnt & ~0x1f;
        if (cnt > scnt) scnt += 0x20;

        if (cnt > sp->buffer_total_count) {
                if (scnt > sp->buffer_slot_count) {
                        struct pvr2_buffer **nb;
                        nb = kmalloc(scnt * sizeof(*nb), GFP_KERNEL);
                        if (!nb) return -ENOMEM;
                        if (sp->buffer_slot_count) {
                                memcpy(nb, sp->buffers,
                                       sp->buffer_slot_count * sizeof(*nb));
                                kfree(sp->buffers);
                        }
                        sp->buffers = nb;
                        sp->buffer_slot_count = scnt;
                }
                while (sp->buffer_total_count < cnt) {
                        struct pvr2_buffer *bp;
                        bp = kmalloc(sizeof(*bp), GFP_KERNEL);
                        if (!bp) return -ENOMEM;
                        ret = pvr2_buffer_init(bp, sp, sp->buffer_total_count);
                        if (ret) {
                                kfree(bp);
                                return -ENOMEM;
                        }
                        sp->buffers[sp->buffer_total_count] = bp;
                        (sp->buffer_total_count)++;
                        pvr2_buffer_set_idle(bp);
                }
        } else {
                while (sp->buffer_total_count > cnt) {
                        struct pvr2_buffer *bp;
                        bp = sp->buffers[sp->buffer_total_count - 1];
                        /* Paranoia */
                        sp->buffers[sp->buffer_total_count - 1] = NULL;
                        (sp->buffer_total_count)--;
                        pvr2_buffer_done(bp);
                        kfree(bp);
                }
                if (scnt < sp->buffer_slot_count) {
                        struct pvr2_buffer **nb = NULL;
                        if (scnt) {
                                nb = kmemdup(sp->buffers, scnt * sizeof(*nb),
                                             GFP_KERNEL);
                                if (!nb) return -ENOMEM;
                        }
                        kfree(sp->buffers);
                        sp->buffers = nb;
                        sp->buffer_slot_count = scnt;
                }
        }
        return 0;
}

static int pvr2_stream_achieve_buffer_count(struct pvr2_stream *sp)
{
        struct pvr2_buffer *bp;
        unsigned int cnt;

        if (sp->buffer_total_count == sp->buffer_target_count) return 0;

        pvr2_trace(PVR2_TRACE_BUF_POOL,
                   "/*---TRACE_FLOW---*/"
                   " poolCheck stream=%p cur=%d tgt=%d",
                   sp, sp->buffer_total_count, sp->buffer_target_count);

        if (sp->buffer_total_count < sp->buffer_target_count) {
                return pvr2_stream_buffer_count(sp, sp->buffer_target_count);
        }

        cnt = 0;
        while ((sp->buffer_total_count - cnt) > sp->buffer_target_count) {
                bp = sp->buffers[sp->buffer_total_count - (cnt + 1)];
                if (bp->state != pvr2_buffer_state_idle) break;
                cnt++;
        }
        if (cnt) {
                pvr2_stream_buffer_count(sp, sp->buffer_total_count - cnt);
        }

        return 0;
}

static void pvr2_stream_internal_flush(struct pvr2_stream *sp)
{
        struct list_head *lp;
        struct pvr2_buffer *bp1;
        while ((lp = sp->queued_list.next) != &sp->queued_list) {
                bp1 = list_entry(lp, struct pvr2_buffer, list_overhead);
                pvr2_buffer_wipe(bp1);
                /* At this point, we should be guaranteed that no
                   completion callback may happen on this buffer.  But it's
                   possible that it might have completed after we noticed
                   it but before we wiped it.  So double check its status
                   here first. */
                if (bp1->state != pvr2_buffer_state_queued) continue;
                pvr2_buffer_set_idle(bp1);
        }
        if (sp->buffer_total_count != sp->buffer_target_count) {
                pvr2_stream_achieve_buffer_count(sp);
        }
}

static void pvr2_stream_init(struct pvr2_stream *sp)
{
        spin_lock_init(&sp->list_lock);
        mutex_init(&sp->mutex);
        INIT_LIST_HEAD(&sp->queued_list);
        INIT_LIST_HEAD(&sp->ready_list);
        INIT_LIST_HEAD(&sp->idle_list);
}

static void pvr2_stream_done(struct pvr2_stream *sp)
{
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                pvr2_stream_buffer_count(sp, 0);
        } while (0); mutex_unlock(&sp->mutex);
}
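
/* URB completion handler.  A successful transfer resets the failure
   count; errors are silently tolerated until fail_tolerance consecutive
   failures have accumulated, after which the error status is passed on
   through the buffer.  In every case the buffer is moved to the ready
   list and the stream's callback, if set, is invoked. */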
static void buffer_complete(struct urb *urb)
{
        struct pvr2_buffer *bp = urb->context;
        struct pvr2_stream *sp;
        unsigned long irq_flags;
        BUFFER_CHECK(bp);
        sp = bp->stream;
        bp->used_count = 0;
        bp->status = 0;
        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                   "/*---TRACE_FLOW---*/ bufferComplete %p stat=%d cnt=%d",
                   bp, urb->status, urb->actual_length);
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        if ((!(urb->status)) ||
            (urb->status == -ENOENT) ||
            (urb->status == -ECONNRESET) ||
            (urb->status == -ESHUTDOWN)) {
                (sp->buffers_processed)++;
                sp->bytes_processed += urb->actual_length;
                bp->used_count = urb->actual_length;
                if (sp->fail_count) {
                        pvr2_trace(PVR2_TRACE_TOLERANCE,
                                   "stream %p transfer ok"
                                   " - fail count reset", sp);
                        sp->fail_count = 0;
                }
        } else if (sp->fail_count < sp->fail_tolerance) {
                // We can tolerate this error, because we're below the
                // threshold...
                (sp->fail_count)++;
                (sp->buffers_failed)++;
                pvr2_trace(PVR2_TRACE_TOLERANCE,
                           "stream %p ignoring error %d"
                           " - fail count increased to %u",
                           sp, urb->status, sp->fail_count);
        } else {
                (sp->buffers_failed)++;
                bp->status = urb->status;
        }
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
        pvr2_buffer_set_ready(bp);
        if (sp && sp->callback_func) {
                sp->callback_func(sp->callback_data);
        }
}

struct pvr2_stream *pvr2_stream_create(void)
{
        struct pvr2_stream *sp;
        sp = kzalloc(sizeof(*sp), GFP_KERNEL);
        if (!sp) return sp;
        pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_create: sp=%p", sp);
        pvr2_stream_init(sp);
        return sp;
}

void pvr2_stream_destroy(struct pvr2_stream *sp)
{
        if (!sp) return;
        pvr2_trace(PVR2_TRACE_INIT, "pvr2_stream_destroy: sp=%p", sp);
        pvr2_stream_done(sp);
        kfree(sp);
}

void pvr2_stream_setup(struct pvr2_stream *sp,
                       struct usb_device *dev,
                       int endpoint,
                       unsigned int tolerance)
{
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                sp->dev = dev;
                sp->endpoint = endpoint;
                sp->fail_tolerance = tolerance;
        } while (0); mutex_unlock(&sp->mutex);
}

void pvr2_stream_set_callback(struct pvr2_stream *sp,
                              pvr2_stream_callback func,
                              void *data)
{
        unsigned long irq_flags;
        mutex_lock(&sp->mutex); do {
                spin_lock_irqsave(&sp->list_lock, irq_flags);
                sp->callback_data = data;
                sp->callback_func = func;
                spin_unlock_irqrestore(&sp->list_lock, irq_flags);
        } while (0); mutex_unlock(&sp->mutex);
}

void pvr2_stream_get_stats(struct pvr2_stream *sp,
                           struct pvr2_stream_stats *stats,
                           int zero_counts)
{
        unsigned long irq_flags;
        spin_lock_irqsave(&sp->list_lock, irq_flags);
        if (stats) {
                stats->buffers_in_queue = sp->q_count;
                stats->buffers_in_idle = sp->i_count;
                stats->buffers_in_ready = sp->r_count;
                stats->buffers_processed = sp->buffers_processed;
                stats->buffers_failed = sp->buffers_failed;
                stats->bytes_processed = sp->bytes_processed;
        }
        if (zero_counts) {
                sp->buffers_processed = 0;
                sp->buffers_failed = 0;
                sp->bytes_processed = 0;
        }
        spin_unlock_irqrestore(&sp->list_lock, irq_flags);
}

/* Query / set the nominal buffer count */
int pvr2_stream_get_buffer_count(struct pvr2_stream *sp)
{
        return sp->buffer_target_count;
}

int pvr2_stream_set_buffer_count(struct pvr2_stream *sp, unsigned int cnt)
{
        int ret;
        if (sp->buffer_target_count == cnt) return 0;
        mutex_lock(&sp->mutex); do {
                sp->buffer_target_count = cnt;
                ret = pvr2_stream_achieve_buffer_count(sp);
        } while (0); mutex_unlock(&sp->mutex);
        return ret;
}

struct pvr2_buffer *pvr2_stream_get_idle_buffer(struct pvr2_stream *sp)
{
        struct list_head *lp = sp->idle_list.next;
        if (lp == &sp->idle_list) return NULL;
        return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_ready_buffer(struct pvr2_stream *sp)
{
        struct list_head *lp = sp->ready_list.next;
        if (lp == &sp->ready_list) return NULL;
        return list_entry(lp, struct pvr2_buffer, list_overhead);
}

struct pvr2_buffer *pvr2_stream_get_buffer(struct pvr2_stream *sp, int id)
{
        if (id < 0) return NULL;
        if (id >= sp->buffer_total_count) return NULL;
        return sp->buffers[id];
}

int pvr2_stream_get_ready_count(struct pvr2_stream *sp)
{
        return sp->r_count;
}

void pvr2_stream_kill(struct pvr2_stream *sp)
{
        struct pvr2_buffer *bp;
        mutex_lock(&sp->mutex); do {
                pvr2_stream_internal_flush(sp);
                while ((bp = pvr2_stream_get_ready_buffer(sp)) != NULL) {
                        pvr2_buffer_set_idle(bp);
                }
                if (sp->buffer_total_count != sp->buffer_target_count) {
                        pvr2_stream_achieve_buffer_count(sp);
                }
        } while (0); mutex_unlock(&sp->mutex);
}
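
/* Hand a buffer to the USB core: move it onto the queued list, fill its
   bulk URB for the stream's endpoint and submit it.  Completion is
   reported asynchronously through buffer_complete() above. */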
int pvr2_buffer_queue(struct pvr2_buffer *bp)
{
#undef SEED_BUFFER
#ifdef SEED_BUFFER
        unsigned int idx;
        unsigned int val;
#endif
        int ret = 0;
        struct pvr2_stream *sp;
        if (!bp) return -EINVAL;
        sp = bp->stream;
        mutex_lock(&sp->mutex); do {
                pvr2_buffer_wipe(bp);
                if (!sp->dev) {
                        ret = -EIO;
                        break;
                }
                pvr2_buffer_set_queued(bp);
#ifdef SEED_BUFFER
                for (idx = 0; idx < (bp->max_count) / 4; idx++) {
                        val = bp->id << 24;
                        val |= idx;
                        ((unsigned int *)(bp->ptr))[idx] = val;
                }
#endif
                bp->status = -EINPROGRESS;
                usb_fill_bulk_urb(bp->purb,      // struct urb *urb
                                  sp->dev,       // struct usb_device *dev
                                  // endpoint (below)
                                  usb_rcvbulkpipe(sp->dev, sp->endpoint),
                                  bp->ptr,       // void *transfer_buffer
                                  bp->max_count, // int buffer_length
                                  buffer_complete,
                                  bp);
                usb_submit_urb(bp->purb, GFP_KERNEL);
        } while (0); mutex_unlock(&sp->mutex);
        return ret;
}

int pvr2_buffer_set_buffer(struct pvr2_buffer *bp, void *ptr, unsigned int cnt)
{
        int ret = 0;
        unsigned long irq_flags;
        struct pvr2_stream *sp;
        if (!bp) return -EINVAL;
        sp = bp->stream;
        mutex_lock(&sp->mutex); do {
                spin_lock_irqsave(&sp->list_lock, irq_flags);
                if (bp->state != pvr2_buffer_state_idle) {
                        ret = -EPERM;
                } else {
                        bp->ptr = ptr;
                        bp->stream->i_bcount -= bp->max_count;
                        bp->max_count = cnt;
                        bp->stream->i_bcount += bp->max_count;
                        pvr2_trace(PVR2_TRACE_BUF_FLOW,
                                   "/*---TRACE_FLOW---*/ bufferPool "
                                   " %8s cap cap=%07d cnt=%02d",
                                   pvr2_buffer_state_decode(
                                           pvr2_buffer_state_idle),
                                   bp->stream->i_bcount, bp->stream->i_count);
                }
                spin_unlock_irqrestore(&sp->list_lock, irq_flags);
        } while (0); mutex_unlock(&sp->mutex);
        return ret;
}

unsigned int pvr2_buffer_get_count(struct pvr2_buffer *bp)
{
        return bp->used_count;
}

int pvr2_buffer_get_status(struct pvr2_buffer *bp)
{
        return bp->status;
}

int pvr2_buffer_get_id(struct pvr2_buffer *bp)
{
        return bp->id;
}


/*
  Stuff for Emacs to see, in order to encourage consistent editing style:
  *** Local Variables: ***
  *** mode: c ***
  *** fill-column: 75 ***
  *** tab-width: 8 ***
  *** c-basic-offset: 8 ***
  *** End: ***
  */