/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN	8

/* ioctl macros */
#define MON_IOC_MAGIC	0x92

#define MON_IOCQ_URB_LEN	_IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS		_IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE	_IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE	_IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET		_IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH		_IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH		_IO(MON_IOC_MAGIC, 8)
#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32		_IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32	_IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE	PAGE_SIZE
#define CHUNK_ALIGN(x)	(((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 Mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX	CHUNK_ALIGN(1200*1024)
#define BUFF_DFL	CHUNK_ALIGN(300*1024)
#define BUFF_MIN	CHUNK_ALIGN(8*1024)
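/*
 * Back-of-envelope check of the figures above (added commentary, not
 * part of the original derivation): 480 Mbit/s is 60 MB/s of raw bus
 * bandwidth, and with HZ=100 one jiffy is 10 ms, so a saturated bus
 * moves about 600 KB per tick (614.4 KiB if "480 Mbit" is read as
 * 480*2^20 bits).  Two ticks of headroom is thus roughly 1.2 MB, which
 * is where BUFF_MAX lands after chunk alignment.
 */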
/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* gettimeofday */
	s32 ts_usec;		/* gettimeofday */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
};

/* Per-file statistics */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Only 48 bytes, not 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Keeping these two values equal prevents a mon_bin_hdr from wrapping
 * across a chunk boundary. */
#define PKT_ALIGN	64
#define PKT_SIZE	64

/* Max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);
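/*
 * A note on buffer addressing (added commentary): the ring is a virtual
 * array of b_size bytes backed by b_size/CHUNK_SIZE independent pages
 * in b_vec.  A byte offset "off" maps to page b_vec[off / CHUNK_SIZE]
 * at byte (off % CHUNK_SIZE); with 4 KB chunks, for example, offset
 * 5000 lives at byte 904 of the second page.  The copy helpers below
 * walk such offsets one chunk at a time and wrap them modulo b_size.
 */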
/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 * But it returns the new offset for repeated application.
 */
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size)
			off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size)
			off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}
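/*
 * Worked example of the filler logic above (added illustration): with
 * b_size = 1024, b_in = 960 and a 128-byte request, the 64 bytes left
 * before the end of the buffer cannot hold the packet, so a 64-byte
 * '@' filler (a bare header with len_cap = 0) is written at offset
 * 960, the event itself is placed at offset 0, and b_in becomes 128
 * with b_cnt grown by 128 + 64.
 */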
/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a DMA fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (!usb_endpoint_xfer_control(&urb->ep->desc) || ev_type != 'S')
		return '-';

	if (urb->setup_packet == NULL)
		return 'Z';

	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}

static char mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length)
{

	if (urb->dev->bus->uses_dma &&
	    (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
		mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
		return 0;
	}

	if (urb->transfer_buffer == NULL)
		return 'Z';

	mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
	return 0;
}
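/*
 * Layout note (added commentary): every event in the ring is a
 * PKT_SIZE header optionally followed by len_cap bytes of payload,
 * with the total rounded up to PKT_ALIGN.  mon_bin_event below builds
 * the 'S' (submission) and 'C' (callback) records; captured data is
 * capped at a fifth of the buffer so that one giant URB cannot flush
 * out all accumulated history.
 */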
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	unsigned long flags;
	struct timeval ts;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	do_gettimeofday(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active)
		offset = mon_buff_area_alloc_contiguous(rp, length + PKT_SIZE);
	else
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE);
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size)
		offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_usec;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length;

	ep->flag_setup = mon_bin_get_setup(ep->setup, urb, ev_type);
	if (length != 0) {
		ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
		if (ep->flag_data != 0) {	/* Yes, it's 0x00, not '0' */
			ep->len_cap = 0;
			mon_buff_area_shrink(rp, length);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}
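/*
 * Added note: the 'E' records built below mark URBs that failed at
 * submission time.  They carry no payload and no timestamp, only the
 * identifying fields and the error code in ->status.
 */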
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	if ((mbus = mon_bus_lookup(iminor(inode))) == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);

	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}
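/*
 * Added note: when the caller's data buffer is smaller than the event
 * payload, the copy below is silently truncated to `nbytes'; userland
 * can detect this by comparing the returned hdr->len_cap against the
 * space it supplied.
 */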
/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, sizeof(struct mon_bin_hdr))) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size)
		offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus *mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < sizeof(struct mon_bin_hdr)) {
		step_len = min(nbytes, sizeof(struct mon_bin_hdr) - rp->b_read);
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= sizeof(struct mon_bin_hdr)) {
		/* Copy no more than what remains of this event's data. */
		step_len = ep->len_cap;
		step_len -= rp->b_read - sizeof(struct mon_bin_hdr);
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - sizeof(struct mon_bin_hdr);
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= sizeof(struct mon_bin_hdr) + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}
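/*
 * Added note: mon_bin_flush() and mon_bin_fetch() below are the two
 * halves of the zero-copy protocol used with mmap: MFETCH hands the
 * application byte offsets of events inside the mapped ring, and a
 * later MFLUSH releases those events so their space can be reused.
 */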
/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}
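/*
 * Added illustration, not from the original author: a sketch of the
 * fetch/flush loop a pcap-style reader might run against an open
 * usbmon descriptor, assuming the mmap support at the bottom of this
 * file is compiled in (it sits under "#if 0" today).  Each pass
 * releases the previous batch and picks up the next one; the kernel
 * rewrites nfetch with the number of events actually fetched, and
 * handle() stands for the application's own consumer.
 *
 *	u32 offs[64];
 *	struct mon_bin_mfetch req;
 *	char *ring = mmap(NULL, ring_size, PROT_READ, MAP_SHARED, fd, 0);
 *	int i, prev = 0;
 *
 *	for (;;) {
 *		req.offvec = offs;
 *		req.nfetch = 64;
 *		req.nflush = prev;
 *		if (ioctl(fd, MON_IOCX_MFETCH, &req) < 0)
 *			break;
 *		for (i = 0; i < req.nfetch; i++)
 *			handle((struct mon_bin_hdr *)(ring + offs[i]));
 *		prev = req.nfetch;
 *	}
 */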
static int mon_bin_ioctl(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus *mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		ret = rp->b_size;
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size flushes its contents; the new
		 * buffer is allocated before the old one is released, so
		 * that the device stays functional even under memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		if ((vec = kzalloc(sizeof(struct mon_pgmap) * (size/CHUNK_SIZE),
		    GFP_KERNEL)) == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		/* Free the old buffer with its own page count, not the new one. */
		mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
		kfree(rp->b_vec);
		rp->b_vec  = vec;
		rp->b_size = size;
		rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp,
		    getb.hdr, getb.data, (unsigned int)getb.alloc);
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_GET32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
		    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp,
		    compat_ptr(getb.hdr32), compat_ptr(getb.data32),
		    getb.alloc32);
		}
		break;
#endif

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

#ifdef CONFIG_COMPAT
	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		ret = 0;
		}
		break;
#endif
	case MON_IOCG_STATS:
		{
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		/* Report the saved count; cnt_lost was just zeroed above. */
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;
		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

static unsigned int
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= POLLIN | POLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

#if 0

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active++;
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	rp->mmap_active--;
}

/*
 * Map ring pages to user space.
 */
static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

struct vm_operations_struct mon_bin_vm_ops = {
	.open =		mon_bin_vma_open,
	.close =	mon_bin_vma_close,
	.fault =	mon_bin_vma_fault,
};

int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;
	vma->vm_flags |= VM_RESERVED;
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

#endif /* 0 */

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.ioctl =	mon_bin_ioctl,
	.release =	mon_bin_release,
};
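/*
 * Added note: callers sleep here with fetch_lock held, which is what
 * keeps a second reader from consuming the event we are woken up for.
 * Returns 0 with at least one event queued, -EWOULDBLOCK for
 * non-blocking opens, or -EINTR when interrupted by a signal.
 */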
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page(vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}

int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus? ubus->controller: NULL,
	    MKDEV(MAJOR(mon_bin_dev0), minor), "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}