// SPDX-License-Identifier: GPL-2.0
/*
 * The USB Monitor, inspired by Dave Harding's USBMon.
 *
 * This is a binary format reader.
 *
 * Copyright (C) 2006 Paolo Abeni (paolo.abeni@email.it)
 * Copyright (C) 2006,2007 Pete Zaitcev (zaitcev@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/export.h>
#include <linux/usb.h>
#include <linux/poll.h>
#include <linux/compat.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/time64.h>

#include <linux/uaccess.h>

#include "usb_mon.h"

/*
 * Defined by USB 2.0 clause 9.3, table 9.2.
 */
#define SETUP_LEN  8

/* ioctl macros */
#define MON_IOC_MAGIC 0x92

#define MON_IOCQ_URB_LEN _IO(MON_IOC_MAGIC, 1)
/* #2 used to be MON_IOCX_URB, removed before it got into Linus tree */
#define MON_IOCG_STATS _IOR(MON_IOC_MAGIC, 3, struct mon_bin_stats)
#define MON_IOCT_RING_SIZE _IO(MON_IOC_MAGIC, 4)
#define MON_IOCQ_RING_SIZE _IO(MON_IOC_MAGIC, 5)
#define MON_IOCX_GET _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get)
#define MON_IOCX_MFETCH _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch)
#define MON_IOCH_MFLUSH _IO(MON_IOC_MAGIC, 8)
/* #9 was MON_IOCT_SETAPI */
#define MON_IOCX_GETX _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get)

#ifdef CONFIG_COMPAT
#define MON_IOCX_GET32 _IOW(MON_IOC_MAGIC, 6, struct mon_bin_get32)
#define MON_IOCX_MFETCH32 _IOWR(MON_IOC_MAGIC, 7, struct mon_bin_mfetch32)
#define MON_IOCX_GETX32 _IOW(MON_IOC_MAGIC, 10, struct mon_bin_get32)
#endif

/*
 * Some architectures have enormous basic pages (16KB for ia64, 64KB for ppc).
 * But it's all right. Just use a simple way to make sure the chunk is never
 * smaller than a page.
 *
 * N.B. An application does not know our chunk size.
 *
 * Woops, get_zeroed_page() returns a single page. I guess we're stuck with
 * page-sized chunks for the time being.
 */
#define CHUNK_SIZE PAGE_SIZE
#define CHUNK_ALIGN(x) (((x)+CHUNK_SIZE-1) & ~(CHUNK_SIZE-1))
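
/*
 * Illustrative example (added note, not part of the original comments):
 * with 4 KiB pages, CHUNK_ALIGN(8*1024) is already aligned and stays 8192;
 * on a 64 KiB-page ppc box the same request rounds up to 65536, so the
 * effective minimum buffer grows with the page size.
 */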

/*
 * The magic limit was calculated so that it allows the monitoring
 * application to pick data once in two ticks. This way, another application,
 * which presumably drives the bus, gets to hog CPU, yet we collect our data.
 * If HZ is 100, a 480 mbit/s bus drives 614 KB every jiffy. USB has an
 * enormous overhead built into the bus protocol, so we need about 1000 KB.
 *
 * This is still too much for most cases, where we just snoop a few
 * descriptor fetches for enumeration. So, the default is a "reasonable"
 * amount for systems with HZ=250 and incomplete bus saturation.
 *
 * XXX What about multi-megabyte URBs which take minutes to transfer?
 */
#define BUFF_MAX  CHUNK_ALIGN(1200*1024)
#define BUFF_DFL   CHUNK_ALIGN(300*1024)
#define BUFF_MIN     CHUNK_ALIGN(8*1024)

/*
 * The per-event API header (2 per URB).
 *
 * This structure is seen in userland as defined by the documentation.
 */
struct mon_bin_hdr {
	u64 id;			/* URB ID - from submission to callback */
	unsigned char type;	/* Same as in text API; extensible. */
	unsigned char xfer_type; /* ISO, Intr, Control, Bulk */
	unsigned char epnum;	/* Endpoint number and transfer direction */
	unsigned char devnum;	/* Device address */
	unsigned short busnum;	/* Bus number */
	char flag_setup;
	char flag_data;
	s64 ts_sec;		/* ktime_get_real_ts64 */
	s32 ts_usec;		/* ktime_get_real_ts64 */
	int status;
	unsigned int len_urb;	/* Length of data (submitted or actual) */
	unsigned int len_cap;	/* Delivered length */
	union {
		unsigned char setup[SETUP_LEN];	/* Only for Control S-type */
		struct iso_rec {
			int error_count;
			int numdesc;
		} iso;
	} s;
	int interval;
	int start_frame;
	unsigned int xfer_flags;
	unsigned int ndesc;	/* Actual number of ISO descriptors */
};

/*
 * ISO vector, packed into the head of data stream.
 * This has to take 16 bytes to make sure that the end of buffer
 * wrap is not happening in the middle of a descriptor.
 */
struct mon_bin_isodesc {
	int          iso_status;
	unsigned int iso_off;
	unsigned int iso_len;
	u32 _pad;
};

/* per file statistic */
struct mon_bin_stats {
	u32 queued;
	u32 dropped;
};

struct mon_bin_get {
	struct mon_bin_hdr __user *hdr;	/* Can be 48 bytes or 64. */
	void __user *data;
	size_t alloc;		/* Length of data (can be zero) */
};

struct mon_bin_mfetch {
	u32 __user *offvec;	/* Vector of events fetched */
	u32 nfetch;		/* Number of events to fetch (out: fetched) */
	u32 nflush;		/* Number of events to flush */
};

#ifdef CONFIG_COMPAT
struct mon_bin_get32 {
	u32 hdr32;
	u32 data32;
	u32 alloc32;
};

struct mon_bin_mfetch32 {
	u32 offvec32;
	u32 nfetch32;
	u32 nflush32;
};
#endif

/* Having these two values same prevents wrapping of the mon_bin_hdr */
#define PKT_ALIGN   64
#define PKT_SIZE    64

#define PKT_SZ_API0 48	/* API 0 (2.6.20) size */
#define PKT_SZ_API1 64	/* API 1 size: extra fields */

#define ISODESC_MAX   128	/* Same number as usbfs allows, 2048 bytes. */

/* max number of USB buses supported */
#define MON_BIN_MAX_MINOR 128
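
/*
 * Note on the ring layout (added for orientation): every record starts with
 * one PKT_SIZE (64-byte) mon_bin_hdr. For isochronous URBs it is followed
 * by up to ISODESC_MAX struct mon_bin_isodesc entries, then by the captured
 * data; len_cap counts the descriptors plus the data. Each record is padded
 * to a PKT_ALIGN boundary, so e.g. a bulk event with 10 captured bytes
 * takes 64 + 10 = 74 bytes, rounded up to 128 bytes of ring space.
 */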

/*
 * The buffer: map of used pages.
 */
struct mon_pgmap {
	struct page *pg;
	unsigned char *ptr;	/* XXX just use page_to_virt everywhere? */
};

/*
 * This gets associated with an open file struct.
 */
struct mon_reader_bin {
	/* The buffer: one per open. */
	spinlock_t b_lock;		/* Protect b_cnt, b_in */
	unsigned int b_size;		/* Current size of the buffer - bytes */
	unsigned int b_cnt;		/* Bytes used */
	unsigned int b_in, b_out;	/* Offsets into buffer - bytes */
	unsigned int b_read;		/* Amount of read data in curr. pkt. */
	struct mon_pgmap *b_vec;	/* The map array */
	wait_queue_head_t b_wait;	/* Wait for data here */

	struct mutex fetch_lock;	/* Protect b_read, b_out */
	int mmap_active;

	/* A list of these is needed for "bus 0". Some time later. */
	struct mon_reader r;

	/* Stats */
	unsigned int cnt_lost;
};

static inline struct mon_bin_hdr *MON_OFF2HDR(const struct mon_reader_bin *rp,
    unsigned int offset)
{
	return (struct mon_bin_hdr *)
	    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
}

#define MON_RING_EMPTY(rp)	((rp)->b_cnt == 0)

static unsigned char xfer_to_pipe[4] = {
	PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
};

static struct class *mon_bin_class;
static dev_t mon_bin_dev0;
static struct cdev mon_bin_cdev;

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size);
static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp);
static int mon_alloc_buff(struct mon_pgmap *map, int npages);
static void mon_free_buff(struct mon_pgmap *map, int npages);

/*
 * This is a "chunked memcpy". It does not manipulate any counters.
 */
static unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
    unsigned int off, const unsigned char *from, unsigned int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		memcpy(buf, from, step_len);
		if ((off += step_len) >= this->b_size) off = 0;
		from += step_len;
		length -= step_len;
	}
	return off;
}

/*
 * This is a little worse than the above because it's "chunked copy_to_user".
 * The return value is an error code, not an offset.
 */
static int copy_from_buf(const struct mon_reader_bin *this, unsigned int off,
    char __user *to, int length)
{
	unsigned int step_len;
	unsigned char *buf;
	unsigned int in_page;

	while (length) {
		/*
		 * Determine step_len.
		 */
		step_len = length;
		in_page = CHUNK_SIZE - (off & (CHUNK_SIZE-1));
		if (in_page < step_len)
			step_len = in_page;

		/*
		 * Copy data and advance pointers.
		 */
		buf = this->b_vec[off / CHUNK_SIZE].ptr + off % CHUNK_SIZE;
		if (copy_to_user(to, buf, step_len))
			return -EINVAL;
		if ((off += step_len) >= this->b_size) off = 0;
		to += step_len;
		length -= step_len;
	}
	return 0;
}

/*
 * Allocate an (aligned) area in the buffer.
 * This is called under b_lock.
 * Returns ~0 on failure.
 */
static unsigned int mon_buff_area_alloc(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	offset = rp->b_in;
	rp->b_cnt += size;
	if ((rp->b_in += size) >= rp->b_size)
		rp->b_in -= rp->b_size;
	return offset;
}

/*
 * This is the same thing as mon_buff_area_alloc, only it does not allow
 * buffers to wrap. This is needed by applications which pass references
 * into mmap-ed buffers up their stacks (libpcap can do that).
 *
 * Currently, we always have the header stuck with the data, although
 * it is not strictly speaking necessary.
 *
 * When a buffer would wrap, we place a filler packet to mark the space.
 */
static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
    unsigned int size)
{
	unsigned int offset;
	unsigned int fill_size;

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	if (rp->b_cnt + size > rp->b_size)
		return ~0;
	if (rp->b_in + size > rp->b_size) {
		/*
		 * This would wrap. Find if we still have space after
		 * skipping to the end of the buffer. If we do, place
		 * a filler packet and allocate a new packet.
		 */
		fill_size = rp->b_size - rp->b_in;
		if (rp->b_cnt + size + fill_size > rp->b_size)
			return ~0;
		mon_buff_area_fill(rp, rp->b_in, fill_size);

		offset = 0;
		rp->b_in = size;
		rp->b_cnt += size + fill_size;
	} else if (rp->b_in + size == rp->b_size) {
		offset = rp->b_in;
		rp->b_in = 0;
		rp->b_cnt += size;
	} else {
		offset = rp->b_in;
		rp->b_in += size;
		rp->b_cnt += size;
	}
	return offset;
}

/*
 * Return a few (kilo-)bytes to the head of the buffer.
 * This is used if a data fetch fails.
 */
static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
{

	/* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
	rp->b_cnt -= size;
	if (rp->b_in < size)
		rp->b_in += rp->b_size;
	rp->b_in -= size;
}

/*
 * This has to be called under both b_lock and fetch_lock, because
 * it accesses both b_cnt and b_out.
 */
static void mon_buff_area_free(struct mon_reader_bin *rp, unsigned int size)
{

	size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
	rp->b_cnt -= size;
	if ((rp->b_out += size) >= rp->b_size)
		rp->b_out -= rp->b_size;
}

static void mon_buff_area_fill(const struct mon_reader_bin *rp,
    unsigned int offset, unsigned int size)
{
	struct mon_bin_hdr *ep;

	ep = MON_OFF2HDR(rp, offset);
	memset(ep, 0, PKT_SIZE);
	ep->type = '@';
	ep->len_cap = size - PKT_SIZE;
}

static inline char mon_bin_get_setup(unsigned char *setupb,
    const struct urb *urb, char ev_type)
{

	if (urb->setup_packet == NULL)
		return 'Z';
	memcpy(setupb, urb->setup_packet, SETUP_LEN);
	return 0;
}
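
/*
 * (Added comment.) Copy the URB data into the ring, either from the linear
 * transfer_buffer or from the scatter-gather list. Returns the number of
 * bytes that could not be captured and sets *flag: 'Z' when there is no
 * buffer at all, 'D' when the data is not addressable (coalesced or
 * highmem SG).
 */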
static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, unsigned int length,
    char *flag)
{
	int i;
	struct scatterlist *sg;
	unsigned int this_len;

	*flag = 0;
	if (urb->num_sgs == 0) {
		if (urb->transfer_buffer == NULL) {
			*flag = 'Z';
			return length;
		}
		mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
		length = 0;

	} else {
		/* If IOMMU coalescing occurred, we cannot trust sg_page */
		if (urb->transfer_flags & URB_DMA_SG_COMBINED) {
			*flag = 'D';
			return length;
		}

		/* Copy up to the first non-addressable segment */
		for_each_sg(urb->sg, sg, urb->num_sgs, i) {
			if (length == 0 || PageHighMem(sg_page(sg)))
				break;
			this_len = min_t(unsigned int, sg->length, length);
			offset = mon_copy_to_buff(rp, offset, sg_virt(sg),
					this_len);
			length -= this_len;
		}
		if (i == 0)
			*flag = 'D';
	}

	return length;
}

/*
 * This is the look-ahead pass in case of 'C Zi', when actual_length cannot
 * be used to determine the length of the whole contiguous buffer.
 */
static unsigned int mon_bin_collate_isodesc(const struct mon_reader_bin *rp,
    struct urb *urb, unsigned int ndesc)
{
	struct usb_iso_packet_descriptor *fp;
	unsigned int length;

	length = 0;
	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		if (fp->actual_length != 0) {
			if (fp->offset + fp->actual_length > length)
				length = fp->offset + fp->actual_length;
		}
		fp++;
	}
	return length;
}

static void mon_bin_get_isodesc(const struct mon_reader_bin *rp,
    unsigned int offset, struct urb *urb, char ev_type, unsigned int ndesc)
{
	struct mon_bin_isodesc *dp;
	struct usb_iso_packet_descriptor *fp;

	fp = urb->iso_frame_desc;
	while (ndesc-- != 0) {
		dp = (struct mon_bin_isodesc *)
		    (rp->b_vec[offset / CHUNK_SIZE].ptr + offset % CHUNK_SIZE);
		dp->iso_status = fp->status;
		dp->iso_off = fp->offset;
		dp->iso_len = (ev_type == 'S') ? fp->length : fp->actual_length;
		dp->_pad = 0;
		if ((offset += sizeof(struct mon_bin_isodesc)) >= rp->b_size)
			offset = 0;
		fp++;
	}
}
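
/*
 * (Added comment.) Record one URB event in the ring: allocate space under
 * b_lock (the contiguous allocator is used while the buffer is mmap-ed),
 * fill the header, then the ISO descriptors and as much data as permitted.
 */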
static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
    char ev_type, int status)
{
	const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int urb_length;
	unsigned int offset;
	unsigned int length;
	unsigned int delta;
	unsigned int ndesc, lendesc;
	unsigned char dir;
	struct mon_bin_hdr *ep;
	char data_tag = 0;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	/*
	 * Find the maximum allowable length, then allocate space.
	 */
	urb_length = (ev_type == 'S') ?
	    urb->transfer_buffer_length : urb->actual_length;
	length = urb_length;

	if (usb_endpoint_xfer_isoc(epd)) {
		if (urb->number_of_packets < 0) {
			ndesc = 0;
		} else if (urb->number_of_packets >= ISODESC_MAX) {
			ndesc = ISODESC_MAX;
		} else {
			ndesc = urb->number_of_packets;
		}
		if (ev_type == 'C' && usb_urb_dir_in(urb))
			length = mon_bin_collate_isodesc(rp, urb, ndesc);
	} else {
		ndesc = 0;
	}
	lendesc = ndesc*sizeof(struct mon_bin_isodesc);

	/* not an issue unless there's a subtle bug in a HCD somewhere */
	if (length >= urb->transfer_buffer_length)
		length = urb->transfer_buffer_length;

	if (length >= rp->b_size/5)
		length = rp->b_size/5;

	if (usb_urb_dir_in(urb)) {
		if (ev_type == 'S') {
			length = 0;
			data_tag = '<';
		}
		/* Cannot rely on endpoint number in case of control ep.0 */
		dir = USB_DIR_IN;
	} else {
		if (ev_type == 'C') {
			length = 0;
			data_tag = '>';
		}
		dir = 0;
	}

	if (rp->mmap_active) {
		offset = mon_buff_area_alloc_contiguous(rp,
						 length + PKT_SIZE + lendesc);
	} else {
		offset = mon_buff_area_alloc(rp, length + PKT_SIZE + lendesc);
	}
	if (offset == ~0) {
		rp->cnt_lost++;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);
	if ((offset += PKT_SIZE) >= rp->b_size) offset = 0;

	/*
	 * Fill the allocated area.
	 */
	memset(ep, 0, PKT_SIZE);
	ep->type = ev_type;
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(epd)];
	ep->epnum = dir | usb_endpoint_num(epd);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = status;
	ep->len_urb = urb_length;
	ep->len_cap = length + lendesc;
	ep->xfer_flags = urb->transfer_flags;

	if (usb_endpoint_xfer_int(epd)) {
		ep->interval = urb->interval;
	} else if (usb_endpoint_xfer_isoc(epd)) {
		ep->interval = urb->interval;
		ep->start_frame = urb->start_frame;
		ep->s.iso.error_count = urb->error_count;
		ep->s.iso.numdesc = urb->number_of_packets;
	}

	if (usb_endpoint_xfer_control(epd) && ev_type == 'S') {
		ep->flag_setup = mon_bin_get_setup(ep->s.setup, urb, ev_type);
	} else {
		ep->flag_setup = '-';
	}

	if (ndesc != 0) {
		ep->ndesc = ndesc;
		mon_bin_get_isodesc(rp, offset, urb, ev_type, ndesc);
		if ((offset += lendesc) >= rp->b_size)
			offset -= rp->b_size;
	}

	if (length != 0) {
		length = mon_bin_get_data(rp, offset, urb, length,
				&ep->flag_data);
		if (length > 0) {
			delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			ep->len_cap -= length;
			delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
			mon_buff_area_shrink(rp, delta);
		}
	} else {
		ep->flag_data = data_tag;
	}

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static void mon_bin_submit(void *data, struct urb *urb)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'S', -EINPROGRESS);
}

static void mon_bin_complete(void *data, struct urb *urb, int status)
{
	struct mon_reader_bin *rp = data;
	mon_bin_event(rp, urb, 'C', status);
}
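
/*
 * (Added comment.) A normal URB produces an 'S' record at submission and a
 * 'C' record at completion (see mon_bin_submit/mon_bin_complete above).
 * When submission fails, this callback writes a header-only 'E' record in
 * place of the completion event.
 */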
static void mon_bin_error(void *data, struct urb *urb, int error)
{
	struct mon_reader_bin *rp = data;
	struct timespec64 ts;
	unsigned long flags;
	unsigned int offset;
	struct mon_bin_hdr *ep;

	ktime_get_real_ts64(&ts);

	spin_lock_irqsave(&rp->b_lock, flags);

	offset = mon_buff_area_alloc(rp, PKT_SIZE);
	if (offset == ~0) {
		/* Not incrementing cnt_lost. Just because. */
		spin_unlock_irqrestore(&rp->b_lock, flags);
		return;
	}

	ep = MON_OFF2HDR(rp, offset);

	memset(ep, 0, PKT_SIZE);
	ep->type = 'E';
	ep->xfer_type = xfer_to_pipe[usb_endpoint_type(&urb->ep->desc)];
	ep->epnum = usb_urb_dir_in(urb) ? USB_DIR_IN : 0;
	ep->epnum |= usb_endpoint_num(&urb->ep->desc);
	ep->devnum = urb->dev->devnum;
	ep->busnum = urb->dev->bus->busnum;
	ep->id = (unsigned long) urb;
	ep->ts_sec = ts.tv_sec;
	ep->ts_usec = ts.tv_nsec / NSEC_PER_USEC;
	ep->status = error;

	ep->flag_setup = '-';
	ep->flag_data = 'E';

	spin_unlock_irqrestore(&rp->b_lock, flags);

	wake_up(&rp->b_wait);
}

static int mon_bin_open(struct inode *inode, struct file *file)
{
	struct mon_bus *mbus;
	struct mon_reader_bin *rp;
	size_t size;
	int rc;

	mutex_lock(&mon_lock);
	mbus = mon_bus_lookup(iminor(inode));
	if (mbus == NULL) {
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}
	if (mbus != &mon_bus0 && mbus->u_bus == NULL) {
		printk(KERN_ERR TAG ": consistency error on open\n");
		mutex_unlock(&mon_lock);
		return -ENODEV;
	}

	rp = kzalloc(sizeof(struct mon_reader_bin), GFP_KERNEL);
	if (rp == NULL) {
		rc = -ENOMEM;
		goto err_alloc;
	}
	spin_lock_init(&rp->b_lock);
	init_waitqueue_head(&rp->b_wait);
	mutex_init(&rp->fetch_lock);
	rp->b_size = BUFF_DFL;

	size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);
	if ((rp->b_vec = kzalloc(size, GFP_KERNEL)) == NULL) {
		rc = -ENOMEM;
		goto err_allocvec;
	}

	if ((rc = mon_alloc_buff(rp->b_vec, rp->b_size/CHUNK_SIZE)) < 0)
		goto err_allocbuff;

	rp->r.m_bus = mbus;
	rp->r.r_data = rp;
	rp->r.rnf_submit = mon_bin_submit;
	rp->r.rnf_error = mon_bin_error;
	rp->r.rnf_complete = mon_bin_complete;

	mon_reader_add(mbus, &rp->r);

	file->private_data = rp;
	mutex_unlock(&mon_lock);
	return 0;

err_allocbuff:
	kfree(rp->b_vec);
err_allocvec:
	kfree(rp);
err_alloc:
	mutex_unlock(&mon_lock);
	return rc;
}

/*
 * Extract an event from buffer and copy it to user space.
 * Wait if there is no event ready.
 * Returns zero or error.
 */
static int mon_bin_get_event(struct file *file, struct mon_reader_bin *rp,
    struct mon_bin_hdr __user *hdr, unsigned int hdrbytes,
    void __user *data, unsigned int nbytes)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	size_t step_len;
	unsigned int offset;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (copy_to_user(hdr, ep, hdrbytes)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	step_len = min(ep->len_cap, nbytes);
	if ((offset = rp->b_out + PKT_SIZE) >= rp->b_size) offset = 0;

	if (copy_from_buf(rp, offset, data, step_len)) {
		mutex_unlock(&rp->fetch_lock);
		return -EFAULT;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;

	mutex_unlock(&rp->fetch_lock);
	return 0;
}

static int mon_bin_release(struct inode *inode, struct file *file)
{
	struct mon_reader_bin *rp = file->private_data;
	struct mon_bus* mbus = rp->r.m_bus;

	mutex_lock(&mon_lock);

	if (mbus->nreaders <= 0) {
		printk(KERN_ERR TAG ": consistency error on close\n");
		mutex_unlock(&mon_lock);
		return 0;
	}
	mon_reader_del(mbus, &rp->r);

	mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
	kfree(rp->b_vec);
	kfree(rp);

	mutex_unlock(&mon_lock);
	return 0;
}

static ssize_t mon_bin_read(struct file *file, char __user *buf,
    size_t nbytes, loff_t *ppos)
{
	struct mon_reader_bin *rp = file->private_data;
	unsigned int hdrbytes = PKT_SZ_API0;
	unsigned long flags;
	struct mon_bin_hdr *ep;
	unsigned int offset;
	size_t step_len;
	char *ptr;
	ssize_t done = 0;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	ep = MON_OFF2HDR(rp, rp->b_out);

	if (rp->b_read < hdrbytes) {
		step_len = min(nbytes, (size_t)(hdrbytes - rp->b_read));
		ptr = ((char *)ep) + rp->b_read;
		if (step_len && copy_to_user(buf, ptr, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	if (rp->b_read >= hdrbytes) {
		step_len = ep->len_cap;
		step_len -= rp->b_read - hdrbytes;
		if (step_len > nbytes)
			step_len = nbytes;
		offset = rp->b_out + PKT_SIZE;
		offset += rp->b_read - hdrbytes;
		if (offset >= rp->b_size)
			offset -= rp->b_size;
		if (copy_from_buf(rp, offset, buf, step_len)) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}
		nbytes -= step_len;
		buf += step_len;
		rp->b_read += step_len;
		done += step_len;
	}

	/*
	 * Check if whole packet was read, and if so, jump to the next one.
	 */
	if (rp->b_read >= hdrbytes + ep->len_cap) {
		spin_lock_irqsave(&rp->b_lock, flags);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
		spin_unlock_irqrestore(&rp->b_lock, flags);
		rp->b_read = 0;
	}

	mutex_unlock(&rp->fetch_lock);
	return done;
}
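
/*
 * Usage sketch (added, userspace, for illustration only): a reader that
 * mmaps the ring avoids the copy in mon_bin_read() by driving the
 * fetch/flush ioctls directly, roughly:
 *
 *	size = ioctl(fd, MON_IOCQ_RING_SIZE, 0);
 *	map = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, 0);
 *	mf.offvec = offs; mf.nfetch = N; mf.nflush = ndone;
 *	ioctl(fd, MON_IOCX_MFETCH, &mf);	(blocks until >= 1 event)
 *	hdr = (struct mon_bin_hdr *)(map + offs[0]);
 *
 * Offsets returned by MON_IOCX_MFETCH are byte offsets into the mapping;
 * fetched records stay valid until they are flushed.
 */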

/*
 * Remove at most nevents from chunked buffer.
 * Returns the number of removed events.
 */
static int mon_bin_flush(struct mon_reader_bin *rp, unsigned nevents)
{
	unsigned long flags;
	struct mon_bin_hdr *ep;
	int i;

	mutex_lock(&rp->fetch_lock);
	spin_lock_irqsave(&rp->b_lock, flags);
	for (i = 0; i < nevents; ++i) {
		if (MON_RING_EMPTY(rp))
			break;

		ep = MON_OFF2HDR(rp, rp->b_out);
		mon_buff_area_free(rp, PKT_SIZE + ep->len_cap);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);
	rp->b_read = 0;
	mutex_unlock(&rp->fetch_lock);
	return i;
}

/*
 * Fetch at most max event offsets into the buffer and put them into vec.
 * The events are usually freed later with mon_bin_flush.
 * Return the effective number of events fetched.
 */
static int mon_bin_fetch(struct file *file, struct mon_reader_bin *rp,
    u32 __user *vec, unsigned int max)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;
	int rc;

	mutex_lock(&rp->fetch_lock);

	if ((rc = mon_bin_wait_event(file, rp)) < 0) {
		mutex_unlock(&rp->fetch_lock);
		return rc;
	}

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		if (nevents >= max)
			break;

		ep = MON_OFF2HDR(rp, cur_out);
		if (put_user(cur_out, &vec[nevents])) {
			mutex_unlock(&rp->fetch_lock);
			return -EFAULT;
		}

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}

/*
 * Count events. This is almost the same as the above mon_bin_fetch,
 * only we do not store offsets into user vector, and we have no limit.
 */
static int mon_bin_queued(struct mon_reader_bin *rp)
{
	unsigned int cur_out;
	unsigned int bytes, avail;
	unsigned int size;
	unsigned int nevents;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	mutex_lock(&rp->fetch_lock);

	spin_lock_irqsave(&rp->b_lock, flags);
	avail = rp->b_cnt;
	spin_unlock_irqrestore(&rp->b_lock, flags);

	cur_out = rp->b_out;
	nevents = 0;
	bytes = 0;
	while (bytes < avail) {
		ep = MON_OFF2HDR(rp, cur_out);

		nevents++;
		size = ep->len_cap + PKT_SIZE;
		size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
		if ((cur_out += size) >= rp->b_size)
			cur_out -= rp->b_size;
		bytes += size;
	}

	mutex_unlock(&rp->fetch_lock);
	return nevents;
}
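
/*
 * Usage sketch (added, userspace, for illustration only): the simplest
 * consumer fetches one event at a time with the GET ioctls, e.g.
 *
 *	struct mon_bin_get getb = { &hdr, data, sizeof(data) };
 *	ioctl(fd, MON_IOCX_GETX, &getb);
 *
 * which blocks until an event is available, copies a 64-byte header and at
 * most sizeof(data) bytes of captured data, and consumes the event.
 */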

/*
 */
static long mon_bin_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	// struct mon_bus* mbus = rp->r.m_bus;
	int ret = 0;
	struct mon_bin_hdr *ep;
	unsigned long flags;

	switch (cmd) {

	case MON_IOCQ_URB_LEN:
		/*
		 * N.B. This only returns the size of data, without the header.
		 */
		spin_lock_irqsave(&rp->b_lock, flags);
		if (!MON_RING_EMPTY(rp)) {
			ep = MON_OFF2HDR(rp, rp->b_out);
			ret = ep->len_cap;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		break;

	case MON_IOCQ_RING_SIZE:
		mutex_lock(&rp->fetch_lock);
		ret = rp->b_size;
		mutex_unlock(&rp->fetch_lock);
		break;

	case MON_IOCT_RING_SIZE:
		/*
		 * Changing the buffer size will flush its contents; the new
		 * buffer is allocated before releasing the old one to be sure
		 * the device will stay functional also in case of memory
		 * pressure.
		 */
		{
		int size;
		struct mon_pgmap *vec;

		if (arg < BUFF_MIN || arg > BUFF_MAX)
			return -EINVAL;

		size = CHUNK_ALIGN(arg);
		vec = kcalloc(size / CHUNK_SIZE, sizeof(struct mon_pgmap),
			      GFP_KERNEL);
		if (vec == NULL) {
			ret = -ENOMEM;
			break;
		}

		ret = mon_alloc_buff(vec, size/CHUNK_SIZE);
		if (ret < 0) {
			kfree(vec);
			break;
		}

		mutex_lock(&rp->fetch_lock);
		spin_lock_irqsave(&rp->b_lock, flags);
		if (rp->mmap_active) {
			mon_free_buff(vec, size/CHUNK_SIZE);
			kfree(vec);
			ret = -EBUSY;
		} else {
			mon_free_buff(rp->b_vec, rp->b_size/CHUNK_SIZE);
			kfree(rp->b_vec);
			rp->b_vec  = vec;
			rp->b_size = size;
			rp->b_read = rp->b_in = rp->b_out = rp->b_cnt = 0;
			rp->cnt_lost = 0;
		}
		spin_unlock_irqrestore(&rp->b_lock, flags);
		mutex_unlock(&rp->fetch_lock);
		}
		break;

	case MON_IOCH_MFLUSH:
		ret = mon_bin_flush(rp, arg);
		break;

	case MON_IOCX_GET:
	case MON_IOCX_GETX:
		{
		struct mon_bin_get getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get)))
			return -EFAULT;

		if (getb.alloc > 0x10000000)	/* Want to cast to u32 */
			return -EINVAL;
		ret = mon_bin_get_event(file, rp, getb.hdr,
		    (cmd == MON_IOCX_GET)? PKT_SZ_API0: PKT_SZ_API1,
		    getb.data, (unsigned int)getb.alloc);
		}
		break;

	case MON_IOCX_MFETCH:
		{
		struct mon_bin_mfetch mfetch;
		struct mon_bin_mfetch __user *uptr;

		uptr = (struct mon_bin_mfetch __user *)arg;

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush) {
			ret = mon_bin_flush(rp, mfetch.nflush);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, mfetch.offvec, mfetch.nfetch);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch))
			return -EFAULT;
		ret = 0;
		}
		break;

	case MON_IOCG_STATS: {
		struct mon_bin_stats __user *sp;
		unsigned int nevents;
		unsigned int ndropped;

		spin_lock_irqsave(&rp->b_lock, flags);
		ndropped = rp->cnt_lost;
		rp->cnt_lost = 0;
		spin_unlock_irqrestore(&rp->b_lock, flags);
		nevents = mon_bin_queued(rp);

		sp = (struct mon_bin_stats __user *)arg;
		if (put_user(ndropped, &sp->dropped))
			return -EFAULT;
		if (put_user(nevents, &sp->queued))
			return -EFAULT;

		}
		break;

	default:
		return -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static long mon_bin_compat_ioctl(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	struct mon_reader_bin *rp = file->private_data;
	int ret;

	switch (cmd) {

	case MON_IOCX_GET32:
	case MON_IOCX_GETX32:
		{
		struct mon_bin_get32 getb;

		if (copy_from_user(&getb, (void __user *)arg,
					    sizeof(struct mon_bin_get32)))
			return -EFAULT;

		ret = mon_bin_get_event(file, rp, compat_ptr(getb.hdr32),
		    (cmd == MON_IOCX_GET32)? PKT_SZ_API0: PKT_SZ_API1,
		    compat_ptr(getb.data32), getb.alloc32);
		if (ret < 0)
			return ret;
		}
		return 0;

	case MON_IOCX_MFETCH32:
		{
		struct mon_bin_mfetch32 mfetch;
		struct mon_bin_mfetch32 __user *uptr;

		uptr = (struct mon_bin_mfetch32 __user *) compat_ptr(arg);

		if (copy_from_user(&mfetch, uptr, sizeof(mfetch)))
			return -EFAULT;

		if (mfetch.nflush32) {
			ret = mon_bin_flush(rp, mfetch.nflush32);
			if (ret < 0)
				return ret;
			if (put_user(ret, &uptr->nflush32))
				return -EFAULT;
		}
		ret = mon_bin_fetch(file, rp, compat_ptr(mfetch.offvec32),
		    mfetch.nfetch32);
		if (ret < 0)
			return ret;
		if (put_user(ret, &uptr->nfetch32))
			return -EFAULT;
		}
		return 0;

	case MON_IOCG_STATS:
		return mon_bin_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

	case MON_IOCQ_URB_LEN:
	case MON_IOCQ_RING_SIZE:
	case MON_IOCT_RING_SIZE:
	case MON_IOCH_MFLUSH:
		return mon_bin_ioctl(file, cmd, arg);

	default:
		;
	}
	return -ENOTTY;
}
#endif /* CONFIG_COMPAT */

static __poll_t
mon_bin_poll(struct file *file, struct poll_table_struct *wait)
{
	struct mon_reader_bin *rp = file->private_data;
	__poll_t mask = 0;
	unsigned long flags;

	if (file->f_mode & FMODE_READ)
		poll_wait(file, &rp->b_wait, wait);

	spin_lock_irqsave(&rp->b_lock, flags);
	if (!MON_RING_EMPTY(rp))
		mask |= EPOLLIN | EPOLLRDNORM;	/* readable */
	spin_unlock_irqrestore(&rp->b_lock, flags);
	return mask;
}

/*
 * open and close: just keep track of how many times the device is
 * mapped, to use the proper memory allocation function.
 */
static void mon_bin_vma_open(struct vm_area_struct *vma)
{
	struct mon_reader_bin *rp = vma->vm_private_data;
	unsigned long flags;

	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active++;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

static void mon_bin_vma_close(struct vm_area_struct *vma)
{
	unsigned long flags;

	struct mon_reader_bin *rp = vma->vm_private_data;
	spin_lock_irqsave(&rp->b_lock, flags);
	rp->mmap_active--;
	spin_unlock_irqrestore(&rp->b_lock, flags);
}

/*
 * Map ring pages to user space.
 */
static vm_fault_t mon_bin_vma_fault(struct vm_fault *vmf)
{
	struct mon_reader_bin *rp = vmf->vma->vm_private_data;
	unsigned long offset, chunk_idx;
	struct page *pageptr;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rp->b_size)
		return VM_FAULT_SIGBUS;
	chunk_idx = offset / CHUNK_SIZE;
	pageptr = rp->b_vec[chunk_idx].pg;
	get_page(pageptr);
	vmf->page = pageptr;
	return 0;
}

static const struct vm_operations_struct mon_bin_vm_ops = {
	.open =     mon_bin_vma_open,
	.close =    mon_bin_vma_close,
	.fault =    mon_bin_vma_fault,
};

static int mon_bin_mmap(struct file *filp, struct vm_area_struct *vma)
{
	/* don't do anything here: "fault" will set up page table entries */
	vma->vm_ops = &mon_bin_vm_ops;

	if (vma->vm_flags & VM_WRITE)
		return -EPERM;

	vm_flags_mod(vma, VM_DONTEXPAND | VM_DONTDUMP, VM_MAYWRITE);
	vma->vm_private_data = filp->private_data;
	mon_bin_vma_open(vma);
	return 0;
}

static const struct file_operations mon_fops_binary = {
	.owner =	THIS_MODULE,
	.open =		mon_bin_open,
	.llseek =	no_llseek,
	.read =		mon_bin_read,
	/* .write =	mon_text_write, */
	.poll =		mon_bin_poll,
	.unlocked_ioctl = mon_bin_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	mon_bin_compat_ioctl,
#endif
	.release =	mon_bin_release,
	.mmap =		mon_bin_mmap,
};

static int mon_bin_wait_event(struct file *file, struct mon_reader_bin *rp)
{
	DECLARE_WAITQUEUE(waita, current);
	unsigned long flags;

	add_wait_queue(&rp->b_wait, &waita);
	set_current_state(TASK_INTERRUPTIBLE);

	spin_lock_irqsave(&rp->b_lock, flags);
	while (MON_RING_EMPTY(rp)) {
		spin_unlock_irqrestore(&rp->b_lock, flags);

		if (file->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&rp->b_wait, &waita);
			return -EWOULDBLOCK; /* Same as EAGAIN in Linux */
		}
		schedule();
		if (signal_pending(current)) {
			remove_wait_queue(&rp->b_wait, &waita);
			return -EINTR;
		}
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irqsave(&rp->b_lock, flags);
	}
	spin_unlock_irqrestore(&rp->b_lock, flags);

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&rp->b_wait, &waita);
	return 0;
}

static int mon_alloc_buff(struct mon_pgmap *map, int npages)
{
	int n;
	unsigned long vaddr;

	for (n = 0; n < npages; n++) {
		vaddr = get_zeroed_page(GFP_KERNEL);
		if (vaddr == 0) {
			while (n-- != 0)
				free_page((unsigned long) map[n].ptr);
			return -ENOMEM;
		}
		map[n].ptr = (unsigned char *) vaddr;
		map[n].pg = virt_to_page((void *) vaddr);
	}
	return 0;
}

static void mon_free_buff(struct mon_pgmap *map, int npages)
{
	int n;

	for (n = 0; n < npages; n++)
		free_page((unsigned long) map[n].ptr);
}
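
/*
 * (Added comment.) Create the usbmon%d device node, one per bus; minor 0
 * is the "bus 0" pseudo-device that sees traffic from all buses.
 */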
int mon_bin_add(struct mon_bus *mbus, const struct usb_bus *ubus)
{
	struct device *dev;
	unsigned minor = ubus? ubus->busnum: 0;

	if (minor >= MON_BIN_MAX_MINOR)
		return 0;

	dev = device_create(mon_bin_class, ubus ? ubus->controller : NULL,
			    MKDEV(MAJOR(mon_bin_dev0), minor), NULL,
			    "usbmon%d", minor);
	if (IS_ERR(dev))
		return 0;

	mbus->classdev = dev;
	return 1;
}

void mon_bin_del(struct mon_bus *mbus)
{
	device_destroy(mon_bin_class, mbus->classdev->devt);
}

int __init mon_bin_init(void)
{
	int rc;

	mon_bin_class = class_create(THIS_MODULE, "usbmon");
	if (IS_ERR(mon_bin_class)) {
		rc = PTR_ERR(mon_bin_class);
		goto err_class;
	}

	rc = alloc_chrdev_region(&mon_bin_dev0, 0, MON_BIN_MAX_MINOR, "usbmon");
	if (rc < 0)
		goto err_dev;

	cdev_init(&mon_bin_cdev, &mon_fops_binary);
	mon_bin_cdev.owner = THIS_MODULE;

	rc = cdev_add(&mon_bin_cdev, mon_bin_dev0, MON_BIN_MAX_MINOR);
	if (rc < 0)
		goto err_add;

	return 0;

err_add:
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
err_dev:
	class_destroy(mon_bin_class);
err_class:
	return rc;
}

void mon_bin_exit(void)
{
	cdev_del(&mon_bin_cdev);
	unregister_chrdev_region(mon_bin_dev0, MON_BIN_MAX_MINOR);
	class_destroy(mon_bin_class);
}