/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES 255

struct uio_device {
        struct module           *owner;
        struct device           *dev;
        int                     minor;
        atomic_t                event;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       wait;
        int                     vma_count;
        struct uio_info         *info;
        struct kobject          *map_dir;
};

static int uio_major;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* UIO class infrastructure */
static struct uio_class {
        struct kref kref;
        struct class *class;
} *uio_class;

/* Protect idr accesses */
static DEFINE_MUTEX(minor_lock);

/*
 * attributes
 */

struct uio_map {
        struct kobject kobj;
        struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
        return sprintf(buf, "0x%lx\n", mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
        return sprintf(buf, "0x%lx\n", mem->size);
}

static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
{
        return sprintf(buf, "0x%lx\n", mem->addr & ~PAGE_MASK);
}

struct uio_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct uio_mem *, char *);
        ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct uio_sysfs_entry addr_attribute =
        __ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct uio_sysfs_entry size_attribute =
        __ATTR(size, S_IRUGO, map_size_show, NULL);
static struct uio_sysfs_entry offset_attribute =
        __ATTR(offset, S_IRUGO, map_offset_show, NULL);

static struct attribute *attrs[] = {
        &addr_attribute.attr,
        &size_attribute.attr,
        &offset_attribute.attr,
        NULL,   /* need to NULL terminate the list of attributes */
};

static void map_release(struct kobject *kobj)
{
        struct uio_map *map = to_map(kobj);
        kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct uio_map *map = to_map(kobj);
        struct uio_mem *mem = map->mem;
        struct uio_sysfs_entry *entry;

        entry = container_of(attr, struct uio_sysfs_entry, attr);

        if (!entry->show)
                return -EIO;

        return entry->show(mem, buf);
}

static struct sysfs_ops uio_sysfs_ops = {
        .show = map_type_show,
};

static struct kobj_type map_attr_type = {
        .release        = map_release,
        .sysfs_ops      = &uio_sysfs_ops,
        .default_attrs  = attrs,
};
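/*
 * The kobj_type above gives each mapping a sysfs directory of the form
 * /sys/class/uio/uioN/maps/mapM with read-only "addr", "size" and
 * "offset" files. A minimal userspace sketch of reading one of them
 * (the uio0/map0 path is illustrative; the actual names depend on the
 * registered device and its mappings):
 *
 *      FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
 *      unsigned long size;
 *      if (f && fscanf(f, "0x%lx", &size) == 1)
 *              printf("map0 is 0x%lx bytes\n", size);
 *      if (f)
 *              fclose(f);
 */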
static ssize_t show_name(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->name);
        else
                return -ENODEV;
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static ssize_t show_version(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->version);
        else
                return -ENODEV;
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);

static ssize_t show_event(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%u\n",
                               (unsigned int)atomic_read(&idev->event));
        else
                return -ENODEV;
}
static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);

static struct attribute *uio_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_version.attr,
        &dev_attr_event.attr,
        NULL,
};

static struct attribute_group uio_attr_grp = {
        .attrs = uio_attrs,
};

/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
        int ret;
        int mi;
        int map_found = 0;
        struct uio_mem *mem;
        struct uio_map *map;

        ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
        if (ret)
                goto err_group;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                if (!map_found) {
                        map_found = 1;
                        idev->map_dir = kobject_create_and_add("maps",
                                                        &idev->dev->kobj);
                        if (!idev->map_dir) {
                                ret = -ENOMEM; /* don't return 0 from err path */
                                goto err;
                        }
                }
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (!map) {
                        ret = -ENOMEM;
                        goto err;
                }
                kobject_init(&map->kobj, &map_attr_type);
                map->mem = mem;
                mem->map = map;
                ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
                if (ret)
                        goto err;
                ret = kobject_uevent(&map->kobj, KOBJ_ADD);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (mi--; mi >= 0; mi--) {
                mem = &idev->info->mem[mi];
                map = mem->map;
                kobject_put(&map->kobj);
        }
        kobject_put(idev->map_dir);
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
err_group:
        dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
        return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
        int mi;
        struct uio_mem *mem;
        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                kobject_put(&mem->map->kobj);
        }
        kobject_put(idev->map_dir);
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}

static int uio_get_minor(struct uio_device *idev)
{
        int retval = -ENOMEM;
        int id;

        mutex_lock(&minor_lock);
        if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
                goto exit;

        retval = idr_get_new(&uio_idr, idev, &id);
        if (retval < 0) {
                if (retval == -EAGAIN)
                        retval = -ENOMEM;
                goto exit;
        }
        idev->minor = id & MAX_ID_MASK;
exit:
        mutex_unlock(&minor_lock);
        return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
        mutex_lock(&minor_lock);
        idr_remove(&uio_idr, idev->minor);
        mutex_unlock(&minor_lock);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
        struct uio_device *idev = info->uio_dev;

        atomic_inc(&idev->event);
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);
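/*
 * A minimal sketch of how a driver with no hardware interrupt line might
 * call uio_event_notify() directly, e.g. from a timer. The my_uio_info
 * variable, my_timer and the UIO_IRQ_CUSTOM setup are assumptions for
 * illustration only, not part of this file:
 *
 *      static struct uio_info my_uio_info;     // .irq = UIO_IRQ_CUSTOM
 *      static struct timer_list my_timer;
 *
 *      static void my_timer_fn(unsigned long data)
 *      {
 *              uio_event_notify(&my_uio_info); // wakes blocked readers
 *              mod_timer(&my_timer, jiffies + HZ);
 *      }
 */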
/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
        struct uio_device *idev = (struct uio_device *)dev_id;
        irqreturn_t ret = idev->info->handler(irq, idev->info);

        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);

        return ret;
}

struct uio_listener {
        struct uio_device *dev;
        s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;

        mutex_lock(&minor_lock);
        idev = idr_find(&uio_idr, iminor(inode));
        mutex_unlock(&minor_lock);
        if (!idev) {
                ret = -ENODEV;
                goto out;
        }

        if (!try_module_get(idev->owner)) {
                ret = -ENODEV;
                goto out;
        }

        listener = kmalloc(sizeof(*listener), GFP_KERNEL);
        if (!listener) {
                ret = -ENOMEM;
                goto err_alloc_listener;
        }

        listener->dev = idev;
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;

        if (idev->info->open) {
                ret = idev->info->open(idev->info, inode);
                if (ret)
                        goto err_infoopen;
        }
        return 0;

err_infoopen:
        kfree(listener);

err_alloc_listener:
        module_put(idev->owner);

out:
        return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->release)
                ret = idev->info->release(idev->info, inode);

        module_put(idev->owner);
        kfree(listener);
        return ret;
}

static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        poll_wait(filep, &idev->wait, wait);
        if (listener->event_count != atomic_read(&idev->event))
                return POLLIN | POLLRDNORM;
        return 0;
}

static ssize_t uio_read(struct file *filep, char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval;
        s32 event_count;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        if (count != sizeof(s32))
                return -EINVAL;

        add_wait_queue(&idev->wait, &wait);

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                event_count = atomic_read(&idev->event);
                if (event_count != listener->event_count) {
                        if (copy_to_user(buf, &event_count, count))
                                retval = -EFAULT;
                        else {
                                listener->event_count = event_count;
                                retval = count;
                        }
                        break;
                }

                if (filep->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&idev->wait, &wait);

        return retval;
}
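/*
 * From userspace, uio_read() above behaves as a blocking wait for the next
 * interrupt: a read of exactly sizeof(s32) bytes returns the running event
 * count. A minimal sketch (the /dev/uio0 path is an assumption; the node
 * name depends on the registered minor):
 *
 *      int fd = open("/dev/uio0", O_RDONLY);
 *      int32_t event_count;
 *      // blocks until uio_event_notify() bumps idev->event
 *      if (read(fd, &event_count, sizeof(event_count)) ==
 *                      sizeof(event_count))
 *              printf("seen %d interrupts so far\n", event_count);
 */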
static ssize_t uio_write(struct file *filep, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        ssize_t retval;
        s32 irq_on;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        if (count != sizeof(s32))
                return -EINVAL;

        if (!idev->info->irqcontrol)
                return -ENOSYS;

        if (copy_from_user(&irq_on, buf, count))
                return -EFAULT;

        retval = idev->info->irqcontrol(idev->info, irq_on);

        return retval ? retval : sizeof(s32);
}

static int uio_find_mem_index(struct vm_area_struct *vma)
{
        int mi;
        struct uio_device *idev = vma->vm_private_data;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                if (idev->info->mem[mi].size == 0)
                        return -1;
                if (vma->vm_pgoff == mi)
                        return mi;
        }
        return -1;
}

static void uio_vma_open(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count++;
}

static void uio_vma_close(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count--;
}

static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct uio_device *idev = vma->vm_private_data;
        struct page *page;
        unsigned long offset;

        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return VM_FAULT_SIGBUS;

        /*
         * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
         * to use mem[N].
         */
        offset = (vmf->pgoff - mi) << PAGE_SHIFT;

        if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
                page = virt_to_page(idev->info->mem[mi].addr + offset);
        else
                page = vmalloc_to_page((void *)idev->info->mem[mi].addr
                                       + offset);
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct uio_vm_ops = {
        .open = uio_vma_open,
        .close = uio_vma_close,
        .fault = uio_vma_fault,
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_RESERVED;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma,
                               vma->vm_start,
                               idev->info->mem[mi].addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static int uio_mmap_logical(struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &uio_vm_ops;
        uio_vma_open(vma);
        return 0;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        int mi;
        unsigned long requested_pages, actual_pages;
        int ret = 0;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;

        vma->vm_private_data = idev;

        mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        actual_pages = (idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (requested_pages > actual_pages)
                return -EINVAL;

        if (idev->info->mmap) {
                ret = idev->info->mmap(idev->info, vma);
                return ret;
        }

        switch (idev->info->mem[mi].memtype) {
        case UIO_MEM_PHYS:
                return uio_mmap_physical(vma);
        case UIO_MEM_LOGICAL:
        case UIO_MEM_VIRTUAL:
                return uio_mmap_logical(vma);
        default:
                return -EINVAL;
        }
}
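/*
 * As the vm_pgoff check in uio_find_mem_index() implies, userspace selects
 * mapping N by passing offset = N * page-size to mmap(). A minimal sketch
 * (fd, size0/size1 and the already opened /dev/uio0 are assumptions for
 * illustration):
 *
 *      void *p0 = mmap(NULL, size0, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      fd, 0 * getpagesize());         // mem[0]
 *      void *p1 = mmap(NULL, size1, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                      fd, 1 * getpagesize());         // mem[1]
 *
 * Writing a s32 (1 or 0) to the same fd reaches uio_write() above and, if
 * the driver implements it, ends up in its irqcontrol() hook:
 *
 *      int32_t irq_on = 1;
 *      write(fd, &irq_on, sizeof(irq_on));     // re-enable interrupts
 */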
static const struct file_operations uio_fops = {
        .owner          = THIS_MODULE,
        .open           = uio_open,
        .release        = uio_release,
        .read           = uio_read,
        .write          = uio_write,
        .mmap           = uio_mmap,
        .poll           = uio_poll,
        .fasync         = uio_fasync,
};

static int uio_major_init(void)
{
        uio_major = register_chrdev(0, "uio", &uio_fops);
        if (uio_major < 0)
                return uio_major;
        return 0;
}

static void uio_major_cleanup(void)
{
        unregister_chrdev(uio_major, "uio");
}

static int init_uio_class(void)
{
        int ret = 0;

        if (uio_class != NULL) {
                kref_get(&uio_class->kref);
                goto exit;
        }

        /* This is the first time in here, set everything up properly */
        ret = uio_major_init();
        if (ret)
                goto exit;

        uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
        if (!uio_class) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        kref_init(&uio_class->kref);
        uio_class->class = class_create(THIS_MODULE, "uio");
        if (IS_ERR(uio_class->class)) {
                ret = PTR_ERR(uio_class->class);
                printk(KERN_ERR "class_create failed for uio\n");
                goto err_class_create;
        }
        return 0;

err_class_create:
        kfree(uio_class);
        uio_class = NULL;
err_kzalloc:
        uio_major_cleanup();
exit:
        return ret;
}

static void release_uio_class(struct kref *kref)
{
        /* Ok, we cheat as we know we only have one uio_class */
        class_destroy(uio_class->class);
        kfree(uio_class);
        uio_major_cleanup();
        uio_class = NULL;
}

static void uio_class_destroy(void)
{
        if (uio_class)
                kref_put(&uio_class->kref, release_uio_class);
}
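/*
 * A minimal sketch of how a driver is expected to feed uio_register_device()
 * below. The platform device wiring, my_probe(), my_handler() and the
 * resource values are assumptions for illustration only:
 *
 *      static irqreturn_t my_handler(int irq, struct uio_info *info)
 *      {
 *              // check and acknowledge the hardware here; returning
 *              // IRQ_HANDLED makes uio_interrupt() notify listeners
 *              return IRQ_HANDLED;
 *      }
 *
 *      static int my_probe(struct platform_device *pdev)
 *      {
 *              struct uio_info *info;
 *
 *              info = kzalloc(sizeof(*info), GFP_KERNEL);
 *              if (!info)
 *                      return -ENOMEM;
 *              info->name = "my_uio_device";
 *              info->version = "0.1";
 *              info->mem[0].addr = 0xd0000000;         // example BAR
 *              info->mem[0].size = 0x1000;
 *              info->mem[0].memtype = UIO_MEM_PHYS;
 *              info->irq = platform_get_irq(pdev, 0);
 *              info->handler = my_handler;
 *              platform_set_drvdata(pdev, info);
 *              return uio_register_device(&pdev->dev, info);
 *      }
 */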
/**
 * __uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
                          struct device *parent,
                          struct uio_info *info)
{
        struct uio_device *idev;
        int ret = 0;

        if (!parent || !info || !info->name || !info->version)
                return -EINVAL;

        info->uio_dev = NULL;

        ret = init_uio_class();
        if (ret)
                return ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        idev->owner = owner;
        idev->info = info;
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);

        ret = uio_get_minor(idev);
        if (ret)
                goto err_get_minor;

        idev->dev = device_create(uio_class->class, parent,
                                  MKDEV(uio_major, idev->minor), idev,
                                  "uio%d", idev->minor);
        if (IS_ERR(idev->dev)) {
                printk(KERN_ERR "UIO: device register failed\n");
                ret = PTR_ERR(idev->dev);
                goto err_device_create;
        }

        ret = uio_dev_add_attributes(idev);
        if (ret)
                goto err_uio_dev_add_attributes;

        info->uio_dev = idev;

        if (idev->info->irq >= 0) {
                ret = request_irq(idev->info->irq, uio_interrupt,
                                  idev->info->irq_flags, idev->info->name,
                                  idev);
                if (ret)
                        goto err_request_irq;
        }

        return 0;

err_request_irq:
        uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
        uio_free_minor(idev);
err_get_minor:
        kfree(idev);
err_kzalloc:
        uio_class_destroy();
        return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
        struct uio_device *idev;

        if (!info || !info->uio_dev)
                return;

        idev = info->uio_dev;

        uio_free_minor(idev);

        if (info->irq >= 0)
                free_irq(info->irq, idev);

        uio_dev_del_attributes(idev);

        dev_set_drvdata(idev->dev, NULL);
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
        kfree(idev);
        uio_class_destroy();
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
        return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");
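/*
 * The matching teardown for the registration sketch above would live in
 * the driver's remove path; my_remove() and the info lifetime handling
 * are again assumptions for illustration:
 *
 *      static int my_remove(struct platform_device *pdev)
 *      {
 *              struct uio_info *info = platform_get_drvdata(pdev);
 *
 *              uio_unregister_device(info);    // frees irq, sysfs, minor
 *              kfree(info);
 *              return 0;
 *      }
 */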