/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES 255

struct uio_device {
        struct module           *owner;
        struct device           *dev;
        int                     minor;
        atomic_t                event;
        struct fasync_struct    *async_queue;
        wait_queue_head_t       wait;
        int                     vma_count;
        struct uio_info         *info;
        struct kobject          *map_dir;
};

static int uio_major;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* UIO class infrastructure */
static struct uio_class {
        struct kref kref;
        struct class *class;
} *uio_class;

/*
 * attributes
 */

struct uio_map {
        struct kobject kobj;
        struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
        return sprintf(buf, "0x%lx\n", mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
        return sprintf(buf, "0x%lx\n", mem->size);
}

struct uio_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct uio_mem *, char *);
        ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct uio_sysfs_entry addr_attribute =
        __ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct uio_sysfs_entry size_attribute =
        __ATTR(size, S_IRUGO, map_size_show, NULL);

static struct attribute *attrs[] = {
        &addr_attribute.attr,
        &size_attribute.attr,
        NULL,   /* need to NULL terminate the list of attributes */
};

static void map_release(struct kobject *kobj)
{
        struct uio_map *map = to_map(kobj);
        kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
                             char *buf)
{
        struct uio_map *map = to_map(kobj);
        struct uio_mem *mem = map->mem;
        struct uio_sysfs_entry *entry;

        entry = container_of(attr, struct uio_sysfs_entry, attr);

        if (!entry->show)
                return -EIO;

        return entry->show(mem, buf);
}

static struct sysfs_ops uio_sysfs_ops = {
        .show = map_type_show,
};

static struct kobj_type map_attr_type = {
        .release        = map_release,
        .sysfs_ops      = &uio_sysfs_ops,
        .default_attrs  = attrs,
};

static ssize_t show_name(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->name);
        else
                return -ENODEV;
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static ssize_t show_version(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%s\n", idev->info->version);
        else
                return -ENODEV;
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);

static ssize_t show_event(struct device *dev,
                          struct device_attribute *attr, char *buf)
{
        struct uio_device *idev = dev_get_drvdata(dev);
        if (idev)
                return sprintf(buf, "%u\n",
                                (unsigned int)atomic_read(&idev->event));
        else
                return -ENODEV;
}
static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);

static struct attribute *uio_attrs[] = {
        &dev_attr_name.attr,
        &dev_attr_version.attr,
        &dev_attr_event.attr,
        NULL,
};

static struct attribute_group uio_attr_grp = {
        .attrs = uio_attrs,
};
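/*
 * With the attributes above in place, a registered device typically
 * shows up in sysfs roughly like this (exact paths can vary with
 * kernel configuration):
 *
 *      /sys/class/uio/uio0/name
 *      /sys/class/uio/uio0/version
 *      /sys/class/uio/uio0/event
 *      /sys/class/uio/uio0/maps/map0/addr
 *      /sys/class/uio/uio0/maps/map0/size
 *
 * One "mapN" directory is created per uio_mem entry with a non-zero
 * size, in the order the driver filled info->mem[].
 */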
/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
        int ret;
        int mi;
        int map_found = 0;
        struct uio_mem *mem;
        struct uio_map *map;

        ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
        if (ret)
                goto err_group;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                if (!map_found) {
                        map_found = 1;
                        idev->map_dir = kobject_create_and_add("maps",
                                                        &idev->dev->kobj);
                        if (!idev->map_dir) {
                                ret = -ENOMEM;
                                goto err;
                        }
                }
                map = kzalloc(sizeof(*map), GFP_KERNEL);
                if (!map) {
                        ret = -ENOMEM;
                        goto err;
                }
                kobject_init(&map->kobj, &map_attr_type);
                map->mem = mem;
                mem->map = map;
                ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
                if (ret)
                        goto err;
                ret = kobject_uevent(&map->kobj, KOBJ_ADD);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (mi--; mi >= 0; mi--) {
                mem = &idev->info->mem[mi];
                map = mem->map;
                kobject_put(&map->kobj);
        }
        kobject_put(idev->map_dir);
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
err_group:
        dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
        return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
        int mi;
        struct uio_mem *mem;
        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                mem = &idev->info->mem[mi];
                if (mem->size == 0)
                        break;
                kobject_put(&mem->map->kobj);
        }
        kobject_put(idev->map_dir);
        sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}

static int uio_get_minor(struct uio_device *idev)
{
        static DEFINE_MUTEX(minor_lock);
        int retval = -ENOMEM;
        int id;

        mutex_lock(&minor_lock);
        if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
                goto exit;

        retval = idr_get_new(&uio_idr, idev, &id);
        if (retval < 0) {
                if (retval == -EAGAIN)
                        retval = -ENOMEM;
                goto exit;
        }
        idev->minor = id & MAX_ID_MASK;
exit:
        mutex_unlock(&minor_lock);
        return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
        idr_remove(&uio_idr, idev->minor);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
        struct uio_device *idev = info->uio_dev;

        atomic_inc(&idev->event);
        wake_up_interruptible(&idev->wait);
        kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
        struct uio_device *idev = (struct uio_device *)dev_id;
        irqreturn_t ret = idev->info->handler(irq, idev->info);

        if (ret == IRQ_HANDLED)
                uio_event_notify(idev->info);

        return ret;
}
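/*
 * A driver plugs its own handler into uio_info->handler; uio_interrupt()
 * above then bumps the event counter whenever that handler returns
 * IRQ_HANDLED.  A minimal sketch of such a handler follows.  The device,
 * its register layout and the "my_hw"/"my_handler" names are hypothetical,
 * and it assumes the driver stored a pointer in info->priv at registration
 * time:
 *
 *      struct my_hw {
 *              void __iomem *regs;
 *      };
 *
 *      static irqreturn_t my_handler(int irq, struct uio_info *info)
 *      {
 *              struct my_hw *hw = info->priv;
 *              u32 status = ioread32(hw->regs + 0x04);
 *
 *              if (!(status & 0x1))
 *                      return IRQ_NONE;        // not our interrupt
 *
 *              iowrite32(0x1, hw->regs + 0x04);        // ack the source
 *              return IRQ_HANDLED;             // event count goes up
 *      }
 */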
struct uio_listener {
        struct uio_device *dev;
        s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
        struct uio_device *idev;
        struct uio_listener *listener;
        int ret = 0;

        idev = idr_find(&uio_idr, iminor(inode));
        if (!idev)
                return -ENODEV;

        if (!try_module_get(idev->owner))
                return -ENODEV;

        listener = kmalloc(sizeof(*listener), GFP_KERNEL);
        if (!listener) {
                ret = -ENOMEM;
                goto err_alloc_listener;
        }

        listener->dev = idev;
        listener->event_count = atomic_read(&idev->event);
        filep->private_data = listener;

        if (idev->info->open) {
                ret = idev->info->open(idev->info, inode);
                if (ret)
                        goto err_infoopen;
        }

        return 0;

err_infoopen:
        kfree(listener);

err_alloc_listener:
        module_put(idev->owner);

        return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
        int ret = 0;
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->release)
                ret = idev->info->release(idev->info, inode);

        module_put(idev->owner);

        if (filep->f_flags & FASYNC)
                ret = uio_fasync(-1, filep, 0);
        kfree(listener);
        return ret;
}

static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        poll_wait(filep, &idev->wait, wait);
        if (listener->event_count != atomic_read(&idev->event))
                return POLLIN | POLLRDNORM;
        return 0;
}

static ssize_t uio_read(struct file *filep, char __user *buf,
                        size_t count, loff_t *ppos)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        DECLARE_WAITQUEUE(wait, current);
        ssize_t retval;
        s32 event_count;

        if (idev->info->irq == UIO_IRQ_NONE)
                return -EIO;

        if (count != sizeof(s32))
                return -EINVAL;

        add_wait_queue(&idev->wait, &wait);

        do {
                set_current_state(TASK_INTERRUPTIBLE);

                event_count = atomic_read(&idev->event);
                if (event_count != listener->event_count) {
                        if (copy_to_user(buf, &event_count, count))
                                retval = -EFAULT;
                        else {
                                listener->event_count = event_count;
                                retval = count;
                        }
                        break;
                }

                if (filep->f_flags & O_NONBLOCK) {
                        retval = -EAGAIN;
                        break;
                }

                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                schedule();
        } while (1);

        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&idev->wait, &wait);

        return retval;
}
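/*
 * From userspace, a read() of exactly four bytes on the device node
 * blocks until the interrupt counter changes and then returns its new
 * value, as implemented by uio_read() above.  A minimal sketch, with
 * error handling trimmed and the device path assumed:
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <stdio.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fd = open("/dev/uio0", O_RDWR);
 *              int32_t count;
 *
 *              // each successful read reports the running event count
 *              while (read(fd, &count, sizeof(count)) == sizeof(count))
 *                      printf("interrupts so far: %d\n", count);
 *              return 0;
 *      }
 */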
static int uio_find_mem_index(struct vm_area_struct *vma)
{
        int mi;
        struct uio_device *idev = vma->vm_private_data;

        for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
                if (idev->info->mem[mi].size == 0)
                        return -1;
                if (vma->vm_pgoff == mi)
                        return mi;
        }
        return -1;
}

static void uio_vma_open(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count++;
}

static void uio_vma_close(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        idev->vma_count--;
}

static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct uio_device *idev = vma->vm_private_data;
        struct page *page;

        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return VM_FAULT_SIGBUS;

        if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
                page = virt_to_page(idev->info->mem[mi].addr);
        else
                page = vmalloc_to_page((void *)idev->info->mem[mi].addr);
        get_page(page);
        vmf->page = page;
        return 0;
}

static struct vm_operations_struct uio_vm_ops = {
        .open = uio_vma_open,
        .close = uio_vma_close,
        .fault = uio_vma_fault,
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
        struct uio_device *idev = vma->vm_private_data;
        int mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        vma->vm_flags |= VM_IO | VM_RESERVED;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma,
                               vma->vm_start,
                               idev->info->mem[mi].addr >> PAGE_SHIFT,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

static int uio_mmap_logical(struct vm_area_struct *vma)
{
        vma->vm_flags |= VM_RESERVED;
        vma->vm_ops = &uio_vm_ops;
        uio_vma_open(vma);
        return 0;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
        struct uio_listener *listener = filep->private_data;
        struct uio_device *idev = listener->dev;
        int mi;
        unsigned long requested_pages, actual_pages;
        int ret = 0;

        if (vma->vm_end < vma->vm_start)
                return -EINVAL;

        vma->vm_private_data = idev;

        mi = uio_find_mem_index(vma);
        if (mi < 0)
                return -EINVAL;

        requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        actual_pages = (idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (requested_pages > actual_pages)
                return -EINVAL;

        if (idev->info->mmap) {
                ret = idev->info->mmap(idev->info, vma);
                return ret;
        }

        switch (idev->info->mem[mi].memtype) {
        case UIO_MEM_PHYS:
                return uio_mmap_physical(vma);
        case UIO_MEM_LOGICAL:
        case UIO_MEM_VIRTUAL:
                return uio_mmap_logical(vma);
        default:
                return -EINVAL;
        }
}

static const struct file_operations uio_fops = {
        .owner          = THIS_MODULE,
        .open           = uio_open,
        .release        = uio_release,
        .read           = uio_read,
        .mmap           = uio_mmap,
        .poll           = uio_poll,
        .fasync         = uio_fasync,
};
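/*
 * Because uio_find_mem_index() matches vm_pgoff against the map index,
 * userspace selects memory region N by passing N * page-size as the
 * mmap() offset.  A minimal sketch; the device path and the assumption
 * that mem[1] exists and is at least one page are illustrative (the
 * real size can be read from maps/map1/size in sysfs):
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              int fd = open("/dev/uio0", O_RDWR);
 *              long pagesize = sysconf(_SC_PAGESIZE);
 *
 *              // offset 1 * pagesize makes vm_pgoff == 1, i.e. mem[1]
 *              void *regs = mmap(NULL, pagesize, PROT_READ | PROT_WRITE,
 *                                MAP_SHARED, fd, 1 * pagesize);
 *              return regs == MAP_FAILED;
 *      }
 */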
static int uio_major_init(void)
{
        uio_major = register_chrdev(0, "uio", &uio_fops);
        if (uio_major < 0)
                return uio_major;
        return 0;
}

static void uio_major_cleanup(void)
{
        unregister_chrdev(uio_major, "uio");
}

static int init_uio_class(void)
{
        int ret = 0;

        if (uio_class != NULL) {
                kref_get(&uio_class->kref);
                goto exit;
        }

        /* This is the first time in here, set everything up properly */
        ret = uio_major_init();
        if (ret)
                goto exit;

        uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
        if (!uio_class) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        kref_init(&uio_class->kref);
        uio_class->class = class_create(THIS_MODULE, "uio");
        if (IS_ERR(uio_class->class)) {
                ret = PTR_ERR(uio_class->class);
                printk(KERN_ERR "class_create failed for uio\n");
                goto err_class_create;
        }
        return 0;

err_class_create:
        kfree(uio_class);
        uio_class = NULL;
err_kzalloc:
        uio_major_cleanup();
exit:
        return ret;
}

static void release_uio_class(struct kref *kref)
{
        /* Ok, we cheat as we know we only have one uio_class */
        class_destroy(uio_class->class);
        kfree(uio_class);
        uio_major_cleanup();
        uio_class = NULL;
}

static void uio_class_destroy(void)
{
        if (uio_class)
                kref_put(&uio_class->kref, release_uio_class);
}

/**
 * uio_register_device - register a new userspace IO device
 * @owner:      module that creates the new device
 * @parent:     parent device
 * @info:       UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
                          struct device *parent,
                          struct uio_info *info)
{
        struct uio_device *idev;
        int ret = 0;

        if (!parent || !info || !info->name || !info->version)
                return -EINVAL;

        info->uio_dev = NULL;

        ret = init_uio_class();
        if (ret)
                return ret;

        idev = kzalloc(sizeof(*idev), GFP_KERNEL);
        if (!idev) {
                ret = -ENOMEM;
                goto err_kzalloc;
        }

        idev->owner = owner;
        idev->info = info;
        init_waitqueue_head(&idev->wait);
        atomic_set(&idev->event, 0);

        ret = uio_get_minor(idev);
        if (ret)
                goto err_get_minor;

        idev->dev = device_create_drvdata(uio_class->class, parent,
                                          MKDEV(uio_major, idev->minor), idev,
                                          "uio%d", idev->minor);
        if (IS_ERR(idev->dev)) {
                printk(KERN_ERR "UIO: device register failed\n");
                ret = PTR_ERR(idev->dev);
                goto err_device_create;
        }

        ret = uio_dev_add_attributes(idev);
        if (ret)
                goto err_uio_dev_add_attributes;

        info->uio_dev = idev;

        if (idev->info->irq >= 0) {
                ret = request_irq(idev->info->irq, uio_interrupt,
                                  idev->info->irq_flags, idev->info->name,
                                  idev);
                if (ret)
                        goto err_request_irq;
        }

        return 0;

err_request_irq:
        uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
        uio_free_minor(idev);
err_get_minor:
        kfree(idev);
err_kzalloc:
        uio_class_destroy();
        return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 */
void uio_unregister_device(struct uio_info *info)
{
        struct uio_device *idev;

        if (!info || !info->uio_dev)
                return;

        idev = info->uio_dev;

        uio_free_minor(idev);

        if (info->irq >= 0)
                free_irq(info->irq, idev);

        uio_dev_del_attributes(idev);

        dev_set_drvdata(idev->dev, NULL);
        device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
        kfree(idev);
        uio_class_destroy();

        return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
        return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");
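/*
 * Example: how a driver is meant to use the registration API above.
 * Everything below is a hypothetical sketch (device name, addresses,
 * IRQ number and the my_* identifiers are assumptions), using the
 * uio_register_device() convenience macro from <linux/uio_driver.h>,
 * which supplies THIS_MODULE to __uio_register_device():
 *
 *      static struct uio_info my_uio_info;
 *
 *      static int my_probe(struct platform_device *pdev)
 *      {
 *              my_uio_info.name = "my_uio";
 *              my_uio_info.version = "0.1";
 *              my_uio_info.mem[0].addr = 0xd0000000;   // physical base
 *              my_uio_info.mem[0].size = 0x1000;
 *              my_uio_info.mem[0].memtype = UIO_MEM_PHYS;
 *              my_uio_info.irq = 42;
 *              my_uio_info.irq_flags = 0;
 *              my_uio_info.handler = my_handler;       // see sketch above
 *
 *              return uio_register_device(&pdev->dev, &my_uio_info);
 *      }
 *
 *      static int my_remove(struct platform_device *pdev)
 *      {
 *              uio_unregister_device(&my_uio_info);
 *              return 0;
 *      }
 */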