/*
 * drivers/uio/uio.c
 *
 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
 *
 * Userspace IO
 *
 * Base Functions
 *
 * Licensed under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/slab.h>		/* kmalloc/kzalloc */
#include <linux/mm.h>
#include <linux/idr.h>
#include <linux/sched.h>	/* current, signal_pending */
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/interrupt.h>	/* request_irq/free_irq */
#include <linux/uio_driver.h>

#define UIO_MAX_DEVICES 255

struct uio_device {
	struct module		*owner;
	struct device		*dev;
	int			minor;
	atomic_t		event;
	struct fasync_struct	*async_queue;
	wait_queue_head_t	wait;
	int			vma_count;
	struct uio_info		*info;
	struct kobject		*map_dir;
};

static int uio_major;
static DEFINE_IDR(uio_idr);
static const struct file_operations uio_fops;

/* UIO class infrastructure */
static struct uio_class {
	struct kref kref;
	struct class *class;
} *uio_class;

/*
 * attributes
 */

struct uio_map {
	struct kobject kobj;
	struct uio_mem *mem;
};
#define to_map(map) container_of(map, struct uio_map, kobj)

static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%lx\n", mem->addr);
}

static ssize_t map_size_show(struct uio_mem *mem, char *buf)
{
	return sprintf(buf, "0x%lx\n", mem->size);
}

struct uio_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct uio_mem *, char *);
	ssize_t (*store)(struct uio_mem *, const char *, size_t);
};

static struct uio_sysfs_entry addr_attribute =
	__ATTR(addr, S_IRUGO, map_addr_show, NULL);
static struct uio_sysfs_entry size_attribute =
	__ATTR(size, S_IRUGO, map_size_show, NULL);

static struct attribute *attrs[] = {
	&addr_attribute.attr,
	&size_attribute.attr,
	NULL,	/* need to NULL terminate the list of attributes */
};

static void map_release(struct kobject *kobj)
{
	struct uio_map *map = to_map(kobj);
	kfree(map);
}

static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct uio_map *map = to_map(kobj);
	struct uio_mem *mem = map->mem;
	struct uio_sysfs_entry *entry;

	entry = container_of(attr, struct uio_sysfs_entry, attr);

	if (!entry->show)
		return -EIO;

	return entry->show(mem, buf);
}

static struct sysfs_ops uio_sysfs_ops = {
	.show = map_type_show,
};

static struct kobj_type map_attr_type = {
	.release	= map_release,
	.sysfs_ops	= &uio_sysfs_ops,
	.default_attrs	= attrs,
};

static ssize_t show_name(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	if (idev)
		return sprintf(buf, "%s\n", idev->info->name);
	else
		return -ENODEV;
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static ssize_t show_version(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	if (idev)
		return sprintf(buf, "%s\n", idev->info->version);
	else
		return -ENODEV;
}
static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);

static ssize_t show_event(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	struct uio_device *idev = dev_get_drvdata(dev);
	if (idev)
		return sprintf(buf, "%u\n",
				(unsigned int)atomic_read(&idev->event));
	else
		return -ENODEV;
}
static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);

static struct attribute *uio_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_version.attr,
	&dev_attr_event.attr,
	NULL,
};

static struct attribute_group uio_attr_grp = {
	.attrs = uio_attrs,
};
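/*
 * Together, the attribute group above and the per-mapping kobjects created
 * in uio_dev_add_attributes() below give each registered device a sysfs
 * tree like the following (a sketch, assuming the first device registers
 * as uio0 and exports one memory region):
 *
 *	/sys/class/uio/uio0/name
 *	/sys/class/uio/uio0/version
 *	/sys/class/uio/uio0/event
 *	/sys/class/uio/uio0/maps/map0/addr
 *	/sys/class/uio/uio0/maps/map0/size
 *
 * Userspace can discover a region's size before calling mmap(), e.g.:
 *
 *	FILE *f = fopen("/sys/class/uio/uio0/maps/map0/size", "r");
 *	unsigned long size;
 *	if (f && fscanf(f, "0x%lx", &size) == 1)
 *		... size of map0, in bytes ...
 */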
/*
 * device functions
 */
static int uio_dev_add_attributes(struct uio_device *idev)
{
	int ret;
	int mi;
	int map_found = 0;
	struct uio_mem *mem;
	struct uio_map *map;

	ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
	if (ret)
		goto err_group;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		if (!map_found) {
			map_found = 1;
			idev->map_dir = kobject_create_and_add("maps",
							&idev->dev->kobj);
			if (!idev->map_dir) {
				ret = -ENOMEM;
				goto err;
			}
		}
		map = kzalloc(sizeof(*map), GFP_KERNEL);
		if (!map) {
			ret = -ENOMEM;
			goto err;
		}
		kobject_init(&map->kobj, &map_attr_type);
		map->mem = mem;
		mem->map = map;
		ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi);
		if (ret) {
			/* the cleanup loop below skips index mi */
			kobject_put(&map->kobj);
			goto err;
		}
		ret = kobject_uevent(&map->kobj, KOBJ_ADD);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (mi--; mi >= 0; mi--) {
		mem = &idev->info->mem[mi];
		map = mem->map;
		kobject_put(&map->kobj);
	}
	kobject_put(idev->map_dir);
	sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
err_group:
	dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
	return ret;
}

static void uio_dev_del_attributes(struct uio_device *idev)
{
	int mi;
	struct uio_mem *mem;
	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		mem = &idev->info->mem[mi];
		if (mem->size == 0)
			break;
		kobject_put(&mem->map->kobj);
	}
	kobject_put(idev->map_dir);
	sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
}

static int uio_get_minor(struct uio_device *idev)
{
	static DEFINE_MUTEX(minor_lock);
	int retval = -ENOMEM;
	int id;

	mutex_lock(&minor_lock);
	if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
		goto exit;

	retval = idr_get_new(&uio_idr, idev, &id);
	if (retval < 0) {
		if (retval == -EAGAIN)
			retval = -ENOMEM;
		goto exit;
	}
	idev->minor = id & MAX_ID_MASK;
exit:
	mutex_unlock(&minor_lock);
	return retval;
}

static void uio_free_minor(struct uio_device *idev)
{
	idr_remove(&uio_idr, idev->minor);
}

/**
 * uio_event_notify - trigger an interrupt event
 * @info: UIO device capabilities
 */
void uio_event_notify(struct uio_info *info)
{
	struct uio_device *idev = info->uio_dev;

	atomic_inc(&idev->event);
	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
}
EXPORT_SYMBOL_GPL(uio_event_notify);

/**
 * uio_interrupt - hardware interrupt handler
 * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
 * @dev_id: Pointer to the device's uio_device structure
 */
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
	struct uio_device *idev = (struct uio_device *)dev_id;
	irqreturn_t ret = idev->info->handler(irq, idev->info);

	if (ret == IRQ_HANDLED)
		uio_event_notify(idev->info);

	return ret;
}
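/*
 * A driver's info->handler typically just checks whether its hardware
 * raised the interrupt, silences it, and returns IRQ_HANDLED so that
 * uio_interrupt() above bumps the event counter. A minimal sketch,
 * assuming a hypothetical device with a status register at offset
 * MYDEV_IRQ_STATUS in its first memory region:
 *
 *	static irqreturn_t mydev_handler(int irq, struct uio_info *info)
 *	{
 *		void __iomem *base = info->mem[0].internal_addr;
 *		u32 status = ioread32(base + MYDEV_IRQ_STATUS);
 *
 *		if (!(status & MYDEV_IRQ_PENDING))
 *			return IRQ_NONE;
 *		iowrite32(status, base + MYDEV_IRQ_STATUS);
 *		return IRQ_HANDLED;
 *	}
 *
 * Returning IRQ_NONE on a shared line leaves the event counter alone;
 * acknowledging the interrupt before returning IRQ_HANDLED keeps a
 * level-triggered line from firing again immediately.
 */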
struct uio_listener {
	struct uio_device *dev;
	s32 event_count;
};

static int uio_open(struct inode *inode, struct file *filep)
{
	struct uio_device *idev;
	struct uio_listener *listener;
	int ret = 0;

	idev = idr_find(&uio_idr, iminor(inode));
	if (!idev)
		return -ENODEV;

	listener = kmalloc(sizeof(*listener), GFP_KERNEL);
	if (!listener)
		return -ENOMEM;

	listener->dev = idev;
	listener->event_count = atomic_read(&idev->event);
	filep->private_data = listener;

	if (idev->info->open) {
		if (!try_module_get(idev->owner)) {
			/* don't leak the listener if the owner went away */
			ret = -ENODEV;
			goto out;
		}
		ret = idev->info->open(idev->info, inode);
		module_put(idev->owner);
	}

out:
	if (ret)
		kfree(listener);

	return ret;
}

static int uio_fasync(int fd, struct file *filep, int on)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	return fasync_helper(fd, filep, on, &idev->async_queue);
}

static int uio_release(struct inode *inode, struct file *filep)
{
	int ret = 0;
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (idev->info->release) {
		if (try_module_get(idev->owner)) {
			ret = idev->info->release(idev->info, inode);
			module_put(idev->owner);
		} else {
			/* still free the listener below */
			ret = -ENODEV;
		}
	}
	if (filep->f_flags & FASYNC)
		ret = uio_fasync(-1, filep, 0);
	kfree(listener);
	return ret;
}

static unsigned int uio_poll(struct file *filep, poll_table *wait)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;

	if (idev->info->irq == UIO_IRQ_NONE)
		return -EIO;

	poll_wait(filep, &idev->wait, wait);
	if (listener->event_count != atomic_read(&idev->event))
		return POLLIN | POLLRDNORM;
	return 0;
}

static ssize_t uio_read(struct file *filep, char __user *buf,
			size_t count, loff_t *ppos)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	DECLARE_WAITQUEUE(wait, current);
	ssize_t retval;
	s32 event_count;

	if (idev->info->irq == UIO_IRQ_NONE)
		return -EIO;

	if (count != sizeof(s32))
		return -EINVAL;

	add_wait_queue(&idev->wait, &wait);

	do {
		set_current_state(TASK_INTERRUPTIBLE);

		event_count = atomic_read(&idev->event);
		if (event_count != listener->event_count) {
			if (copy_to_user(buf, &event_count, count))
				retval = -EFAULT;
			else {
				listener->event_count = event_count;
				retval = count;
			}
			break;
		}

		if (filep->f_flags & O_NONBLOCK) {
			retval = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		schedule();
	} while (1);

	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&idev->wait, &wait);

	return retval;
}
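/*
 * From userspace, reads on /dev/uioX follow the protocol implemented
 * above: every read() must be exactly sizeof(s32) bytes and returns the
 * current event count, blocking until it differs from the count seen by
 * the previous read on the same fd. A minimal sketch of an event loop
 * (device path and handle_interrupts() are illustrative only):
 *
 *	int fd = open("/dev/uio0", O_RDWR);
 *	int32_t event_count;
 *
 *	while (read(fd, &event_count, sizeof(event_count))
 *			== sizeof(event_count))
 *		handle_interrupts(event_count);
 *
 * Because only the counter is reported, a slow reader sees one wakeup
 * for several interrupts; drivers must design their hardware protocol
 * around that.
 */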
static int uio_find_mem_index(struct vm_area_struct *vma)
{
	int mi;
	struct uio_device *idev = vma->vm_private_data;

	for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
		if (idev->info->mem[mi].size == 0)
			return -1;
		if (vma->vm_pgoff == mi)
			return mi;
	}
	return -1;
}

static void uio_vma_open(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	idev->vma_count++;
}

static void uio_vma_close(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	idev->vma_count--;
}

static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct uio_device *idev = vma->vm_private_data;
	struct page *page;
	unsigned long offset;

	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return VM_FAULT_SIGBUS;

	/*
	 * We need to subtract mi because userspace uses offset =
	 * N * PAGE_SIZE to select mem[N]; without this, every fault
	 * would resolve to the first page of the region.
	 */
	offset = (vmf->pgoff - mi) << PAGE_SHIFT;

	if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
		page = virt_to_page(idev->info->mem[mi].addr + offset);
	else
		page = vmalloc_to_page((void *)(idev->info->mem[mi].addr
						+ offset));
	get_page(page);
	vmf->page = page;
	return 0;
}

static struct vm_operations_struct uio_vm_ops = {
	.open = uio_vma_open,
	.close = uio_vma_close,
	.fault = uio_vma_fault,
};

static int uio_mmap_physical(struct vm_area_struct *vma)
{
	struct uio_device *idev = vma->vm_private_data;
	int mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma,
			       vma->vm_start,
			       idev->info->mem[mi].addr >> PAGE_SHIFT,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int uio_mmap_logical(struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &uio_vm_ops;
	uio_vma_open(vma);
	return 0;
}

static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
{
	struct uio_listener *listener = filep->private_data;
	struct uio_device *idev = listener->dev;
	int mi;
	unsigned long requested_pages, actual_pages;
	int ret = 0;

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;

	vma->vm_private_data = idev;

	mi = uio_find_mem_index(vma);
	if (mi < 0)
		return -EINVAL;

	requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	actual_pages = (idev->info->mem[mi].size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (requested_pages > actual_pages)
		return -EINVAL;

	if (idev->info->mmap) {
		if (!try_module_get(idev->owner))
			return -ENODEV;
		ret = idev->info->mmap(idev->info, vma);
		module_put(idev->owner);
		return ret;
	}

	switch (idev->info->mem[mi].memtype) {
	case UIO_MEM_PHYS:
		return uio_mmap_physical(vma);
	case UIO_MEM_LOGICAL:
	case UIO_MEM_VIRTUAL:
		return uio_mmap_logical(vma);
	default:
		return -EINVAL;
	}
}
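/*
 * uio_find_mem_index() keys the mapping off vma->vm_pgoff, so userspace
 * selects memory region N by passing N * PAGE_SIZE as the mmap() offset;
 * the length still refers to that region alone. A minimal sketch
 * (mapping region 1 of a device, size read from sysfs beforehand):
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 1 * getpagesize());
 *	if (p == MAP_FAILED)
 *		... bail out ...
 *
 * This is also why the fault handler above subtracts mi from vmf->pgoff
 * before computing the offset into the region.
 */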
static const struct file_operations uio_fops = {
	.owner		= THIS_MODULE,
	.open		= uio_open,
	.release	= uio_release,
	.read		= uio_read,
	.mmap		= uio_mmap,
	.poll		= uio_poll,
	.fasync		= uio_fasync,
};

static int uio_major_init(void)
{
	uio_major = register_chrdev(0, "uio", &uio_fops);
	if (uio_major < 0)
		return uio_major;
	return 0;
}

static void uio_major_cleanup(void)
{
	unregister_chrdev(uio_major, "uio");
}

static int init_uio_class(void)
{
	int ret = 0;

	if (uio_class != NULL) {
		kref_get(&uio_class->kref);
		goto exit;
	}

	/* This is the first time in here, set everything up properly */
	ret = uio_major_init();
	if (ret)
		goto exit;

	uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
	if (!uio_class) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	kref_init(&uio_class->kref);
	uio_class->class = class_create(THIS_MODULE, "uio");
	if (IS_ERR(uio_class->class)) {
		ret = PTR_ERR(uio_class->class);
		printk(KERN_ERR "class_create failed for uio\n");
		goto err_class_create;
	}
	return 0;

err_class_create:
	kfree(uio_class);
	uio_class = NULL;
err_kzalloc:
	uio_major_cleanup();
exit:
	return ret;
}

static void release_uio_class(struct kref *kref)
{
	/* Ok, we cheat as we know we only have one uio_class */
	class_destroy(uio_class->class);
	kfree(uio_class);
	uio_major_cleanup();
	uio_class = NULL;
}

static void uio_class_destroy(void)
{
	if (uio_class)
		kref_put(&uio_class->kref, release_uio_class);
}

/**
 * uio_register_device - register a new userspace IO device
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	ret = init_uio_class();
	if (ret)
		return ret;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	idev->owner = owner;
	idev->info = info;
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret)
		goto err_get_minor;

	idev->dev = device_create(uio_class->class, parent,
				  MKDEV(uio_major, idev->minor),
				  "uio%d", idev->minor);
	if (IS_ERR(idev->dev)) {
		printk(KERN_ERR "UIO: device register failed\n");
		ret = PTR_ERR(idev->dev);
		goto err_device_create;
	}
	dev_set_drvdata(idev->dev, idev);

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (idev->info->irq >= 0) {
		ret = request_irq(idev->info->irq, uio_interrupt,
				  idev->info->irq_flags, idev->info->name,
				  idev);
		if (ret)
			goto err_request_irq;
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
err_device_create:
	uio_free_minor(idev);
err_get_minor:
	kfree(idev);
err_kzalloc:
	uio_class_destroy();
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;

	uio_free_minor(idev);

	if (info->irq >= 0)
		free_irq(info->irq, idev);

	uio_dev_del_attributes(idev);

	dev_set_drvdata(idev->dev, NULL);
	device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
	kfree(idev);
	uio_class_destroy();

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
	return 0;
}

static void __exit uio_exit(void)
{
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_LICENSE("GPL v2");
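/*
 * Typical use from a driver: fill in a struct uio_info and hand it to
 * uio_register_device(), the wrapper in <linux/uio_driver.h> that passes
 * THIS_MODULE to __uio_register_device() above. A minimal sketch for a
 * hypothetical memory-mapped device (all MYDEV_* names and the handler
 * are illustrative, not part of this file):
 *
 *	static struct uio_info mydev_info = {
 *		.name		= "mydev",
 *		.version	= "0.1",
 *		.irq		= MYDEV_IRQ,
 *		.irq_flags	= IRQF_SHARED,
 *		.handler	= mydev_handler,
 *	};
 *
 *	static int mydev_probe(struct device *dev)
 *	{
 *		mydev_info.mem[0].addr = MYDEV_PHYS_BASE;
 *		mydev_info.mem[0].size = MYDEV_MEM_SIZE;
 *		mydev_info.mem[0].memtype = UIO_MEM_PHYS;
 *		return uio_register_device(dev, &mydev_info);
 *	}
 *
 * The matching remove path calls uio_unregister_device(&mydev_info).
 */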