/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attr reclen. Each write() to the
 *   device must specify an integral multiple (maximal 511) of reclen.
 */
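
/*
 * Illustrative usage sketch (not part of the driver; the device numbers
 * and node names below are assumptions for the example):
 *
 *   # bring the virtual reader 0.0.000c online, then read its spool file
 *   echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *   dd if=/dev/vmrdr-0.0.000c bs=4096 of=spoolfile
 *
 *   # write 80-byte records to the virtual punch 0.0.000d
 *   # (assumes cards.txt is an exact multiple of 80 bytes)
 *   dd if=cards.txt of=/dev/vmpun-0.0.000d bs=80
 *
 * Reads are page (4K) granular; writes must be a multiple of the device
 * record length (80 for reader/punch, 132 for printer, see ur_ids below).
 */
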
static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.name		= "vmur",
	.owner		= THIS_MODULE,
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
	.freeze		= ur_pm_suspend,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 * that alloc_chan_prog returned (via free_chan_prog, which also frees
 * the per-record data buffers).
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
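
/*
 * For illustration, the channel program built by alloc_chan_prog for
 * rec_count = 2 looks like this (a sketch, not something the driver
 * prints):
 *
 *   cpa[0]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> kbuf0
 *   cpa[1]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> kbuf1
 *   cpa[2]: CCW_CMD_NOOP (ends the chain, forces CE+DE together)
 *
 * free_chan_prog relies on the NOP's cda being zero (from kzalloc) to
 * know where the chain of data buffers ends.
 */
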
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
	      irb->scsw.cmd.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
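
/*
 * Userspace can query the record length before writing, e.g. (the bus id
 * 0.0.000d and the value shown are assumptions for the example):
 *
 *   $ cat /sys/bus/ccw/devices/0.0.000d/reclen
 *   80
 */
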
/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
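
/*
 * A minimal userspace write loop might look like this (a sketch; the
 * device node name is an assumption). Since ur_write truncates a single
 * write() to at most MAX_RECS_PER_IO records, short writes must be
 * handled:
 *
 *   int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *   ssize_t n;
 *   while (len > 0 && (n = write(fd, buf, len)) > 0) {
 *           buf += n;
 *           len -= n;   // len must stay a multiple of reclen
 *   }
 */
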
/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}
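
/*
 * Reads are satisfied in page-sized chunks via diagnose 0x14: the file
 * offset selects the 4K spool buffer (*offs / PAGE_SIZE + 1, buffers are
 * counted from 1), and partial pages are copied out from the offset's
 * remainder. For illustration (device node assumed):
 *
 *   dd if=/dev/vmrdr-0.0.000c bs=4096 skip=2 count=1 of=page3
 *
 * reads the third 4K page of the current spool file.
 */
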
/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;	/* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	lock_kernel();
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	unlock_kernel();
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	unlock_kernel();
	return rc;
}
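
/*
 * Open semantics in short: readers (DEV_CLASS_UR_I) must be opened
 * O_RDONLY, punches and printers (DEV_CLASS_UR_O) O_WRONLY; O_RDWR is
 * always refused. A second open on the same device blocks until the
 * first is released, or fails with -EBUSY if O_NONBLOCK is set.
 */
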
static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE;	/* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE;	/* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}

static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek	 = ur_llseek,
};
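
/*
 * Seeking is only supported on the reader and only to 4K boundaries,
 * which map 1:1 to spool file buffers (see diag14_read). For example,
 * lseek(fd, 2 * 4096, SEEK_SET) positions to the third 4K buffer;
 * SEEK_END (whence 2) falls into the default case above and returns
 * -EINVAL.
 */
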
/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	cdev_init(urd->char_device, &ur_fops);
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
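
/*
 * Bringing a device online from userspace creates the class device and
 * thus, with udev, the node under /dev (bus id and node name assumed
 * for illustration):
 *
 *   echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *   ls -l /dev/vmrdr-0.0.000c
 *
 * The minor number equals the devno (0x000c here), so the node maps
 * straight back to the unit record device.
 */
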
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);