/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attribute reclen. Each write() to the
 *   device must specify an integral multiple (at most 511 records) of reclen.
 */

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name	= "vmur",
		.owner	= THIS_MODULE,
	},
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
	.freeze		= ur_pm_suspend,
	.int_class	= IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);
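/*
 * Illustrative userspace sketch (not part of the driver) of the write
 * protocol described in the overview above. It assumes udev created a
 * punch node /dev/vmpun-0.0.000d whose reclen is 80; record bytes pass
 * through unmodified, so callers typically convert text to EBCDIC first:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int punch_record(const char *data, size_t len)
 *	{
 *		char rec[80];	// one fixed-length record, padded
 *		int fd, rc;
 *
 *		memset(rec, 0, sizeof(rec));
 *		memcpy(rec, data, len < sizeof(rec) ? len : sizeof(rec));
 *		fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *		if (fd < 0)
 *			return -1;
 *		rc = write(fd, rec, sizeof(rec)) == sizeof(rec) ? 0 : -1;
 *		close(fd);
 *		return rc;
 *	}
 */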
/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}
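/*
 * Sketch of the get/put discipline described above, as practised by
 * ur_open()/ur_release() below: every successful urdev_get_from_devno()
 * or urdev_get_from_cdev() must be balanced by a urdev_put():
 *
 *	urd = urdev_get_from_devno(devno);
 *	if (!urd)
 *		return -ENXIO;
 *	// ... use urd ...
 *	urdev_put(urd);
 */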
/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 *     on a completion event it publishes at urd->io_done. The function
 *     serialises itself on the device's mutex so that only one I/O
 *     is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, derives a return code
 *     from the subchannel status word, stores it in urd->io_request_rc
 *     and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for freeing the channel program
 *     that alloc_chan_prog returned, using free_chan_prog.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
	      irb->scsw.cmd.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}
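/*
 * The resulting calling convention, as used by do_write() below: build
 * the channel program, run it synchronously, then release it with
 * free_chan_prog(), which also frees the per-record data buffers:
 *
 *	cpa = alloc_chan_prog(udata, count / reclen, reclen);
 *	if (IS_ERR(cpa))
 *		return PTR_ERR(cpa);
 *	rc = do_ur_io(urd, cpa);
 *	// ... evaluate rc and urd->io_request_rc ...
 *	free_chan_prog(cpa);
 */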
/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
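/*
 * Note that ur_write() truncates each request to dev_reclen *
 * MAX_RECS_PER_IO bytes, so large writes complete in several calls.
 * An illustrative userspace loop (fd, buf and len assumed to be set up
 * by the caller, len a multiple of reclen):
 *
 *	size_t done = 0;
 *
 *	while (done < len) {
 *		ssize_t n = write(fd, buf + done, len - done);
 *
 *		if (n < 0)
 *			return -1;
 *		done += n;
 *	}
 */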
/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}
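/*
 * Illustrative userspace sketch of the read path above, assuming udev
 * created a reader node /dev/vmrdr-0.0.000c: spool data arrives
 * page-wise and read() returns 0 at end of file:
 *
 *	char page[4096];
 *	ssize_t n;
 *	int fd = open("/dev/vmrdr-0.0.000c", O_RDONLY);
 *
 *	if (fd < 0)
 *		return -1;
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		; // consume n bytes of page
 *	close(fd);
 */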
/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0; /* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}
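/*
 * Example of the minor-to-devno mapping used by ur_open() below: a
 * reader with bus ID 0.0.000c has devno 0x000c, so its character device
 * carries minor number 0x000c, and urdev_get_from_devno(0x000c) resolves
 * the open to the corresponding urdev.
 */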
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file_inode(file)->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE; /* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE; /* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}

static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek	 = ur_llseek,
};
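/*
 * Usage note on ur_llseek() above: only the reader may seek, and only
 * in PAGE_SIZE steps; SEEK_END is rejected with -EINVAL. For example,
 * an illustrative call skipping the first two spool pages:
 *
 *	if (lseek(fd, 2 * 4096, SEEK_SET) == (off_t) -1)
 *		return -1; // fails with ESPIPE or EINVAL
 */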
/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
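/*
 * Illustrative sysfs interaction (assuming bus ID 0.0.000c): the device
 * is brought online via the standard ccw online attribute, which invokes
 * ur_set_online() above and creates the character device; the record
 * length exported by ur_attr_reclen_show() can then be read back:
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *	cat /sys/bus/ccw/devices/0.0.000c/reclen
 */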
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);