/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attribute reclen. Each write() to the
 *   device must specify an integral multiple of reclen, up to 511 records
 *   per write.
 */

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.name		= "vmur",
	.owner		= THIS_MODULE,
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
	.freeze		= ur_pm_suspend,
};

static DEFINE_MUTEX(vmur_mutex);
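/*
 * Userspace view, as an illustrative sketch only (the device node name is
 * an example and depends on the device number, see ur_set_online below):
 *
 *	int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *	char rec[80];			// 80 = reclen of a virtual punch
 *	memset(rec, ' ', sizeof(rec));	// records are fixed length
 *	memcpy(rec, "HELLO", 5);
 *	write(fd, rec, sizeof(rec));	// length must be n * reclen
 *	close(fd);
 *
 * Reading works the other way round: open the vmrdr node read-only and
 * read the current spool file; offsets are handled in 4K units.
 */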
/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
	struct urdev *urd = dev_get_drvdata(&cdev->dev);

	TRACE("ur_pm_suspend: cdev=%p\n", cdev);
	if (urd->open_flag) {
		pr_err("Unit record device %s is busy, %s refusing to "
		       "suspend.\n", dev_name(&cdev->dev), ur_banner);
		return -EBUSY;
	}
	return 0;
}
/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}
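/*
 * Shape of the channel program built above, for n = rec_count (descriptive
 * sketch only):
 *
 *	cpa[0]	  WRITE_CCW_CMD, CC|SLI, count=reclen, cda -> kbuf 0
 *	cpa[1]	  WRITE_CCW_CMD, CC|SLI, count=reclen, cda -> kbuf 1
 *	...
 *	cpa[n-1]  WRITE_CCW_CMD, CC|SLI, count=reclen, cda -> kbuf n-1
 *	cpa[n]	  CCW_CMD_NOOP, cda = 0
 *
 * free_chan_prog walks the chain until it hits a CCW with a zero cda, so
 * it relies on the kzalloc'ed NOP slot keeping its cda zero.
 */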
static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
	      irb->scsw.cmd.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}
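/*
 * Note that ur_write silently caps a single request at MAX_RECS_PER_IO
 * records and returns the (possibly shortened) byte count, so userspace
 * must be prepared to loop on short writes, along these lines (sketch):
 *
 *	while (len) {
 *		ssize_t n = write(fd, p, len);
 *		if (n < 0)
 *			break;		// error handling elided
 *		p += n;
 *		len -= n;
 *	}
 */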
/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}
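/*
 * Reads are thus page granular: diag 0x14 fills a page-sized buffer per
 * call, *offs / PAGE_SIZE + 1 selects the spool record to position to, and
 * *offs % PAGE_SIZE selects the byte within that buffer. For reader files
 * with a known record length, the length obtained at open time is patched
 * into the first page at FILE_RECLEN_OFFSET before it is copied out.
 */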
/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;	/* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}
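/*
 * A record length of 0 (returned for CP dump files, FLG_CP_DUMP, and for
 * output-class devices) propagates into urf->file_reclen below, which in
 * turn makes diag14_read skip patching a record length into the first page.
 */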
static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE;	/* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE;	/* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}

static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};
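/*
 * A ur node allows only one opener at a time: further open() calls either
 * block on urd->wait or, with O_NONBLOCK, fail with -EBUSY. A non-blocking
 * caller would retry along these lines (illustrative sketch; the node name
 * is an example and poll_some_backoff() is a hypothetical sleep/retry
 * helper):
 *
 *	int fd;
 *	do {
 *		fd = open("/dev/vmrdr-0.0.000c", O_RDONLY | O_NONBLOCK);
 *	} while (fd < 0 && errno == EBUSY && !poll_some_backoff());
 */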
/*
 * ccw_device infrastructure:
 * ur_probe creates the struct urdev (with refcount = 1), the device
 * attributes, sets up the interrupt handler and validates the virtual
 * unit record device.
 * ur_remove removes the device attributes and drops the reference to
 * struct urdev.
 *
 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 * by the vmur_mutex lock.
 *
 * urd->char_device is used as indication that the online function has
 * been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	cdev_init(urd->char_device, &ur_fops);
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
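/*
 * Reference accounting for the offline check below: an online but unopened
 * device is pinned by the probe reference (drvdata) plus the temporary
 * reference taken by urdev_get_from_cdev() in this function, i.e.
 * ref_count == 2. Anything above that means another user (e.g. ur_open)
 * still holds the urd, so a non-forced offline is refused.
 */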
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);