/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *          Michael Holzheu <holzheu@de.ibm.com>
 *          Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/cdev.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attribute reclen. Each write() to the
 *   device must specify an integral multiple of reclen; at most
 *   MAX_RECS_PER_IO (511) records are written per I/O.
 */
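
/*
 * Example (illustrative only, the device number is an assumption): a
 * virtual punch has reclen 80, so userspace must write whole 80-byte
 * records:
 *
 *      cat /sys/bus/ccw/devices/0.0.000d/reclen        (prints 80)
 *      dd if=records.txt of=/dev/vmpun-0.0.000d bs=80
 *
 * A write whose byte count is not a multiple of reclen is rejected with
 * -EINVAL (see ur_write below).
 */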

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
        { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
        { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
        { /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);
static int ur_pm_suspend(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
        .name           = "vmur",
        .owner          = THIS_MODULE,
        .ids            = ur_ids,
        .probe          = ur_probe,
        .remove         = ur_remove,
        .set_online     = ur_set_online,
        .set_offline    = ur_set_offline,
        .freeze         = ur_pm_suspend,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
        struct urdev *urd;

        urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
        if (!urd)
                return NULL;
        urd->reclen = cdev->id.driver_info;
        ccw_device_get_id(cdev, &urd->dev_id);
        mutex_init(&urd->io_mutex);
        init_waitqueue_head(&urd->wait);
        spin_lock_init(&urd->open_lock);
        atomic_set(&urd->ref_count, 1);
        urd->cdev = cdev;
        get_device(&cdev->dev);
        return urd;
}

static void urdev_free(struct urdev *urd)
{
        TRACE("urdev_free: %p\n", urd);
        if (urd->cdev)
                put_device(&urd->cdev->dev);
        kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
        atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
        struct urdev *urd;
        unsigned long flags;

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urd = dev_get_drvdata(&cdev->dev);
        if (urd)
                urdev_get(urd);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
        char bus_id[16];
        struct ccw_device *cdev;
        struct urdev *urd;

        sprintf(bus_id, "0.0.%04x", devno);
        cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
        if (!cdev)
                return NULL;
        urd = urdev_get_from_cdev(cdev);
        put_device(&cdev->dev);
        return urd;
}

static void urdev_put(struct urdev *urd)
{
        if (atomic_dec_and_test(&urd->ref_count))
                urdev_free(urd);
}

/*
 * State and contents of ur devices can be changed by class D users issuing
 * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended.
 * Also the Linux guest might be logged off, which causes all active spool
 * files to be closed.
 * So we cannot guarantee that spool files are still the same when the Linux
 * guest is resumed. In order to avoid unpredictable results at resume time
 * we simply refuse to suspend if a ur device node is open.
 */
static int ur_pm_suspend(struct ccw_device *cdev)
{
        struct urdev *urd = dev_get_drvdata(&cdev->dev);

        TRACE("ur_pm_suspend: cdev=%p\n", cdev);
        if (urd->open_flag) {
                pr_err("Unit record device %s is busy, %s refusing to "
                       "suspend.\n", dev_name(&cdev->dev), ur_banner);
                return -EBUSY;
        }
        return 0;
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
        struct ccw1 *ptr = cpa;

        while (ptr->cda) {
                kfree((void *)(addr_t) ptr->cda);
                ptr++;
        }
        kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
                                    int reclen)
{
        struct ccw1 *cpa;
        void *kbuf;
        int i;

        TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

        /*
         * We chain a NOP onto the writes to force CE+DE together.
         * That means we allocate room for CCWs to cover count/reclen
         * records plus a NOP.
         */
        cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
                      GFP_KERNEL | GFP_DMA);
        if (!cpa)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < rec_count; i++) {
                cpa[i].cmd_code = WRITE_CCW_CMD;
                cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
                cpa[i].count = reclen;
                kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
                if (!kbuf) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-ENOMEM);
                }
                cpa[i].cda = (u32)(addr_t) kbuf;
                if (copy_from_user(kbuf, ubuf, reclen)) {
                        free_chan_prog(cpa);
                        return ERR_PTR(-EFAULT);
                }
                ubuf += reclen;
        }
        /* The following NOP CCW forces CE+DE to be presented together */
        cpa[i].cmd_code = CCW_CMD_NOOP;
        return cpa;
}
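
/*
 * For illustration, the channel program built above for rec_count == 2
 * looks like this (each write CCW points at its own kernel buffer):
 *
 *      cpa[0]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> buffer 0
 *      cpa[1]: WRITE_CCW_CMD, CC|SLI, count = reclen, cda -> buffer 1
 *      cpa[2]: CCW_CMD_NOOP, cda = 0
 *
 * free_chan_prog relies on the NOP's zero cda to detect the end of the
 * chain, which is why the CCW array is allocated with kzalloc.
 */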

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
        int rc;
        struct ccw_device *cdev = urd->cdev;
        DECLARE_COMPLETION_ONSTACK(event);

        TRACE("do_ur_io: cpa=%p\n", cpa);

        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;

        urd->io_done = &event;

        spin_lock_irq(get_ccwdev_lock(cdev));
        rc = ccw_device_start(cdev, cpa, 1, 0, 0);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
        if (rc)
                goto out;

        wait_for_completion(&event);
        TRACE("do_ur_io: I/O complete\n");
        rc = 0;

out:
        mutex_unlock(&urd->io_mutex);
        return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
                           struct irb *irb)
{
        struct urdev *urd;

        kstat_cpu(smp_processor_id()).irqs[IOINT_VMR]++;
        TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
              intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
              irb->scsw.cmd.count);

        if (!intparm) {
                TRACE("ur_int_handler: unsolicited interrupt\n");
                return;
        }
        urd = dev_get_drvdata(&cdev->dev);
        BUG_ON(!urd);
        /* On special conditions irb is an error pointer */
        if (IS_ERR(irb))
                urd->io_request_rc = PTR_ERR(irb);
        else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
                urd->io_request_rc = 0;
        else
                urd->io_request_rc = -EIO;

        complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct urdev *urd;
        int rc;

        urd = urdev_get_from_cdev(to_ccwdev(dev));
        if (!urd)
                return -ENODEV;
        rc = sprintf(buf, "%zu\n", urd->reclen);
        urdev_put(urd);
        return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
        return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
        device_remove_file(dev, &dev_attr_reclen);
}

/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
        static struct diag210 ur_diag210;
        int cc;

        ur_diag210.vrdcdvno = urd->dev_id.devno;
        ur_diag210.vrdclen = sizeof(struct diag210);

        cc = diag210(&ur_diag210);
        switch (cc) {
        case 0:
                return -EOPNOTSUPP;
        case 2:
                return ur_diag210.vrdcvcla; /* virtual device class */
        case 3:
                return -ENODEV;
        default:
                return -EIO;
        }
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
        struct urfile *urf;

        urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
        if (!urf)
                return NULL;
        urf->urd = urd;

        TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
              urf->dev_reclen);

        return urf;
}

static void urfile_free(struct urfile *urf)
{
        TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
        kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
                        size_t count, size_t reclen, loff_t *ppos)
{
        struct ccw1 *cpa;
        int rc;

        cpa = alloc_chan_prog(udata, count / reclen, reclen);
        if (IS_ERR(cpa))
                return PTR_ERR(cpa);

        rc = do_ur_io(urd, cpa);
        if (rc)
                goto fail_kfree_cpa;

        if (urd->io_request_rc) {
                rc = urd->io_request_rc;
                goto fail_kfree_cpa;
        }
        *ppos += count;
        rc = count;

fail_kfree_cpa:
        free_chan_prog(cpa);
        return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
                        size_t count, loff_t *ppos)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_write: count=%zu\n", count);

        if (count == 0)
                return 0;

        if (count % urf->dev_reclen)
                return -EINVAL; /* count must be a multiple of reclen */

        if (count > urf->dev_reclen * MAX_RECS_PER_IO)
                count = urf->dev_reclen * MAX_RECS_PER_IO;

        return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *                                     record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
        int cc;

        cc = diag14(record, devno, 0x28);
        switch (cc) {
        case 0:
                return 0;
        case 2:
                return -ENOMEDIUM;
        case 3:
                return -ENODATA; /* position beyond end of file */
        default:
                return -EIO;
        }
}
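
/*
 * Note on positioning: the spool file is addressed in 4K records that are
 * numbered starting with 1, so diag14_read below positions to record
 * *offs / PAGE_SIZE + 1. For example, file offset 0 maps to record 1 and
 * offset 4096 maps to record 2.
 */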

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *       reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
        int cc;

        cc = diag14((unsigned long) buf, devno, 0x00);
        switch (cc) {
        case 0:
                return 0;
        case 1:
                return -ENODATA;
        case 2:
                return -ENOMEDIUM;
        default:
                return -EIO;
        }
}

static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
                           loff_t *offs)
{
        size_t len, copied, res;
        char *buf;
        int rc;
        u16 reclen;
        struct urdev *urd;

        urd = ((struct urfile *) file->private_data)->urd;
        reclen = ((struct urfile *) file->private_data)->file_reclen;

        rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
        if (rc == -ENODATA)
                return 0;
        if (rc)
                return rc;

        len = min((size_t) PAGE_SIZE, count);
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf)
                return -ENOMEM;

        copied = 0;
        res = (size_t) (*offs % PAGE_SIZE);
        do {
                rc = diag_read_file(urd->dev_id.devno, buf);
                if (rc == -ENODATA)
                        break;
                if (rc)
                        goto fail;
                if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
                        *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
                len = min(count - copied, PAGE_SIZE - res);
                if (copy_to_user(ubuf + copied, buf + res, len)) {
                        rc = -EFAULT;
                        goto fail;
                }
                res = 0;
                copied += len;
        } while (copied != count);

        *offs += copied;
        rc = copied;
fail:
        free_page((unsigned long) buf);
        return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
                       loff_t *offs)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

        if (count == 0)
                return 0;

        urd = ((struct urfile *) file->private_data)->urd;
        rc = mutex_lock_interruptible(&urd->io_mutex);
        if (rc)
                return rc;
        rc = diag14_read(file, ubuf, count, offs);
        mutex_unlock(&urd->io_mutex);
        return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
        int cc;

        cc = diag14((unsigned long) buf, spid, 0xfff);
        switch (cc) {
        case 0:
                return 0;
        default:
                return -ENODATA;
        }
}

static int verify_uri_device(struct urdev *urd)
{
        struct file_control_block *fcb;
        char *buf;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;

        /* check for empty reader device (beginning of chain) */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_fcb;

        /* if file is in hold status, we do not read it */
        if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
                rc = -EPERM;
                goto fail_free_fcb;
        }

        /* open file on virtual reader */
        buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
        if (!buf) {
                rc = -ENOMEM;
                goto fail_free_fcb;
        }
        rc = diag_read_file(urd->dev_id.devno, buf);
        if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
                goto fail_free_buf;

        /* check if the file on top of the queue is open now */
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free_buf;
        if (!(fcb->file_stat & FLG_IN_USE)) {
                rc = -EMFILE;
                goto fail_free_buf;
        }
        rc = 0;

fail_free_buf:
        free_page((unsigned long) buf);
fail_free_fcb:
        kfree(fcb);
        return rc;
}

static int verify_device(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0; /* no check needed here */
        case DEV_CLASS_UR_I:
                return verify_uri_device(urd);
        default:
                return -EOPNOTSUPP;
        }
}

static int get_uri_file_reclen(struct urdev *urd)
{
        struct file_control_block *fcb;
        int rc;

        fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
        if (!fcb)
                return -ENOMEM;
        rc = diag_read_next_file_info(fcb, 0);
        if (rc)
                goto fail_free;
        if (fcb->file_stat & FLG_CP_DUMP)
                rc = 0;
        else
                rc = fcb->rec_len;

fail_free:
        kfree(fcb);
        return rc;
}

static int get_file_reclen(struct urdev *urd)
{
        switch (urd->class) {
        case DEV_CLASS_UR_O:
                return 0;
        case DEV_CLASS_UR_I:
                return get_uri_file_reclen(urd);
        default:
                return -EOPNOTSUPP;
        }
}

static int ur_open(struct inode *inode, struct file *file)
{
        u16 devno;
        struct urdev *urd;
        struct urfile *urf;
        unsigned short accmode;
        int rc;

        accmode = file->f_flags & O_ACCMODE;

        if (accmode == O_RDWR)
                return -EACCES;
        /*
         * We treat the minor number as the devno of the ur device
         * to find in the driver tree.
         */
        devno = MINOR(file->f_dentry->d_inode->i_rdev);

        urd = urdev_get_from_devno(devno);
        if (!urd) {
                rc = -ENXIO;
                goto out;
        }

        spin_lock(&urd->open_lock);
        while (urd->open_flag) {
                spin_unlock(&urd->open_lock);
                if (file->f_flags & O_NONBLOCK) {
                        rc = -EBUSY;
                        goto fail_put;
                }
                if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
                        rc = -ERESTARTSYS;
                        goto fail_put;
                }
                spin_lock(&urd->open_lock);
        }
        urd->open_flag++;
        spin_unlock(&urd->open_lock);

        TRACE("ur_open\n");

        if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
            ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
                TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
                rc = -EACCES;
                goto fail_unlock;
        }

        rc = verify_device(urd);
        if (rc)
                goto fail_unlock;

        urf = urfile_alloc(urd);
        if (!urf) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        urf->dev_reclen = urd->reclen;
        rc = get_file_reclen(urd);
        if (rc < 0)
                goto fail_urfile_free;
        urf->file_reclen = rc;
        file->private_data = urf;
        return 0;

fail_urfile_free:
        urfile_free(urf);
fail_unlock:
        spin_lock(&urd->open_lock);
        urd->open_flag--;
        spin_unlock(&urd->open_lock);
fail_put:
        urdev_put(urd);
out:
        return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
        struct urfile *urf = file->private_data;

        TRACE("ur_release\n");
        spin_lock(&urf->urd->open_lock);
        urf->urd->open_flag--;
        spin_unlock(&urf->urd->open_lock);
        wake_up_interruptible(&urf->urd->wait);
        urdev_put(urf->urd);
        urfile_free(urf);
        return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
        loff_t newpos;

        if ((file->f_flags & O_ACCMODE) != O_RDONLY)
                return -ESPIPE; /* seek allowed only for reader */
        if (offset % PAGE_SIZE)
                return -ESPIPE; /* only multiples of 4K allowed */
        switch (whence) {
        case 0: /* SEEK_SET */
                newpos = offset;
                break;
        case 1: /* SEEK_CUR */
                newpos = file->f_pos + offset;
                break;
        default:
                return -EINVAL;
        }
        file->f_pos = newpos;
        return newpos;
}
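
/*
 * Seek example (illustrative): reads are backed by the page-wise diag 0x14
 * buffers above, so the reader only accepts offsets that are multiples of
 * the page size:
 *
 *      lseek(fd, 2 * 4096, SEEK_SET)   succeeds, next read starts at record 3
 *      lseek(fd, 80, SEEK_SET)         fails with -ESPIPE
 */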

static const struct file_operations ur_fops = {
        .owner   = THIS_MODULE,
        .open    = ur_open,
        .release = ur_release,
        .read    = ur_read,
        .write   = ur_write,
        .llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_probe: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_alloc(cdev);
        if (!urd) {
                rc = -ENOMEM;
                goto fail_unlock;
        }

        rc = ur_create_attributes(&cdev->dev);
        if (rc) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }
        cdev->handler = ur_int_handler;

        /* validate virtual unit record device */
        urd->class = get_urd_class(urd);
        if (urd->class < 0) {
                rc = urd->class;
                goto fail_remove_attr;
        }
        if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
                rc = -EOPNOTSUPP;
                goto fail_remove_attr;
        }
        spin_lock_irq(get_ccwdev_lock(cdev));
        dev_set_drvdata(&cdev->dev, urd);
        spin_unlock_irq(get_ccwdev_lock(cdev));

        mutex_unlock(&vmur_mutex);
        return 0;

fail_remove_attr:
        ur_remove_attributes(&cdev->dev);
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
        struct urdev *urd;
        int minor, major, rc;
        char node_id[16];

        TRACE("ur_set_online: cdev=%p\n", cdev);

        mutex_lock(&vmur_mutex);
        urd = urdev_get_from_cdev(cdev);
        if (!urd) {
                /* ur_remove already deleted our urd */
                rc = -ENODEV;
                goto fail_unlock;
        }

        if (urd->char_device) {
                /* Another ur_set_online was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }

        minor = urd->dev_id.devno;
        major = MAJOR(ur_first_dev_maj_min);

        urd->char_device = cdev_alloc();
        if (!urd->char_device) {
                rc = -ENOMEM;
                goto fail_urdev_put;
        }

        urd->char_device->ops = &ur_fops;
        urd->char_device->dev = MKDEV(major, minor);
        urd->char_device->owner = ur_fops.owner;

        rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
        if (rc)
                goto fail_free_cdev;
        if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
                if (urd->class == DEV_CLASS_UR_I)
                        sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
                if (urd->class == DEV_CLASS_UR_O)
                        sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
        } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
                sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
        } else {
                rc = -EOPNOTSUPP;
                goto fail_free_cdev;
        }

        urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
                                    NULL, "%s", node_id);
        if (IS_ERR(urd->device)) {
                rc = PTR_ERR(urd->device);
                TRACE("ur_set_online: device_create rc=%d\n", rc);
                goto fail_free_cdev;
        }
        urdev_put(urd);
        mutex_unlock(&vmur_mutex);
        return 0;

fail_free_cdev:
        cdev_del(urd->char_device);
        urd->char_device = NULL;
fail_urdev_put:
        urdev_put(urd);
fail_unlock:
        mutex_unlock(&vmur_mutex);
        return rc;
}
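
/*
 * The node name created above encodes the device class and the bus id,
 * for example (device numbers are just an assumption):
 *
 *      /dev/vmrdr-0.0.000c     virtual reader
 *      /dev/vmpun-0.0.000d     virtual punch
 *      /dev/vmprt-0.0.000e     virtual printer
 */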

static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
        struct urdev *urd;
        int rc;

        TRACE("ur_set_offline: cdev=%p\n", cdev);
        urd = urdev_get_from_cdev(cdev);
        if (!urd)
                /* ur_remove already deleted our urd */
                return -ENODEV;
        if (!urd->char_device) {
                /* Another ur_set_offline was faster */
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        if (!force && (atomic_read(&urd->ref_count) > 2)) {
                /* There is still a user of urd (e.g. ur_open) */
                TRACE("ur_set_offline: BUSY\n");
                rc = -EBUSY;
                goto fail_urdev_put;
        }
        device_destroy(vmur_class, urd->char_device->dev);
        cdev_del(urd->char_device);
        urd->char_device = NULL;
        rc = 0;

fail_urdev_put:
        urdev_put(urd);
        return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
        int rc;

        mutex_lock(&vmur_mutex);
        rc = ur_set_offline_force(cdev, 0);
        mutex_unlock(&vmur_mutex);
        return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
        unsigned long flags;

        TRACE("ur_remove\n");

        mutex_lock(&vmur_mutex);

        if (cdev->online)
                ur_set_offline_force(cdev, 1);
        ur_remove_attributes(&cdev->dev);

        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        urdev_put(dev_get_drvdata(&cdev->dev));
        dev_set_drvdata(&cdev->dev, NULL);
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

        mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
        int rc;
        dev_t dev;

        if (!MACHINE_IS_VM) {
                pr_err("The %s cannot be loaded without z/VM\n",
                       ur_banner);
                return -ENODEV;
        }

        vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
        if (!vmur_dbf)
                return -ENOMEM;
        rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
        if (rc)
                goto fail_free_dbf;

        debug_set_level(vmur_dbf, 6);

        vmur_class = class_create(THIS_MODULE, "vmur");
        if (IS_ERR(vmur_class)) {
                rc = PTR_ERR(vmur_class);
                goto fail_free_dbf;
        }

        rc = ccw_driver_register(&ur_driver);
        if (rc)
                goto fail_class_destroy;

        rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
        if (rc) {
                pr_err("Kernel function alloc_chrdev_region failed with "
                       "error code %d\n", rc);
                goto fail_unregister_driver;
        }
        ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

        pr_info("%s loaded.\n", ur_banner);
        return 0;

fail_unregister_driver:
        ccw_driver_unregister(&ur_driver);
fail_class_destroy:
        class_destroy(vmur_class);
fail_free_dbf:
        debug_unregister(vmur_dbf);
        return rc;
}

static void __exit ur_exit(void)
{
        unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
        ccw_driver_unregister(&ur_driver);
        class_destroy(vmur_class);
        debug_unregister(vmur_dbf);
        pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);
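
/*
 * Example session (illustrative, the device number is an assumption):
 * after loading the module, a unit record device is activated through the
 * generic ccw online attribute, which invokes ur_set_online and creates
 * the character device node:
 *
 *      modprobe vmur
 *      echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *      cat /dev/vmrdr-0.0.000c > spoolfile
 */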