// SPDX-License-Identifier: GPL-2.0
/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2009
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#define KMSG_COMPONENT "vmur"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <linux/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 *   is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 *   is available by reading the sysfs attribute reclen. Each write() to the
 *   device must specify an integral multiple of reclen (at most 511 records
 *   per I/O).
 */

static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.driver = {
		.name	= "vmur",
		.owner	= THIS_MODULE,
	},
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
	.int_class	= IRQIO_VMR,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using dev_get_drvdata(&cdev->dev) pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   dev_get_drvdata(&cdev->dev)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of dev_get_drvdata(&cdev->dev) is protected by the
 * ccwdev lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	init_waitqueue_head(&urd->wait);
	spin_lock_init(&urd->open_lock);
	refcount_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	refcount_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = dev_get_drvdata(&cdev->dev);
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (refcount_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}

/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kcalloc(rec_count + 1, sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	if (!IS_ERR(irb)) {
		TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
		      intparm, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat,
		      irb->scsw.cmd.count);
	}
	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = dev_get_drvdata(&cdev->dev);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
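/*
 * Example (hypothetical bus id 0.0.000d): with the reclen attribute
 * created above, user space can query the record length before writing
 * and must then write in multiples of it:
 *
 *	$ cat /sys/bus/ccw/devices/0.0.000d/reclen
 *	80
 *
 * A write() of 160 bytes would then be accepted (two records), while
 * 100 bytes would be rejected with -EINVAL by ur_write() below.
 */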
/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -EOPNOTSUPP;
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}
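/*
 * Read path used by diag14_read() below (summary derived from the
 * code): the spool file is first positioned to the 4K page containing
 * *offs (diag 0x14 subcode 0x28, using *offs / PAGE_SIZE + 1 as the
 * record number), then page-sized buffers are read (subcode 0x00) and
 * the requested byte range is copied to user space. For files with a
 * known record length, the length is patched into the first page at
 * FILE_RECLEN_OFFSET, presumably so user space can recover the
 * original record structure.
 */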
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;	/* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -EOPNOTSUPP;
	}
}
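/*
 * Note: verify_uri_device() above refuses files in hold status with
 * -EPERM. Such a file first has to be released on the z/VM side (for
 * example with a CP CHANGE RDR ... NOHOLD command; the exact command
 * depends on the installation) before it can be read through this
 * driver.
 */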
static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -EOPNOTSUPP;
	}
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;
	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = iminor(file_inode(file));

	urd = urdev_get_from_devno(devno);
	if (!urd) {
		rc = -ENXIO;
		goto out;
	}

	spin_lock(&urd->open_lock);
	while (urd->open_flag) {
		spin_unlock(&urd->open_lock);
		if (file->f_flags & O_NONBLOCK) {
			rc = -EBUSY;
			goto fail_put;
		}
		if (wait_event_interruptible(urd->wait, urd->open_flag == 0)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
		spin_lock(&urd->open_lock);
	}
	urd->open_flag++;
	spin_unlock(&urd->open_lock);

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	spin_lock(&urd->open_lock);
	urd->open_flag--;
	spin_unlock(&urd->open_lock);
fail_put:
	urdev_put(urd);
out:
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	spin_lock(&urf->urd->open_lock);
	urf->urd->open_flag--;
	spin_unlock(&urf->urd->open_lock);
	wake_up_interruptible(&urf->urd->wait);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE;	/* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE;	/* only multiples of 4K allowed */
	return no_seek_end_llseek(file, offset, whence);
}

static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 * ur_probe creates the struct urdev (with refcount = 1), the device
 * attributes, sets up the interrupt handler and validates the virtual
 * unit record device.
 * ur_remove removes the device attributes and drops the reference to
 * struct urdev.
 *
 * ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 * by the vmur_mutex lock.
 *
 * urd->char_device is used as an indication that the online function
 * has been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -EOPNOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	dev_set_drvdata(&cdev->dev, urd);
	cdev->handler = ur_int_handler;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	urd->char_device->ops = &ur_fops;
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, MKDEV(major, minor), 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
	} else {
		rc = -EOPNOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, &cdev->dev,
				    urd->char_device->dev, NULL, "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (refcount_read(&urd->ref_count) > 2)) {
		/* There is still a user of urd (e.g. ur_open) */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(dev_get_drvdata(&cdev->dev));
	dev_set_drvdata(&cdev->dev, NULL);
	cdev->handler = NULL;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		pr_err("The %s cannot be loaded without z/VM\n",
		       ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_free_dbf;
	}

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_class_destroy;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		pr_err("Kernel function alloc_chrdev_region failed with "
		       "error code %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	pr_info("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_class_destroy:
	class_destroy(vmur_class);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	class_destroy(vmur_class);
	debug_unregister(vmur_dbf);
	pr_info("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);