/*
 * Linux driver for System z and s390 unit record devices
 * (z/VM virtual punch, reader, printer)
 *
 * Copyright IBM Corp. 2001, 2007
 * Authors: Malcolm Beattie <beattiem@uk.ibm.com>
 *	    Michael Holzheu <holzheu@de.ibm.com>
 *	    Frank Munzert <munzert@de.ibm.com>
 */

#include <linux/cdev.h>

#include <asm/uaccess.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/debug.h>
#include <asm/diag.h>

#include "vmur.h"

/*
 * Driver overview
 *
 * Unit record device support is implemented as a character device driver.
 * We can fit at least 16 bits into a device minor number and use the
 * simple method of mapping a character device number with minor abcd
 * to the unit record device with devno abcd.
 * I/O to virtual unit record devices is handled as follows:
 * Reads: Diagnose code 0x14 (input spool file manipulation)
 * is used to read spool data page-wise.
 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length
 * is available by reading sysfs attr reclen. Each write() to the device
 * must specify an integral multiple (maximal 511) of reclen.
 */
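/*
 * Punch usage sketch from user space (illustrative only; the device node
 * name and the 80-byte record length are assumptions of this example):
 *
 *	char buf[2 * 80];			(two records, blank padded)
 *	int fd = open("/dev/vmpun-0.0.000d", O_WRONLY);
 *	write(fd, buf, sizeof(buf));		(must be a multiple of reclen)
 *	close(fd);
 *
 * A single write() is capped at MAX_RECS_PER_IO (the 511 records mentioned
 * above), so callers should be prepared for short writes.
 */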
static char ur_banner[] = "z/VM virtual unit record device driver";

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");

#define PRINTK_HEADER "vmur: "

static dev_t ur_first_dev_maj_min;
static struct class *vmur_class;
static struct debug_info *vmur_dbf;

/* We put the device's record length (for writes) in the driver_info field */
static struct ccw_device_id ur_ids[] = {
	{ CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) },
	{ CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) },
	{ /* end of list */ }
};

MODULE_DEVICE_TABLE(ccw, ur_ids);

static int ur_probe(struct ccw_device *cdev);
static void ur_remove(struct ccw_device *cdev);
static int ur_set_online(struct ccw_device *cdev);
static int ur_set_offline(struct ccw_device *cdev);

static struct ccw_driver ur_driver = {
	.name		= "vmur",
	.owner		= THIS_MODULE,
	.ids		= ur_ids,
	.probe		= ur_probe,
	.remove		= ur_remove,
	.set_online	= ur_set_online,
	.set_offline	= ur_set_offline,
};

static DEFINE_MUTEX(vmur_mutex);

/*
 * Allocation, freeing, getting and putting of urdev structures
 *
 * Each ur device (urd) contains a reference to its corresponding ccw device
 * (cdev) using the urd->cdev pointer. Each ccw device has a reference to the
 * ur device using the cdev->dev.driver_data pointer.
 *
 * urd references:
 * - ur_probe gets a urd reference, ur_remove drops the reference
 *   (cdev->dev.driver_data)
 * - ur_open gets a urd reference, ur_release drops the reference
 *   (urf->urd)
 *
 * cdev references:
 * - urdev_alloc gets a cdev reference (urd->cdev)
 * - urdev_free drops the cdev reference (urd->cdev)
 *
 * Setting and clearing of cdev->dev.driver_data is protected by the ccwdev
 * lock
 */
static struct urdev *urdev_alloc(struct ccw_device *cdev)
{
	struct urdev *urd;

	urd = kzalloc(sizeof(struct urdev), GFP_KERNEL);
	if (!urd)
		return NULL;
	urd->reclen = cdev->id.driver_info;
	ccw_device_get_id(cdev, &urd->dev_id);
	mutex_init(&urd->io_mutex);
	mutex_init(&urd->open_mutex);
	atomic_set(&urd->ref_count, 1);
	urd->cdev = cdev;
	get_device(&cdev->dev);
	return urd;
}

static void urdev_free(struct urdev *urd)
{
	TRACE("urdev_free: %p\n", urd);
	if (urd->cdev)
		put_device(&urd->cdev->dev);
	kfree(urd);
}

static void urdev_get(struct urdev *urd)
{
	atomic_inc(&urd->ref_count);
}

static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev)
{
	struct urdev *urd;
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urd = cdev->dev.driver_data;
	if (urd)
		urdev_get(urd);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return urd;
}

static struct urdev *urdev_get_from_devno(u16 devno)
{
	char bus_id[16];
	struct ccw_device *cdev;
	struct urdev *urd;

	sprintf(bus_id, "0.0.%04x", devno);
	cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
	if (!cdev)
		return NULL;
	urd = urdev_get_from_cdev(cdev);
	put_device(&cdev->dev);
	return urd;
}

static void urdev_put(struct urdev *urd)
{
	if (atomic_dec_and_test(&urd->ref_count))
		urdev_free(urd);
}

/*
 * Low-level functions to do I/O to a ur device.
 *     alloc_chan_prog
 *     free_chan_prog
 *     do_ur_io
 *     ur_int_handler
 *
 * alloc_chan_prog allocates and builds the channel program
 * free_chan_prog frees memory of the channel program
 *
 * do_ur_io issues the channel program to the device and blocks waiting
 * on a completion event it publishes at urd->io_done. The function
 * serialises itself on the device's mutex so that only one I/O
 * is issued at a time (and that I/O is synchronous).
 *
 * ur_int_handler catches the "I/O done" interrupt, writes the
 * subchannel status word into the scsw member of the urdev structure
 * and complete()s the io_done to wake the waiting do_ur_io.
 *
 * The caller of do_ur_io is responsible for kfree()ing the channel program
 * address pointer that alloc_chan_prog returned.
 */

static void free_chan_prog(struct ccw1 *cpa)
{
	struct ccw1 *ptr = cpa;

	/* The terminating NOP CCW has cda == 0 because cpa is kzalloc'ed */
	while (ptr->cda) {
		kfree((void *)(addr_t) ptr->cda);
		ptr++;
	}
	kfree(cpa);
}
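/*
 * For illustration, a write() of three records yields a channel program
 * of this shape (a sketch of what alloc_chan_prog below builds; the
 * buffer addresses are of course dynamic):
 *
 *	cpa[0]	WRITE_CCW_CMD  flags=CC|SLI  count=reclen  cda=buf0
 *	cpa[1]	WRITE_CCW_CMD  flags=CC|SLI  count=reclen  cda=buf1
 *	cpa[2]	WRITE_CCW_CMD  flags=CC|SLI  count=reclen  cda=buf2
 *	cpa[3]	CCW_CMD_NOOP   (forces CE and DE into one interrupt)
 */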
/*
 * alloc_chan_prog
 * The channel program we use is write commands chained together
 * with a final NOP CCW command-chained on (which ensures that CE and DE
 * are presented together in a single interrupt instead of as separate
 * interrupts unless an incorrect length indication kicks in first). The
 * data length in each CCW is reclen.
 */
static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
				    int reclen)
{
	struct ccw1 *cpa;
	void *kbuf;
	int i;

	TRACE("alloc_chan_prog(%p, %i, %i)\n", ubuf, rec_count, reclen);

	/*
	 * We chain a NOP onto the writes to force CE+DE together.
	 * That means we allocate room for CCWs to cover count/reclen
	 * records plus a NOP.
	 */
	cpa = kzalloc((rec_count + 1) * sizeof(struct ccw1),
		      GFP_KERNEL | GFP_DMA);
	if (!cpa)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < rec_count; i++) {
		cpa[i].cmd_code = WRITE_CCW_CMD;
		cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI;
		cpa[i].count = reclen;
		kbuf = kmalloc(reclen, GFP_KERNEL | GFP_DMA);
		if (!kbuf) {
			free_chan_prog(cpa);
			return ERR_PTR(-ENOMEM);
		}
		cpa[i].cda = (u32)(addr_t) kbuf;
		if (copy_from_user(kbuf, ubuf, reclen)) {
			free_chan_prog(cpa);
			return ERR_PTR(-EFAULT);
		}
		ubuf += reclen;
	}
	/* The following NOP CCW forces CE+DE to be presented together */
	cpa[i].cmd_code = CCW_CMD_NOOP;
	return cpa;
}

static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
{
	int rc;
	struct ccw_device *cdev = urd->cdev;
	DECLARE_COMPLETION_ONSTACK(event);

	TRACE("do_ur_io: cpa=%p\n", cpa);

	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;

	urd->io_done = &event;

	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = ccw_device_start(cdev, cpa, 1, 0, 0);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	TRACE("do_ur_io: ccw_device_start returned %d\n", rc);
	if (rc)
		goto out;

	wait_for_completion(&event);
	TRACE("do_ur_io: I/O complete\n");
	rc = 0;

out:
	mutex_unlock(&urd->io_mutex);
	return rc;
}

/*
 * ur interrupt handler, called from the ccw_device layer
 */
static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
			   struct irb *irb)
{
	struct urdev *urd;

	TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n",
	      intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count);

	if (!intparm) {
		TRACE("ur_int_handler: unsolicited interrupt\n");
		return;
	}
	urd = cdev->dev.driver_data;
	BUG_ON(!urd);
	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb))
		urd->io_request_rc = PTR_ERR(irb);
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		urd->io_request_rc = 0;
	else
		urd->io_request_rc = -EIO;

	complete(urd->io_done);
}

/*
 * reclen sysfs attribute - The record length to be used for write CCWs
 */
static ssize_t ur_attr_reclen_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct urdev *urd;
	int rc;

	urd = urdev_get_from_cdev(to_ccwdev(dev));
	if (!urd)
		return -ENODEV;
	rc = sprintf(buf, "%zu\n", urd->reclen);
	urdev_put(urd);
	return rc;
}

static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL);

static int ur_create_attributes(struct device *dev)
{
	return device_create_file(dev, &dev_attr_reclen);
}

static void ur_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_reclen);
}
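/*
 * The attribute lives in the ccw device's sysfs directory, so the record
 * length can be queried like this (a sketch; the bus ID and the value of
 * 80 for a punch device are illustrative):
 *
 *	$ cat /sys/bus/ccw/devices/0.0.000d/reclen
 *	80
 */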
/*
 * diagnose code 0x210 - retrieve device information
 * cc=0  normal completion, we have a real device
 * cc=1  CP paging error
 * cc=2  The virtual device exists, but is not associated with a real device
 * cc=3  Invalid device address, or the virtual device does not exist
 */
static int get_urd_class(struct urdev *urd)
{
	static struct diag210 ur_diag210;
	int cc;

	ur_diag210.vrdcdvno = urd->dev_id.devno;
	ur_diag210.vrdclen = sizeof(struct diag210);

	cc = diag210(&ur_diag210);
	switch (cc) {
	case 0:
		return -ENOTSUPP; /* real devices are not supported */
	case 2:
		return ur_diag210.vrdcvcla; /* virtual device class */
	case 3:
		return -ENODEV;
	default:
		return -EIO;
	}
}

/*
 * Allocation and freeing of urfile structures
 */
static struct urfile *urfile_alloc(struct urdev *urd)
{
	struct urfile *urf;

	urf = kzalloc(sizeof(struct urfile), GFP_KERNEL);
	if (!urf)
		return NULL;
	urf->urd = urd;

	TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf,
	      urf->dev_reclen);

	return urf;
}

static void urfile_free(struct urfile *urf)
{
	TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd);
	kfree(urf);
}

/*
 * The fops implementation of the character device driver
 */
static ssize_t do_write(struct urdev *urd, const char __user *udata,
			size_t count, size_t reclen, loff_t *ppos)
{
	struct ccw1 *cpa;
	int rc;

	cpa = alloc_chan_prog(udata, count / reclen, reclen);
	if (IS_ERR(cpa))
		return PTR_ERR(cpa);

	rc = do_ur_io(urd, cpa);
	if (rc)
		goto fail_kfree_cpa;

	if (urd->io_request_rc) {
		rc = urd->io_request_rc;
		goto fail_kfree_cpa;
	}
	*ppos += count;
	rc = count;

fail_kfree_cpa:
	free_chan_prog(cpa);
	return rc;
}

static ssize_t ur_write(struct file *file, const char __user *udata,
			size_t count, loff_t *ppos)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_write: count=%zu\n", count);

	if (count == 0)
		return 0;

	if (count % urf->dev_reclen)
		return -EINVAL;	/* count must be a multiple of reclen */

	if (count > urf->dev_reclen * MAX_RECS_PER_IO)
		count = urf->dev_reclen * MAX_RECS_PER_IO;

	return do_write(urf->urd, udata, count, urf->dev_reclen, ppos);
}

/*
 * diagnose code 0x14 subcode 0x0028 - position spool file to designated
 *				       record
 * cc=0  normal completion
 * cc=2  no file active on the virtual reader or device not ready
 * cc=3  record specified is beyond EOF
 */
static int diag_position_to_record(int devno, int record)
{
	int cc;

	cc = diag14(record, devno, 0x28);
	switch (cc) {
	case 0:
		return 0;
	case 2:
		return -ENOMEDIUM;
	case 3:
		return -ENODATA; /* position beyond end of file */
	default:
		return -EIO;
	}
}

/*
 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer
 * cc=0  normal completion
 * cc=1  EOF reached
 * cc=2  no file active on the virtual reader, and no file eligible
 * cc=3  file already active on the virtual reader or specified virtual
 *	 reader does not exist or is not a reader
 */
static int diag_read_file(int devno, char *buf)
{
	int cc;

	cc = diag14((unsigned long) buf, devno, 0x00);
	switch (cc) {
	case 0:
		return 0;
	case 1:
		return -ENODATA;
	case 2:
		return -ENOMEDIUM;
	default:
		return -EIO;
	}
}
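/*
 * Worked example for the positioning arithmetic in diag14_read below:
 * spool files are addressed in 4K buffers, so a read at file offset 9000
 * positions to record 9000 / 4096 + 1 = 3 and skips the first
 * 9000 % 4096 = 808 bytes of that buffer before copying to user space.
 */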
static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count,
			   loff_t *offs)
{
	size_t len, copied, res;
	char *buf;
	int rc;
	u16 reclen;
	struct urdev *urd;

	urd = ((struct urfile *) file->private_data)->urd;
	reclen = ((struct urfile *) file->private_data)->file_reclen;

	rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1);
	if (rc == -ENODATA)
		return 0;
	if (rc)
		return rc;

	len = min((size_t) PAGE_SIZE, count);
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	res = (size_t) (*offs % PAGE_SIZE);
	do {
		rc = diag_read_file(urd->dev_id.devno, buf);
		if (rc == -ENODATA)
			break;
		if (rc)
			goto fail;
		/* Patch the file's record length into the first buffer */
		if (reclen && (copied == 0) && (*offs < PAGE_SIZE))
			*((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen;
		len = min(count - copied, PAGE_SIZE - res);
		if (copy_to_user(ubuf + copied, buf + res, len)) {
			rc = -EFAULT;
			goto fail;
		}
		res = 0;
		copied += len;
	} while (copied != count);

	*offs += copied;
	rc = copied;
fail:
	free_page((unsigned long) buf);
	return rc;
}

static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count,
		       loff_t *offs)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs);

	if (count == 0)
		return 0;

	urd = ((struct urfile *) file->private_data)->urd;
	rc = mutex_lock_interruptible(&urd->io_mutex);
	if (rc)
		return rc;
	rc = diag14_read(file, ubuf, count, offs);
	mutex_unlock(&urd->io_mutex);
	return rc;
}
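/*
 * Reader usage sketch from user space (illustrative; the device node name
 * and the process() helper are assumptions of the example). O_NONBLOCK
 * makes a concurrent second open fail with EBUSY instead of blocking, and
 * read() returns 0 once the end of the spool file is reached:
 *
 *	int fd = open("/dev/vmrdr-0.0.000c", O_RDONLY | O_NONBLOCK);
 *	char page[4096];
 *	ssize_t n;
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		process(page, n);
 *	close(fd);
 */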
/*
 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor
 * cc=0  normal completion
 * cc=1  no files on reader queue or no subsequent file
 * cc=2  spid specified is invalid
 */
static int diag_read_next_file_info(struct file_control_block *buf, int spid)
{
	int cc;

	cc = diag14((unsigned long) buf, spid, 0xfff);
	switch (cc) {
	case 0:
		return 0;
	default:
		return -ENODATA;
	}
}

static int verify_uri_device(struct urdev *urd)
{
	struct file_control_block *fcb;
	char *buf;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;

	/* check for empty reader device (beginning of chain) */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_fcb;

	/* if file is in hold status, we do not read it */
	if (fcb->file_stat & (FLG_SYSTEM_HOLD | FLG_USER_HOLD)) {
		rc = -EPERM;
		goto fail_free_fcb;
	}

	/* open file on virtual reader */
	buf = (char *) __get_free_page(GFP_KERNEL | GFP_DMA);
	if (!buf) {
		rc = -ENOMEM;
		goto fail_free_fcb;
	}
	rc = diag_read_file(urd->dev_id.devno, buf);
	if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */
		goto fail_free_buf;

	/* check if the file on top of the queue is open now */
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free_buf;
	if (!(fcb->file_stat & FLG_IN_USE)) {
		rc = -EMFILE;
		goto fail_free_buf;
	}
	rc = 0;

fail_free_buf:
	free_page((unsigned long) buf);
fail_free_fcb:
	kfree(fcb);
	return rc;
}

static int verify_device(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;	/* no check needed here */
	case DEV_CLASS_UR_I:
		return verify_uri_device(urd);
	default:
		return -ENOTSUPP;
	}
}

static int get_uri_file_reclen(struct urdev *urd)
{
	struct file_control_block *fcb;
	int rc;

	fcb = kmalloc(sizeof(*fcb), GFP_KERNEL | GFP_DMA);
	if (!fcb)
		return -ENOMEM;
	rc = diag_read_next_file_info(fcb, 0);
	if (rc)
		goto fail_free;
	if (fcb->file_stat & FLG_CP_DUMP)
		rc = 0;
	else
		rc = fcb->rec_len;

fail_free:
	kfree(fcb);
	return rc;
}

static int get_file_reclen(struct urdev *urd)
{
	switch (urd->class) {
	case DEV_CLASS_UR_O:
		return 0;
	case DEV_CLASS_UR_I:
		return get_uri_file_reclen(urd);
	default:
		return -ENOTSUPP;
	}
}

static int ur_open(struct inode *inode, struct file *file)
{
	u16 devno;
	struct urdev *urd;
	struct urfile *urf;
	unsigned short accmode;
	int rc;

	accmode = file->f_flags & O_ACCMODE;

	if (accmode == O_RDWR)
		return -EACCES;

	/*
	 * We treat the minor number as the devno of the ur device
	 * to find in the driver tree.
	 */
	devno = MINOR(file->f_dentry->d_inode->i_rdev);

	urd = urdev_get_from_devno(devno);
	if (!urd)
		return -ENXIO;

	if (file->f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&urd->open_mutex)) {
			rc = -EBUSY;
			goto fail_put;
		}
	} else {
		if (mutex_lock_interruptible(&urd->open_mutex)) {
			rc = -ERESTARTSYS;
			goto fail_put;
		}
	}

	TRACE("ur_open\n");

	if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) ||
	    ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) {
		TRACE("ur_open: unsupported dev class (%d)\n", urd->class);
		rc = -EACCES;
		goto fail_unlock;
	}

	rc = verify_device(urd);
	if (rc)
		goto fail_unlock;

	urf = urfile_alloc(urd);
	if (!urf) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	urf->dev_reclen = urd->reclen;
	rc = get_file_reclen(urd);
	if (rc < 0)
		goto fail_urfile_free;
	urf->file_reclen = rc;
	file->private_data = urf;
	return 0;

fail_urfile_free:
	urfile_free(urf);
fail_unlock:
	mutex_unlock(&urd->open_mutex);
fail_put:
	urdev_put(urd);
	return rc;
}

static int ur_release(struct inode *inode, struct file *file)
{
	struct urfile *urf = file->private_data;

	TRACE("ur_release\n");
	mutex_unlock(&urf->urd->open_mutex);
	urdev_put(urf->urd);
	urfile_free(urf);
	return 0;
}

static loff_t ur_llseek(struct file *file, loff_t offset, int whence)
{
	loff_t newpos;

	if ((file->f_flags & O_ACCMODE) != O_RDONLY)
		return -ESPIPE;	/* seek allowed only for reader */
	if (offset % PAGE_SIZE)
		return -ESPIPE;	/* only multiples of 4K allowed */
	switch (whence) {
	case 0: /* SEEK_SET */
		newpos = offset;
		break;
	case 1: /* SEEK_CUR */
		newpos = file->f_pos + offset;
		break;
	default:
		return -EINVAL;
	}
	file->f_pos = newpos;
	return newpos;
}
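/*
 * Seek sketch: skip the first two 4K spool buffers of a reader file.
 * Only page-aligned offsets on a read-only descriptor are accepted:
 *
 *	lseek(fd, 2 * 4096, SEEK_SET);		(succeeds)
 *	lseek(fd, 100, SEEK_SET);		(fails with ESPIPE)
 */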
static const struct file_operations ur_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ur_open,
	.release = ur_release,
	.read	 = ur_read,
	.write	 = ur_write,
	.llseek  = ur_llseek,
};

/*
 * ccw_device infrastructure:
 *     ur_probe creates the struct urdev (with refcount = 1), the device
 *     attributes, sets up the interrupt handler and validates the virtual
 *     unit record device.
 *     ur_remove removes the device attributes and drops the reference to
 *     struct urdev.
 *
 *     ur_probe, ur_remove, ur_set_online and ur_set_offline are serialized
 *     by the vmur_mutex lock.
 *
 *     urd->char_device is used as indication that the online function has
 *     been completed successfully.
 */
static int ur_probe(struct ccw_device *cdev)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_probe: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_alloc(cdev);
	if (!urd) {
		rc = -ENOMEM;
		goto fail_unlock;
	}

	rc = ur_create_attributes(&cdev->dev);
	if (rc) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}
	cdev->handler = ur_int_handler;

	/* validate virtual unit record device */
	urd->class = get_urd_class(urd);
	if (urd->class < 0) {
		rc = urd->class;
		goto fail_remove_attr;
	}
	if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) {
		rc = -ENOTSUPP;
		goto fail_remove_attr;
	}
	spin_lock_irq(get_ccwdev_lock(cdev));
	cdev->dev.driver_data = urd;
	spin_unlock_irq(get_ccwdev_lock(cdev));

	mutex_unlock(&vmur_mutex);
	return 0;

fail_remove_attr:
	ur_remove_attributes(&cdev->dev);
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}

static int ur_set_online(struct ccw_device *cdev)
{
	struct urdev *urd;
	int minor, major, rc;
	char node_id[16];

	TRACE("ur_set_online: cdev=%p\n", cdev);

	mutex_lock(&vmur_mutex);
	urd = urdev_get_from_cdev(cdev);
	if (!urd) {
		/* ur_remove already deleted our urd */
		rc = -ENODEV;
		goto fail_unlock;
	}

	if (urd->char_device) {
		/* Another ur_set_online was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}

	minor = urd->dev_id.devno;
	major = MAJOR(ur_first_dev_maj_min);

	urd->char_device = cdev_alloc();
	if (!urd->char_device) {
		rc = -ENOMEM;
		goto fail_urdev_put;
	}

	/*
	 * cdev_alloc() already initialized the embedded kobject; calling
	 * cdev_init() here would reset its release function and leak the
	 * cdev, so only fill in ops, dev and owner.
	 */
	urd->char_device->ops = &ur_fops;
	urd->char_device->dev = MKDEV(major, minor);
	urd->char_device->owner = ur_fops.owner;

	rc = cdev_add(urd->char_device, urd->char_device->dev, 1);
	if (rc)
		goto fail_free_cdev;
	if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
		if (urd->class == DEV_CLASS_UR_I)
			sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id);
		if (urd->class == DEV_CLASS_UR_O)
			sprintf(node_id, "vmpun-%s", cdev->dev.bus_id);
	} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
		sprintf(node_id, "vmprt-%s", cdev->dev.bus_id);
	} else {
		rc = -ENOTSUPP;
		goto fail_free_cdev;
	}

	urd->device = device_create(vmur_class, NULL, urd->char_device->dev,
				    "%s", node_id);
	if (IS_ERR(urd->device)) {
		rc = PTR_ERR(urd->device);
		TRACE("ur_set_online: device_create rc=%d\n", rc);
		goto fail_free_cdev;
	}
	urdev_put(urd);
	mutex_unlock(&vmur_mutex);
	return 0;

fail_free_cdev:
	cdev_del(urd->char_device);
	urd->char_device = NULL;
fail_urdev_put:
	urdev_put(urd);
fail_unlock:
	mutex_unlock(&vmur_mutex);
	return rc;
}
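/*
 * Bringing a device online through the common ccw online attribute
 * creates the character device and its class device node (a sketch;
 * the bus ID is illustrative and the /dev node is created by udev):
 *
 *	$ echo 1 > /sys/bus/ccw/devices/0.0.000c/online
 *	$ ls -l /dev/vmrdr-0.0.000c
 */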
static int ur_set_offline_force(struct ccw_device *cdev, int force)
{
	struct urdev *urd;
	int rc;

	TRACE("ur_set_offline: cdev=%p\n", cdev);
	urd = urdev_get_from_cdev(cdev);
	if (!urd)
		/* ur_remove already deleted our urd */
		return -ENODEV;
	if (!urd->char_device) {
		/* Another ur_set_offline was faster */
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	if (!force && (atomic_read(&urd->ref_count) > 2)) {
		/*
		 * There is still a user of urd (e.g. ur_open): the base
		 * reference plus our own urdev_get_from_cdev make two.
		 */
		TRACE("ur_set_offline: BUSY\n");
		rc = -EBUSY;
		goto fail_urdev_put;
	}
	device_destroy(vmur_class, urd->char_device->dev);
	cdev_del(urd->char_device);
	urd->char_device = NULL;
	rc = 0;

fail_urdev_put:
	urdev_put(urd);
	return rc;
}

static int ur_set_offline(struct ccw_device *cdev)
{
	int rc;

	mutex_lock(&vmur_mutex);
	rc = ur_set_offline_force(cdev, 0);
	mutex_unlock(&vmur_mutex);
	return rc;
}

static void ur_remove(struct ccw_device *cdev)
{
	unsigned long flags;

	TRACE("ur_remove\n");

	mutex_lock(&vmur_mutex);

	if (cdev->online)
		ur_set_offline_force(cdev, 1);
	ur_remove_attributes(&cdev->dev);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	urdev_put(cdev->dev.driver_data);
	cdev->dev.driver_data = NULL;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);

	mutex_unlock(&vmur_mutex);
}

/*
 * Module initialisation and cleanup
 */
static int __init ur_init(void)
{
	int rc;
	dev_t dev;

	if (!MACHINE_IS_VM) {
		PRINT_ERR("%s is only available under z/VM.\n", ur_banner);
		return -ENODEV;
	}

	vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long));
	if (!vmur_dbf)
		return -ENOMEM;
	rc = debug_register_view(vmur_dbf, &debug_sprintf_view);
	if (rc)
		goto fail_free_dbf;

	debug_set_level(vmur_dbf, 6);

	rc = ccw_driver_register(&ur_driver);
	if (rc)
		goto fail_free_dbf;

	rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur");
	if (rc) {
		PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc);
		goto fail_unregister_driver;
	}
	ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0);

	vmur_class = class_create(THIS_MODULE, "vmur");
	if (IS_ERR(vmur_class)) {
		rc = PTR_ERR(vmur_class);
		goto fail_unregister_region;
	}
	PRINT_INFO("%s loaded.\n", ur_banner);
	return 0;

fail_unregister_region:
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
fail_unregister_driver:
	ccw_driver_unregister(&ur_driver);
fail_free_dbf:
	debug_unregister(vmur_dbf);
	return rc;
}

static void __exit ur_exit(void)
{
	class_destroy(vmur_class);
	unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
	ccw_driver_unregister(&ur_driver);
	debug_unregister(vmur_dbf);
	PRINT_INFO("%s unloaded.\n", ur_banner);
}

module_init(ur_init);
module_exit(ur_exit);