1 /* 2 * Linux driver for System z and s390 unit record devices 3 * (z/VM virtual punch, reader, printer) 4 * 5 * Copyright IBM Corp. 2001, 2007 6 * Authors: Malcolm Beattie <beattiem@uk.ibm.com> 7 * Michael Holzheu <holzheu@de.ibm.com> 8 * Frank Munzert <munzert@de.ibm.com> 9 */ 10 11 #include <linux/cdev.h> 12 13 #include <asm/uaccess.h> 14 #include <asm/cio.h> 15 #include <asm/ccwdev.h> 16 #include <asm/debug.h> 17 18 #include "vmur.h" 19 20 /* 21 * Driver overview 22 * 23 * Unit record device support is implemented as a character device driver. 24 * We can fit at least 16 bits into a device minor number and use the 25 * simple method of mapping a character device number with minor abcd 26 * to the unit record device with devno abcd. 27 * I/O to virtual unit record devices is handled as follows: 28 * Reads: Diagnose code 0x14 (input spool file manipulation) 29 * is used to read spool data page-wise. 30 * Writes: The CCW used is WRITE_CCW_CMD (0x01). The device's record length 31 * is available by reading sysfs attr reclen. Each write() to the device 32 * must specify an integral multiple (maximal 511) of reclen. 33 */ 34 35 static char ur_banner[] = "z/VM virtual unit record device driver"; 36 37 MODULE_AUTHOR("IBM Corporation"); 38 MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); 39 MODULE_LICENSE("GPL"); 40 41 #define PRINTK_HEADER "vmur: " 42 43 static dev_t ur_first_dev_maj_min; 44 static struct class *vmur_class; 45 static struct debug_info *vmur_dbf; 46 47 /* We put the device's record length (for writes) in the driver_info field */ 48 static struct ccw_device_id ur_ids[] = { 49 { CCWDEV_CU_DI(READER_PUNCH_DEVTYPE, 80) }, 50 { CCWDEV_CU_DI(PRINTER_DEVTYPE, 132) }, 51 { /* end of list */ } 52 }; 53 54 MODULE_DEVICE_TABLE(ccw, ur_ids); 55 56 static int ur_probe(struct ccw_device *cdev); 57 static void ur_remove(struct ccw_device *cdev); 58 static int ur_set_online(struct ccw_device *cdev); 59 static int ur_set_offline(struct ccw_device *cdev); 60 61 static struct ccw_driver ur_driver = { 62 .name = "vmur", 63 .owner = THIS_MODULE, 64 .ids = ur_ids, 65 .probe = ur_probe, 66 .remove = ur_remove, 67 .set_online = ur_set_online, 68 .set_offline = ur_set_offline, 69 }; 70 71 /* 72 * Allocation, freeing, getting and putting of urdev structures 73 */ 74 static struct urdev *urdev_alloc(struct ccw_device *cdev) 75 { 76 struct urdev *urd; 77 78 urd = kzalloc(sizeof(struct urdev), GFP_KERNEL); 79 if (!urd) 80 return NULL; 81 urd->cdev = cdev; 82 urd->reclen = cdev->id.driver_info; 83 ccw_device_get_id(cdev, &urd->dev_id); 84 mutex_init(&urd->io_mutex); 85 mutex_init(&urd->open_mutex); 86 return urd; 87 } 88 89 static void urdev_free(struct urdev *urd) 90 { 91 kfree(urd); 92 } 93 94 /* 95 * This is how the character device driver gets a reference to a 96 * ur device. When this call returns successfully, a reference has 97 * been taken (by get_device) on the underlying kobject. The recipient 98 * of this urdev pointer must eventually drop it with urdev_put(urd) 99 * which does the corresponding put_device(). 100 */ 101 static struct urdev *urdev_get_from_devno(u16 devno) 102 { 103 char bus_id[16]; 104 struct ccw_device *cdev; 105 106 sprintf(bus_id, "0.0.%04x", devno); 107 cdev = get_ccwdev_by_busid(&ur_driver, bus_id); 108 if (!cdev) 109 return NULL; 110 111 return cdev->dev.driver_data; 112 } 113 114 static void urdev_put(struct urdev *urd) 115 { 116 put_device(&urd->cdev->dev); 117 } 118 119 /* 120 * Low-level functions to do I/O to a ur device. 121 * alloc_chan_prog 122 * do_ur_io 123 * ur_int_handler 124 * 125 * alloc_chan_prog allocates and builds the channel program 126 * 127 * do_ur_io issues the channel program to the device and blocks waiting 128 * on a completion event it publishes at urd->io_done. The function 129 * serialises itself on the device's mutex so that only one I/O 130 * is issued at a time (and that I/O is synchronous). 131 * 132 * ur_int_handler catches the "I/O done" interrupt, writes the 133 * subchannel status word into the scsw member of the urdev structure 134 * and complete()s the io_done to wake the waiting do_ur_io. 135 * 136 * The caller of do_ur_io is responsible for kfree()ing the channel program 137 * address pointer that alloc_chan_prog returned. 138 */ 139 140 141 /* 142 * alloc_chan_prog 143 * The channel program we use is write commands chained together 144 * with a final NOP CCW command-chained on (which ensures that CE and DE 145 * are presented together in a single interrupt instead of as separate 146 * interrupts unless an incorrect length indication kicks in first). The 147 * data length in each CCW is reclen. The caller must ensure that count 148 * is an integral multiple of reclen. 149 * The channel program pointer returned by this function must be freed 150 * with kfree. The caller is responsible for checking that 151 * count/reclen is not ridiculously large. 152 */ 153 static struct ccw1 *alloc_chan_prog(char *buf, size_t count, size_t reclen) 154 { 155 size_t num_ccws; 156 struct ccw1 *cpa; 157 int i; 158 159 TRACE("alloc_chan_prog(%p, %zu, %zu)\n", buf, count, reclen); 160 161 /* 162 * We chain a NOP onto the writes to force CE+DE together. 163 * That means we allocate room for CCWs to cover count/reclen 164 * records plus a NOP. 165 */ 166 num_ccws = count / reclen + 1; 167 cpa = kmalloc(num_ccws * sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); 168 if (!cpa) 169 return NULL; 170 171 for (i = 0; count; i++) { 172 cpa[i].cmd_code = WRITE_CCW_CMD; 173 cpa[i].flags = CCW_FLAG_CC | CCW_FLAG_SLI; 174 cpa[i].count = reclen; 175 cpa[i].cda = __pa(buf); 176 buf += reclen; 177 count -= reclen; 178 } 179 /* The following NOP CCW forces CE+DE to be presented together */ 180 cpa[i].cmd_code = CCW_CMD_NOOP; 181 cpa[i].flags = 0; 182 cpa[i].count = 0; 183 cpa[i].cda = 0; 184 185 return cpa; 186 } 187 188 static int do_ur_io(struct urdev *urd, struct ccw1 *cpa) 189 { 190 int rc; 191 struct ccw_device *cdev = urd->cdev; 192 DECLARE_COMPLETION(event); 193 194 TRACE("do_ur_io: cpa=%p\n", cpa); 195 196 rc = mutex_lock_interruptible(&urd->io_mutex); 197 if (rc) 198 return rc; 199 200 urd->io_done = &event; 201 202 spin_lock_irq(get_ccwdev_lock(cdev)); 203 rc = ccw_device_start(cdev, cpa, 1, 0, 0); 204 spin_unlock_irq(get_ccwdev_lock(cdev)); 205 206 TRACE("do_ur_io: ccw_device_start returned %d\n", rc); 207 if (rc) 208 goto out; 209 210 wait_for_completion(&event); 211 TRACE("do_ur_io: I/O complete\n"); 212 rc = 0; 213 214 out: 215 mutex_unlock(&urd->io_mutex); 216 return rc; 217 } 218 219 /* 220 * ur interrupt handler, called from the ccw_device layer 221 */ 222 static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm, 223 struct irb *irb) 224 { 225 struct urdev *urd; 226 227 TRACE("ur_int_handler: intparm=0x%lx cstat=%02x dstat=%02x res=%u\n", 228 intparm, irb->scsw.cstat, irb->scsw.dstat, irb->scsw.count); 229 230 if (!intparm) { 231 TRACE("ur_int_handler: unsolicited interrupt\n"); 232 return; 233 } 234 urd = cdev->dev.driver_data; 235 /* On special conditions irb is an error pointer */ 236 if (IS_ERR(irb)) 237 urd->io_request_rc = PTR_ERR(irb); 238 else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END)) 239 urd->io_request_rc = 0; 240 else 241 urd->io_request_rc = -EIO; 242 243 complete(urd->io_done); 244 } 245 246 /* 247 * reclen sysfs attribute - The record length to be used for write CCWs 248 */ 249 static ssize_t ur_attr_reclen_show(struct device *dev, 250 struct device_attribute *attr, char *buf) 251 { 252 struct urdev *urd = dev->driver_data; 253 254 return sprintf(buf, "%zu\n", urd->reclen); 255 } 256 257 static DEVICE_ATTR(reclen, 0444, ur_attr_reclen_show, NULL); 258 259 static int ur_create_attributes(struct device *dev) 260 { 261 return device_create_file(dev, &dev_attr_reclen); 262 } 263 264 static void ur_remove_attributes(struct device *dev) 265 { 266 device_remove_file(dev, &dev_attr_reclen); 267 } 268 269 /* 270 * diagnose code 0x210 - retrieve device information 271 * cc=0 normal completion, we have a real device 272 * cc=1 CP paging error 273 * cc=2 The virtual device exists, but is not associated with a real device 274 * cc=3 Invalid device address, or the virtual device does not exist 275 */ 276 static int get_urd_class(struct urdev *urd) 277 { 278 static struct diag210 ur_diag210; 279 int cc; 280 281 ur_diag210.vrdcdvno = urd->dev_id.devno; 282 ur_diag210.vrdclen = sizeof(struct diag210); 283 284 cc = diag210(&ur_diag210); 285 switch (cc) { 286 case 0: 287 return -ENOTSUPP; 288 case 2: 289 return ur_diag210.vrdcvcla; /* virtual device class */ 290 case 3: 291 return -ENODEV; 292 default: 293 return -EIO; 294 } 295 } 296 297 /* 298 * Allocation and freeing of urfile structures 299 */ 300 static struct urfile *urfile_alloc(struct urdev *urd) 301 { 302 struct urfile *urf; 303 304 urf = kzalloc(sizeof(struct urfile), GFP_KERNEL); 305 if (!urf) 306 return NULL; 307 urf->urd = urd; 308 309 TRACE("urfile_alloc: urd=%p urf=%p rl=%zu\n", urd, urf, 310 urf->dev_reclen); 311 312 return urf; 313 } 314 315 static void urfile_free(struct urfile *urf) 316 { 317 TRACE("urfile_free: urf=%p urd=%p\n", urf, urf->urd); 318 kfree(urf); 319 } 320 321 /* 322 * The fops implementation of the character device driver 323 */ 324 static ssize_t do_write(struct urdev *urd, const char __user *udata, 325 size_t count, size_t reclen, loff_t *ppos) 326 { 327 struct ccw1 *cpa; 328 char *buf; 329 int rc; 330 331 /* Data buffer must be under 2GB line for fmt1 CCWs: hence GFP_DMA */ 332 buf = kmalloc(count, GFP_KERNEL | GFP_DMA); 333 if (!buf) 334 return -ENOMEM; 335 336 if (copy_from_user(buf, udata, count)) { 337 rc = -EFAULT; 338 goto fail_kfree_buf; 339 } 340 341 cpa = alloc_chan_prog(buf, count, reclen); 342 if (!cpa) { 343 rc = -ENOMEM; 344 goto fail_kfree_buf; 345 } 346 347 rc = do_ur_io(urd, cpa); 348 if (rc) 349 goto fail_kfree_cpa; 350 351 if (urd->io_request_rc) { 352 rc = urd->io_request_rc; 353 goto fail_kfree_cpa; 354 } 355 *ppos += count; 356 rc = count; 357 fail_kfree_cpa: 358 kfree(cpa); 359 fail_kfree_buf: 360 kfree(buf); 361 return rc; 362 } 363 364 static ssize_t ur_write(struct file *file, const char __user *udata, 365 size_t count, loff_t *ppos) 366 { 367 struct urfile *urf = file->private_data; 368 369 TRACE("ur_write: count=%zu\n", count); 370 371 if (count == 0) 372 return 0; 373 374 if (count % urf->dev_reclen) 375 return -EINVAL; /* count must be a multiple of reclen */ 376 377 if (count > urf->dev_reclen * MAX_RECS_PER_IO) 378 count = urf->dev_reclen * MAX_RECS_PER_IO; 379 380 return do_write(urf->urd, udata, count, urf->dev_reclen, ppos); 381 } 382 383 static int do_diag_14(unsigned long rx, unsigned long ry1, 384 unsigned long subcode) 385 { 386 register unsigned long _ry1 asm("2") = ry1; 387 register unsigned long _ry2 asm("3") = subcode; 388 int rc = 0; 389 390 asm volatile( 391 #ifdef CONFIG_64BIT 392 " sam31\n" 393 " diag %2,2,0x14\n" 394 " sam64\n" 395 #else 396 " diag %2,2,0x14\n" 397 #endif 398 " ipm %0\n" 399 " srl %0,28\n" 400 : "=d" (rc), "+d" (_ry2) 401 : "d" (rx), "d" (_ry1) 402 : "cc"); 403 404 TRACE("diag 14: subcode=0x%lx, cc=%i\n", subcode, rc); 405 return rc; 406 } 407 408 /* 409 * diagnose code 0x14 subcode 0x0028 - position spool file to designated 410 * record 411 * cc=0 normal completion 412 * cc=2 no file active on the virtual reader or device not ready 413 * cc=3 record specified is beyond EOF 414 */ 415 static int diag_position_to_record(int devno, int record) 416 { 417 int cc; 418 419 cc = do_diag_14(record, devno, 0x28); 420 switch (cc) { 421 case 0: 422 return 0; 423 case 2: 424 return -ENOMEDIUM; 425 case 3: 426 return -ENODATA; /* position beyond end of file */ 427 default: 428 return -EIO; 429 } 430 } 431 432 /* 433 * diagnose code 0x14 subcode 0x0000 - read next spool file buffer 434 * cc=0 normal completion 435 * cc=1 EOF reached 436 * cc=2 no file active on the virtual reader, and no file eligible 437 * cc=3 file already active on the virtual reader or specified virtual 438 * reader does not exist or is not a reader 439 */ 440 static int diag_read_file(int devno, char *buf) 441 { 442 int cc; 443 444 cc = do_diag_14((unsigned long) buf, devno, 0x00); 445 switch (cc) { 446 case 0: 447 return 0; 448 case 1: 449 return -ENODATA; 450 case 2: 451 return -ENOMEDIUM; 452 default: 453 return -EIO; 454 } 455 } 456 457 static ssize_t diag14_read(struct file *file, char __user *ubuf, size_t count, 458 loff_t *offs) 459 { 460 size_t len, copied, res; 461 char *buf; 462 int rc; 463 u16 reclen; 464 struct urdev *urd; 465 466 urd = ((struct urfile *) file->private_data)->urd; 467 reclen = ((struct urfile *) file->private_data)->file_reclen; 468 469 rc = diag_position_to_record(urd->dev_id.devno, *offs / PAGE_SIZE + 1); 470 if (rc == -ENODATA) 471 return 0; 472 if (rc) 473 return rc; 474 475 len = min((size_t) PAGE_SIZE, count); 476 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 477 if (!buf) 478 return -ENOMEM; 479 480 copied = 0; 481 res = (size_t) (*offs % PAGE_SIZE); 482 do { 483 rc = diag_read_file(urd->dev_id.devno, buf); 484 if (rc == -ENODATA) { 485 break; 486 } 487 if (rc) 488 goto fail; 489 if (reclen && (copied == 0) && (*offs < PAGE_SIZE)) 490 *((u16 *) &buf[FILE_RECLEN_OFFSET]) = reclen; 491 len = min(count - copied, PAGE_SIZE - res); 492 if (copy_to_user(ubuf + copied, buf + res, len)) { 493 rc = -EFAULT; 494 goto fail; 495 } 496 res = 0; 497 copied += len; 498 } while (copied != count); 499 500 *offs += copied; 501 rc = copied; 502 fail: 503 kfree(buf); 504 return rc; 505 } 506 507 static ssize_t ur_read(struct file *file, char __user *ubuf, size_t count, 508 loff_t *offs) 509 { 510 struct urdev *urd; 511 int rc; 512 513 TRACE("ur_read: count=%zu ppos=%li\n", count, (unsigned long) *offs); 514 515 if (count == 0) 516 return 0; 517 518 urd = ((struct urfile *) file->private_data)->urd; 519 rc = mutex_lock_interruptible(&urd->io_mutex); 520 if (rc) 521 return rc; 522 rc = diag14_read(file, ubuf, count, offs); 523 mutex_unlock(&urd->io_mutex); 524 return rc; 525 } 526 527 /* 528 * diagnose code 0x14 subcode 0x0fff - retrieve next file descriptor 529 * cc=0 normal completion 530 * cc=1 no files on reader queue or no subsequent file 531 * cc=2 spid specified is invalid 532 */ 533 static int diag_read_next_file_info(struct file_control_block *buf, int spid) 534 { 535 int cc; 536 537 cc = do_diag_14((unsigned long) buf, spid, 0xfff); 538 switch (cc) { 539 case 0: 540 return 0; 541 default: 542 return -ENODATA; 543 } 544 } 545 546 static int verify_device(struct urdev *urd) 547 { 548 struct file_control_block fcb; 549 char *buf; 550 int rc; 551 552 switch (urd->class) { 553 case DEV_CLASS_UR_O: 554 return 0; /* no check needed here */ 555 case DEV_CLASS_UR_I: 556 /* check for empty reader device (beginning of chain) */ 557 rc = diag_read_next_file_info(&fcb, 0); 558 if (rc) 559 return rc; 560 561 /* open file on virtual reader */ 562 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 563 if (!buf) 564 return -ENOMEM; 565 rc = diag_read_file(urd->dev_id.devno, buf); 566 kfree(buf); 567 568 if ((rc != 0) && (rc != -ENODATA)) /* EOF does not hurt */ 569 return rc; 570 return 0; 571 default: 572 return -ENOTSUPP; 573 } 574 } 575 576 static int get_file_reclen(struct urdev *urd) 577 { 578 struct file_control_block fcb; 579 int rc; 580 581 switch (urd->class) { 582 case DEV_CLASS_UR_O: 583 return 0; 584 case DEV_CLASS_UR_I: 585 rc = diag_read_next_file_info(&fcb, 0); 586 if (rc) 587 return rc; 588 break; 589 default: 590 return -ENOTSUPP; 591 } 592 if (fcb.file_stat & FLG_CP_DUMP) 593 return 0; 594 595 return fcb.rec_len; 596 } 597 598 static int ur_open(struct inode *inode, struct file *file) 599 { 600 u16 devno; 601 struct urdev *urd; 602 struct urfile *urf; 603 unsigned short accmode; 604 int rc; 605 606 accmode = file->f_flags & O_ACCMODE; 607 608 if (accmode == O_RDWR) 609 return -EACCES; 610 611 /* 612 * We treat the minor number as the devno of the ur device 613 * to find in the driver tree. 614 */ 615 devno = MINOR(file->f_dentry->d_inode->i_rdev); 616 617 urd = urdev_get_from_devno(devno); 618 if (!urd) 619 return -ENXIO; 620 621 if (file->f_flags & O_NONBLOCK) { 622 if (!mutex_trylock(&urd->open_mutex)) { 623 rc = -EBUSY; 624 goto fail_put; 625 } 626 } else { 627 if (mutex_lock_interruptible(&urd->open_mutex)) { 628 rc = -ERESTARTSYS; 629 goto fail_put; 630 } 631 } 632 633 TRACE("ur_open\n"); 634 635 if (((accmode == O_RDONLY) && (urd->class != DEV_CLASS_UR_I)) || 636 ((accmode == O_WRONLY) && (urd->class != DEV_CLASS_UR_O))) { 637 TRACE("ur_open: unsupported dev class (%d)\n", urd->class); 638 rc = -EACCES; 639 goto fail_unlock; 640 } 641 642 rc = verify_device(urd); 643 if (rc) 644 goto fail_unlock; 645 646 urf = urfile_alloc(urd); 647 if (!urf) { 648 rc = -ENOMEM; 649 goto fail_unlock; 650 } 651 652 urf->dev_reclen = urd->reclen; 653 rc = get_file_reclen(urd); 654 if (rc < 0) 655 goto fail_urfile_free; 656 urf->file_reclen = rc; 657 file->private_data = urf; 658 return 0; 659 660 fail_urfile_free: 661 urfile_free(urf); 662 fail_unlock: 663 mutex_unlock(&urd->open_mutex); 664 fail_put: 665 urdev_put(urd); 666 return rc; 667 } 668 669 static int ur_release(struct inode *inode, struct file *file) 670 { 671 struct urfile *urf = file->private_data; 672 673 TRACE("ur_release\n"); 674 mutex_unlock(&urf->urd->open_mutex); 675 urdev_put(urf->urd); 676 urfile_free(urf); 677 return 0; 678 } 679 680 static loff_t ur_llseek(struct file *file, loff_t offset, int whence) 681 { 682 loff_t newpos; 683 684 if ((file->f_flags & O_ACCMODE) != O_RDONLY) 685 return -ESPIPE; /* seek allowed only for reader */ 686 if (offset % PAGE_SIZE) 687 return -ESPIPE; /* only multiples of 4K allowed */ 688 switch (whence) { 689 case 0: /* SEEK_SET */ 690 newpos = offset; 691 break; 692 case 1: /* SEEK_CUR */ 693 newpos = file->f_pos + offset; 694 break; 695 default: 696 return -EINVAL; 697 } 698 file->f_pos = newpos; 699 return newpos; 700 } 701 702 static struct file_operations ur_fops = { 703 .owner = THIS_MODULE, 704 .open = ur_open, 705 .release = ur_release, 706 .read = ur_read, 707 .write = ur_write, 708 .llseek = ur_llseek, 709 }; 710 711 /* 712 * ccw_device infrastructure: 713 * ur_probe gets its own ref to the device (i.e. get_device), 714 * creates the struct urdev, the device attributes, sets up 715 * the interrupt handler and validates the virtual unit record device. 716 * ur_remove removes the device attributes, frees the struct urdev 717 * and drops (put_device) the ref to the device we got in ur_probe. 718 */ 719 static int ur_probe(struct ccw_device *cdev) 720 { 721 struct urdev *urd; 722 int rc; 723 724 TRACE("ur_probe: cdev=%p state=%d\n", cdev, *(int *) cdev->private); 725 726 if (!get_device(&cdev->dev)) 727 return -ENODEV; 728 729 urd = urdev_alloc(cdev); 730 if (!urd) { 731 rc = -ENOMEM; 732 goto fail; 733 } 734 rc = ur_create_attributes(&cdev->dev); 735 if (rc) { 736 rc = -ENOMEM; 737 goto fail; 738 } 739 cdev->dev.driver_data = urd; 740 cdev->handler = ur_int_handler; 741 742 /* validate virtual unit record device */ 743 urd->class = get_urd_class(urd); 744 if (urd->class < 0) { 745 rc = urd->class; 746 goto fail; 747 } 748 if ((urd->class != DEV_CLASS_UR_I) && (urd->class != DEV_CLASS_UR_O)) { 749 rc = -ENOTSUPP; 750 goto fail; 751 } 752 753 return 0; 754 755 fail: 756 urdev_free(urd); 757 put_device(&cdev->dev); 758 return rc; 759 } 760 761 static void ur_remove(struct ccw_device *cdev) 762 { 763 struct urdev *urd = cdev->dev.driver_data; 764 765 TRACE("ur_remove\n"); 766 if (cdev->online) 767 ur_set_offline(cdev); 768 ur_remove_attributes(&cdev->dev); 769 urdev_free(urd); 770 put_device(&cdev->dev); 771 } 772 773 static int ur_set_online(struct ccw_device *cdev) 774 { 775 struct urdev *urd; 776 int minor, major, rc; 777 char node_id[16]; 778 779 TRACE("ur_set_online: cdev=%p state=%d\n", cdev, 780 *(int *) cdev->private); 781 782 if (!try_module_get(ur_driver.owner)) 783 return -EINVAL; 784 785 urd = (struct urdev *) cdev->dev.driver_data; 786 minor = urd->dev_id.devno; 787 major = MAJOR(ur_first_dev_maj_min); 788 789 urd->char_device = cdev_alloc(); 790 if (!urd->char_device) { 791 rc = -ENOMEM; 792 goto fail_module_put; 793 } 794 795 cdev_init(urd->char_device, &ur_fops); 796 urd->char_device->dev = MKDEV(major, minor); 797 urd->char_device->owner = ur_fops.owner; 798 799 rc = cdev_add(urd->char_device, urd->char_device->dev, 1); 800 if (rc) 801 goto fail_free_cdev; 802 if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { 803 if (urd->class == DEV_CLASS_UR_I) 804 sprintf(node_id, "vmrdr-%s", cdev->dev.bus_id); 805 if (urd->class == DEV_CLASS_UR_O) 806 sprintf(node_id, "vmpun-%s", cdev->dev.bus_id); 807 } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { 808 sprintf(node_id, "vmprt-%s", cdev->dev.bus_id); 809 } else { 810 rc = -ENOTSUPP; 811 goto fail_free_cdev; 812 } 813 814 urd->device = device_create(vmur_class, NULL, urd->char_device->dev, 815 "%s", node_id); 816 if (IS_ERR(urd->device)) { 817 rc = PTR_ERR(urd->device); 818 TRACE("ur_set_online: device_create rc=%d\n", rc); 819 goto fail_free_cdev; 820 } 821 822 return 0; 823 824 fail_free_cdev: 825 cdev_del(urd->char_device); 826 fail_module_put: 827 module_put(ur_driver.owner); 828 829 return rc; 830 } 831 832 static int ur_set_offline(struct ccw_device *cdev) 833 { 834 struct urdev *urd; 835 836 TRACE("ur_set_offline: cdev=%p cdev->private=%p state=%d\n", 837 cdev, cdev->private, *(int *) cdev->private); 838 urd = (struct urdev *) cdev->dev.driver_data; 839 device_destroy(vmur_class, urd->char_device->dev); 840 cdev_del(urd->char_device); 841 module_put(ur_driver.owner); 842 843 return 0; 844 } 845 846 /* 847 * Module initialisation and cleanup 848 */ 849 static int __init ur_init(void) 850 { 851 int rc; 852 dev_t dev; 853 854 if (!MACHINE_IS_VM) { 855 PRINT_ERR("%s is only available under z/VM.\n", ur_banner); 856 return -ENODEV; 857 } 858 859 vmur_dbf = debug_register("vmur", 4, 1, 4 * sizeof(long)); 860 if (!vmur_dbf) 861 return -ENOMEM; 862 rc = debug_register_view(vmur_dbf, &debug_sprintf_view); 863 if (rc) 864 goto fail_free_dbf; 865 866 debug_set_level(vmur_dbf, 6); 867 868 rc = ccw_driver_register(&ur_driver); 869 if (rc) 870 goto fail_free_dbf; 871 872 rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); 873 if (rc) { 874 PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); 875 goto fail_unregister_driver; 876 } 877 ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); 878 879 vmur_class = class_create(THIS_MODULE, "vmur"); 880 if (IS_ERR(vmur_class)) { 881 rc = PTR_ERR(vmur_class); 882 goto fail_unregister_region; 883 } 884 PRINT_INFO("%s loaded.\n", ur_banner); 885 return 0; 886 887 fail_unregister_region: 888 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); 889 fail_unregister_driver: 890 ccw_driver_unregister(&ur_driver); 891 fail_free_dbf: 892 debug_unregister(vmur_dbf); 893 return rc; 894 } 895 896 static void __exit ur_exit(void) 897 { 898 class_destroy(vmur_class); 899 unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); 900 ccw_driver_unregister(&ur_driver); 901 debug_unregister(vmur_dbf); 902 PRINT_INFO("%s unloaded.\n", ur_banner); 903 } 904 905 module_init(ur_init); 906 module_exit(ur_exit); 907