/*
 * basic function of the tape device driver
 *
 * S390 and zSeries version
 * Copyright IBM Corp. 2001, 2009
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *	      Michael Holzheu <holzheu@de.ibm.com>
 *	      Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Stefan Bader <shbader@de.ibm.com>
 */

#define KMSG_COMPONENT "tape"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>		// for kernel parameters
#include <linux/kmod.h>		// for requesting modules
#include <linux/spinlock.h>	// for locks
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/types.h>		// for variable types

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define LONG_BUSY_TIMEOUT 180 /* seconds */

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(struct work_struct *);
static void tape_long_busy_timeout(struct timer_list *t);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock.
 */
static LIST_HEAD(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS",	[TO_CRYPT_ON] = "CON",
	[TO_CRYPT_OFF] = "COF",	[TO_KEKL_SET] = "KLS",
	[TO_KEKL_QUERY] = "KLQ", [TO_RDC] = "RDC",
};

static int devid_to_int(struct ccw_dev_id *dev_id)
{
	return dev_id->devno + (dev_id->ssid << 16);
}
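/*
 * Worked example (for illustration): a device with devno 0x0181 in
 * subchannel set 1 maps to (1 << 16) + 0x0181 = 0x00010181, so the
 * resulting integer stays unique across subchannel sets.
 */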
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = dev_get_drvdata(dev);
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n", tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static const struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
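/*
 * The group above is registered on the ccw device in tape_generic_probe(),
 * so the attributes appear in the device's sysfs directory, e.g.
 * (bus ID illustrative):
 *	cat /sys/bus/ccw/devices/0.0.0181/state
 */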
/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev: %x\n", device->first_minor);
	DBF_EVENT(4, "old ts:\t\n");
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	DBF_EVENT(4, "new ts:\t\n");
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

struct tape_med_state_work_data {
	struct tape_device *device;
	enum tape_medium_state state;
	struct work_struct work;
};

static void
tape_med_state_work_handler(struct work_struct *work)
{
	static char env_state_loaded[] = "MEDIUM_STATE=LOADED";
	static char env_state_unloaded[] = "MEDIUM_STATE=UNLOADED";
	struct tape_med_state_work_data *p =
		container_of(work, struct tape_med_state_work_data, work);
	struct tape_device *device = p->device;
	char *envp[] = { NULL, NULL };

	switch (p->state) {
	case MS_UNLOADED:
		pr_info("%s: The tape cartridge has been successfully "
			"unloaded\n", dev_name(&device->cdev->dev));
		envp[0] = env_state_unloaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	case MS_LOADED:
		pr_info("%s: A tape cartridge has been mounted\n",
			dev_name(&device->cdev->dev));
		envp[0] = env_state_loaded;
		kobject_uevent_env(&device->cdev->dev.kobj, KOBJ_CHANGE, envp);
		break;
	default:
		break;
	}
	tape_put_device(device);
	kfree(p);
}

static void
tape_med_state_work(struct tape_device *device, enum tape_medium_state state)
{
	struct tape_med_state_work_data *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (p) {
		INIT_WORK(&p->work, tape_med_state_work_handler);
		p->device = tape_get_device(device);
		p->state = state;
		schedule_work(&p->work);
	}
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	enum tape_medium_state oldstate;

	oldstate = device->medium_state;
	if (oldstate == newstate)
		return;
	device->medium_state = newstate;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		if (oldstate == MS_LOADED)
			tape_med_state_work(device, MS_UNLOADED);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		if (oldstate == MS_UNLOADED)
			tape_med_state_work(device, MS_LOADED);
		break;
	default:
		break;
	}
	wake_up(&device->state_change_wq);
}
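/*
 * Userspace can act on the MEDIUM_STATE change uevents emitted above, e.g.
 * with a udev rule along these lines (match and handler purely
 * illustrative):
 *	ACTION=="change", ENV{MEDIUM_STATE}=="LOADED", \
 *		RUN+="/usr/local/sbin/tape-loaded.sh"
 */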
/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_delayed_work(&device->tape_dnr, 0);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}

/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		    struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	timer_setup(&device->lb_timeout, tape_long_busy_timeout, 0);

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static void
tape_cleanup_device(struct tape_device *device)
{
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}
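/*
 * Sketch of the minor layout produced by tape_assign_minor() above
 * (assuming TAPE_MINORS_PER_DEV is 2, as defined in tape.h): the first
 * drive gets minors 0-1, the next 2-3, and a gap freed by
 * tape_remove_minor() is reused by the next probe. Together with the
 * 256-minor ceiling this bounds the driver at 128 drives.
 */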
/*
 * Suspend device.
 *
 * Called by the common I/O layer if the drive should be suspended on user
 * request. We refuse to suspend if the device is loaded or in use for the
 * following reason:
 * While the Linux guest is suspended, it might be logged off which causes
 * devices to be detached. Tape devices are automatically rewound and unloaded
 * during DETACH processing (unless the tape device was attached with the
 * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to
 * resume the original state of the tape device, since we would need to
 * manually re-load the cartridge which was active at suspend time.
 */
int tape_generic_pm_suspend(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n",
		device->cdev_id, device);

	if (device->medium_state != MS_UNLOADED) {
		pr_err("A cartridge is loaded in tape device %s, "
		       "refusing to suspend\n", dev_name(&cdev->dev));
		return -EBUSY;
	}

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
	case TS_UNUSED:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	default:
		pr_err("Tape device %s is busy, refusing to "
		       "suspend\n", dev_name(&cdev->dev));
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id);
	return 0;
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
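/*
 * Online/offline transitions are normally driven through the ccw device's
 * "online" sysfs attribute, e.g. (bus ID illustrative):
 *	echo 0 > /sys/bus/ccw/devices/0.0.0181/online
 * which reaches tape_generic_offline() via the discipline's ccw_driver.
 */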
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = kzalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		return ERR_PTR(-ENOMEM);
	}
	device->modeset_byte = kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	mutex_init(&device->mutex);
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	init_waitqueue_head(&device->wait_queue);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device(struct tape_device *device)
{
	int count;

	count = atomic_inc_return(&device->ref_count);
	DBF_EVENT(4, "tape_get_device(%p) = %i\n", device, count);
	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero, free the device structure.
 */
void
tape_put_device(struct tape_device *device)
{
	int count;

	count = atomic_dec_return(&device->ref_count);
	DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, count);
	BUG_ON(count < 0);
	if (count == 0) {
		kfree(device->modeset_byte);
		kfree(device);
	}
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_find_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
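/*
 * Lookup sketch: tape_find_device() returns a referenced device or an
 * ERR_PTR, so every successful lookup must be balanced by tape_put_device():
 *
 *	struct tape_device *device = tape_find_device(devindex);
 *
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	... use the device ...
 *	tape_put_device(device);
 */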
/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;
	int ret;
	struct ccw_dev_id dev_id;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
				     CCWDEV_DO_MULTIPATH);
	ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);
	if (ret) {
		tape_put_device(device);
		return ret;
	}
	dev_set_drvdata(&cdev->dev, device);
	cdev->handler = __tape_do_irq;
	device->cdev = cdev;
	ccw_device_get_id(cdev, &dev_id);
	device->cdev_id = devid_to_int(&dev_id);
	return ret;
}

static void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *request;
	struct list_head *l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = NULL;
		tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = dev_get_drvdata(&cdev->dev);
	if (!device) {
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		fallthrough;
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		pr_warn("%s: A tape unit was detached while in use\n",
			dev_name(&device->cdev->dev));
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	device = dev_get_drvdata(&cdev->dev);
	if (device) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		dev_set_drvdata(&cdev->dev, NULL);
		tape_put_device(device);
	}
}

/*
 * Allocate a new tape ccw request
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	BUG_ON(datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = kzalloc(sizeof(struct tape_request), GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kzalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request
 */
void
tape_free_request(struct tape_request *request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device)
		tape_put_device(request->device);
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}
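/*
 * Request life cycle as a discipline typically drives it (sizes and the
 * channel-program step illustrative, error handling abbreviated):
 *
 *	struct tape_request *request;
 *	int rc;
 *
 *	request = tape_alloc_request(1, 0);	(one ccw, no data buffer)
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	... build the channel program in request->cpaddr ...
 *	rc = tape_do_io(device, request);
 *	tape_free_request(request);
 */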
static int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_delayed_work(&device->tape_dnr, 0);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}

static void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(struct work_struct *work)
{
	struct tape_device *device =
		container_of(work, struct tape_device, tape_dnr.work);

	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static void tape_long_busy_timeout(struct timer_list *t)
{
	struct tape_device *device = from_timer(device, t, lb_timeout);
	struct tape_request *request;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	request = list_entry(device->req_queue.next, struct tape_request, list);
	BUG_ON(request->status != TAPE_REQUEST_LONG_BUSY);
	DBF_LH(6, "%08x: Long busy timeout.\n", device->cdev_id);
	__tape_start_next_request(device);
	tape_put_device(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}
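/*
 * A request parked in TAPE_REQUEST_LONG_BUSY is resumed by whichever comes
 * first: this timeout, or the not-ready-to-ready interrupt in
 * __tape_do_irq() that deletes the pending timer. Each path drops the
 * device reference held while the timer was pending, so it is released
 * exactly once.
 */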
static void
__tape_end_request(
	struct tape_device *device,
	struct tape_request *request,
	int rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to dbf
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x CSTAT: %02x\n",
		  irb->scsw.cmd.dstat, irb->scsw.cmd.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}

/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
	case TO_MSEN:
	case TO_ASSIGN:
	case TO_UNASSIGN:
	case TO_READ_ATTMSG:
	case TO_RDC:
		if (device->tape_state == TS_INIT)
			break;
		if (device->tape_state == TS_UNUSED)
			break;
		fallthrough;
	default:
		if (device->tape_state == TS_BLKUSE)
			break;
		if (device->tape_state != TS_IN_USE)
			return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p add to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
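/*
 * For asynchronous completion the caller installs its own callback before
 * tape_do_io_async(); __tape_end_request() invokes it once the request has
 * left the queue. A minimal sketch (callback name hypothetical; freeing the
 * request inside the callback is safe because nothing touches the request
 * afterwards):
 *
 *	static void my_done(struct tape_request *request, void *data)
 *	{
 *		tape_free_request(request);
 *	}
 *
 *	request->callback = my_done;
 *	rc = tape_do_io_async(device, request);
 */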
/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &device->wait_queue;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(device->wait_queue, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}

/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &device->wait_queue;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(device->wait_queue,
				      (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				device->wait_queue,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}

/*
 * Stop running ccw.
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
1134 */ 1135 if (irb->scsw.cmd.cc != 0 && 1136 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) && 1137 (request->status == TAPE_REQUEST_IN_IO)) { 1138 DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n", 1139 device->cdev_id, irb->scsw.cmd.cc, irb->scsw.cmd.fctl); 1140 request->status = TAPE_REQUEST_QUEUED; 1141 schedule_delayed_work(&device->tape_dnr, HZ); 1142 return; 1143 } 1144 1145 /* May be an unsolicited irq */ 1146 if(request != NULL) 1147 request->rescnt = irb->scsw.cmd.count; 1148 else if ((irb->scsw.cmd.dstat == 0x85 || irb->scsw.cmd.dstat == 0x80) && 1149 !list_empty(&device->req_queue)) { 1150 /* Not Ready to Ready after long busy ? */ 1151 struct tape_request *req; 1152 req = list_entry(device->req_queue.next, 1153 struct tape_request, list); 1154 if (req->status == TAPE_REQUEST_LONG_BUSY) { 1155 DBF_EVENT(3, "(%08x): del timer\n", device->cdev_id); 1156 if (del_timer(&device->lb_timeout)) { 1157 tape_put_device(device); 1158 __tape_start_next_request(device); 1159 } 1160 return; 1161 } 1162 } 1163 if (irb->scsw.cmd.dstat != 0x0c) { 1164 /* Set the 'ONLINE' flag depending on sense byte 1 */ 1165 if(*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE) 1166 device->tape_generic_status |= GMT_ONLINE(~0); 1167 else 1168 device->tape_generic_status &= ~GMT_ONLINE(~0); 1169 1170 /* 1171 * Any request that does not come back with channel end 1172 * and device end is unusual. Log the sense data. 1173 */ 1174 DBF_EVENT(3,"-- Tape Interrupthandler --\n"); 1175 tape_dump_sense_dbf(device, request, irb); 1176 } else { 1177 /* Upon normal completion the device _is_ online */ 1178 device->tape_generic_status |= GMT_ONLINE(~0); 1179 } 1180 if (device->tape_state == TS_NOT_OPER) { 1181 DBF_EVENT(6, "tape:device is not operational\n"); 1182 return; 1183 } 1184 1185 /* 1186 * Request that were canceled still come back with an interrupt. 1187 * To detect these request the state will be set to TAPE_REQUEST_DONE. 1188 */ 1189 if(request != NULL && request->status == TAPE_REQUEST_DONE) { 1190 __tape_end_request(device, request, -EIO); 1191 return; 1192 } 1193 1194 rc = device->discipline->irq(device, request, irb); 1195 /* 1196 * rc < 0 : request finished unsuccessfully. 1197 * rc == TAPE_IO_SUCCESS: request finished successfully. 1198 * rc == TAPE_IO_PENDING: request is still running. Ignore rc. 1199 * rc == TAPE_IO_RETRY: request finished but needs another go. 1200 * rc == TAPE_IO_STOP: request needs to get terminated. 1201 */ 1202 switch (rc) { 1203 case TAPE_IO_SUCCESS: 1204 /* Upon normal completion the device _is_ online */ 1205 device->tape_generic_status |= GMT_ONLINE(~0); 1206 __tape_end_request(device, request, rc); 1207 break; 1208 case TAPE_IO_PENDING: 1209 break; 1210 case TAPE_IO_LONG_BUSY: 1211 device->lb_timeout.expires = jiffies + 1212 LONG_BUSY_TIMEOUT * HZ; 1213 DBF_EVENT(3, "(%08x): add timer\n", device->cdev_id); 1214 add_timer(&device->lb_timeout); 1215 request->status = TAPE_REQUEST_LONG_BUSY; 1216 break; 1217 case TAPE_IO_RETRY: 1218 rc = __tape_start_io(device, request); 1219 if (rc) 1220 __tape_end_request(device, request, rc); 1221 break; 1222 case TAPE_IO_STOP: 1223 rc = __tape_cancel_io(device, request); 1224 if (rc) 1225 __tape_end_request(device, request, rc); 1226 break; 1227 default: 1228 if (rc > 0) { 1229 DBF_EVENT(6, "xunknownrc\n"); 1230 __tape_end_request(device, request, -EIO); 1231 } else { 1232 __tape_end_request(device, request, rc); 1233 } 1234 break; 1235 } 1236 } 1237 1238 /* 1239 * Tape device open function used by tape_char frontend. 
/*
 * Tape device open function used by tape_char frontend.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char frontend.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return 0;
}

/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg: %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR || mt_op == MTFSR || mt_op == MTFSF ||
	    mt_op == MTBSF || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}
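/*
 * Userspace reaches tape_mtop() through the MTIOCTOP ioctl of the character
 * device frontend. Illustrative counterpart (device node name may vary):
 *
 *	struct mtop op = { .mt_op = MTFSF, .mt_count = 3 };
 *	ioctl(fd, MTIOCTOP, &op);	(fd open on e.g. /dev/ntibm0)
 *
 * A count of 1200 would be split by the loop above into 500 + 500 + 200.
 */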
/*
 * Tape init function.
 */
static int
tape_init(void)
{
	TAPE_DBF_AREA = debug_register("tape", 2, 2, 4 * sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init\n");
	tape_proc_init();
	tapechar_init();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tape_proc_cleanup();
	debug_unregister(TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached tape device driver");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_generic_pm_suspend);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);