/*
 *  drivers/s390/char/tape_core.c
 *    basic function of the tape device driver
 *
 *  S390 and zSeries version
 *    Copyright (C) 2001,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *		 Michael Holzheu <holzheu@de.ibm.com>
 *		 Tuan Ngo-Anh <ngoanh@de.ibm.com>
 *		 Martin Schwidefsky <schwidefsky@de.ibm.com>
 *		 Stefan Bader <shbader@de.ibm.com>
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>		/* for kernel parameters */
#include <linux/kmod.h>		/* for requesting modules */
#include <linux/spinlock.h>	/* for locks */
#include <linux/vmalloc.h>
#include <linux/list.h>

#include <asm/types.h>		/* for variable types */

#define TAPE_DBF_AREA	tape_core_dbf

#include "tape.h"
#include "tape_std.h"

#define PRINTK_HEADER "TAPE_CORE: "

static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
static void tape_delayed_next_request(void * data);

/*
 * One list to contain all tape devices of all disciplines, so
 * we can assign the devices to minor numbers of the same major.
 * The list is protected by the rwlock below.
 */
static struct list_head tape_device_list = LIST_HEAD_INIT(tape_device_list);
static DEFINE_RWLOCK(tape_device_lock);

/*
 * Pointer to debug area.
 */
debug_info_t *TAPE_DBF_AREA = NULL;
EXPORT_SYMBOL(TAPE_DBF_AREA);

/*
 * Printable strings for tape enumerations.
 */
const char *tape_state_verbose[TS_SIZE] =
{
	[TS_UNUSED]   = "UNUSED",
	[TS_IN_USE]   = "IN_USE",
	[TS_BLKUSE]   = "BLKUSE",
	[TS_INIT]     = "INIT  ",
	[TS_NOT_OPER] = "NOT_OP"
};

const char *tape_op_verbose[TO_SIZE] =
{
	[TO_BLOCK] = "BLK",	[TO_BSB] = "BSB",
	[TO_BSF] = "BSF",	[TO_DSE] = "DSE",
	[TO_FSB] = "FSB",	[TO_FSF] = "FSF",
	[TO_LBL] = "LBL",	[TO_NOP] = "NOP",
	[TO_RBA] = "RBA",	[TO_RBI] = "RBI",
	[TO_RFO] = "RFO",	[TO_REW] = "REW",
	[TO_RUN] = "RUN",	[TO_WRI] = "WRI",
	[TO_WTM] = "WTM",	[TO_MSEN] = "MSN",
	[TO_LOAD] = "LOA",	[TO_READ_CONFIG] = "RCF",
	[TO_READ_ATTMSG] = "RAT",
	[TO_DIS] = "DIS",	[TO_ASSIGN] = "ASS",
	[TO_UNASSIGN] = "UAS"
};

/*
 * Convert a bus ID of the form "x.y.zzzz" (two decimal fields followed
 * by a hexadecimal device number) into a single integer.
 */
static inline int
busid_to_int(char *bus_id)
{
	int	dec;
	int	d;
	char	*s;

	for (s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = d;
	for (s++, d = 0; *s != '\0' && *s != '.'; s++)
		d = (d * 10) + (*s - '0');
	dec = (dec << 8) + d;

	for (s++; *s != '\0'; s++) {
		if (*s >= '0' && *s <= '9') {
			d = *s - '0';
		} else if (*s >= 'a' && *s <= 'f') {
			d = *s - 'a' + 10;
		} else {
			d = *s - 'A' + 10;
		}
		dec = (dec << 4) + d;
	}

	return dec;
}
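
/*
 * Worked example (illustrative only, not part of the driver): for the
 * hypothetical bus ID "0.1.4711" the loops above compute
 *
 *	dec = 0			(first decimal field "0")
 *	dec = (0 << 8) + 1	(second decimal field "1")
 *	dec = 0x00014711	(hex digits shifted in, 4 bits each)
 *
 * so busid_to_int("0.1.4711") == 0x00014711. The parser assumes a
 * well-formed "x.y.zzzz" string as provided by the common I/O layer.
 */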
/*
 * Some channel attached tape specific attributes.
 *
 * FIXME: In the future the first_minor and blocksize attribute should be
 *        replaced by a link to the cdev tree.
 */
static ssize_t
tape_medium_state_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->medium_state);
}

static
DEVICE_ATTR(medium_state, 0444, tape_medium_state_show, NULL);

static ssize_t
tape_first_minor_show(struct device *dev, struct device_attribute *attr,
		      char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->first_minor);
}

static
DEVICE_ATTR(first_minor, 0444, tape_first_minor_show, NULL);

static ssize_t
tape_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;
	return scnprintf(buf, PAGE_SIZE, "%s\n", (tdev->first_minor < 0) ?
		"OFFLINE" : tape_state_verbose[tdev->tape_state]);
}

static
DEVICE_ATTR(state, 0444, tape_state_show, NULL);

static ssize_t
tape_operation_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct tape_device *tdev;
	ssize_t rc;

	tdev = (struct tape_device *) dev->driver_data;
	if (tdev->first_minor < 0)
		return scnprintf(buf, PAGE_SIZE, "N/A\n");

	spin_lock_irq(get_ccwdev_lock(tdev->cdev));
	if (list_empty(&tdev->req_queue))
		rc = scnprintf(buf, PAGE_SIZE, "---\n");
	else {
		struct tape_request *req;

		req = list_entry(tdev->req_queue.next, struct tape_request,
			list);
		rc = scnprintf(buf, PAGE_SIZE, "%s\n",
			tape_op_verbose[req->op]);
	}
	spin_unlock_irq(get_ccwdev_lock(tdev->cdev));
	return rc;
}

static
DEVICE_ATTR(operation, 0444, tape_operation_show, NULL);

static ssize_t
tape_blocksize_show(struct device *dev, struct device_attribute *attr,
		    char *buf)
{
	struct tape_device *tdev;

	tdev = (struct tape_device *) dev->driver_data;

	return scnprintf(buf, PAGE_SIZE, "%i\n", tdev->char_data.block_size);
}

static
DEVICE_ATTR(blocksize, 0444, tape_blocksize_show, NULL);

static struct attribute *tape_attrs[] = {
	&dev_attr_medium_state.attr,
	&dev_attr_first_minor.attr,
	&dev_attr_state.attr,
	&dev_attr_operation.attr,
	&dev_attr_blocksize.attr,
	NULL
};

static struct attribute_group tape_attr_group = {
	.attrs = tape_attrs,
};
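
/*
 * Illustrative sketch (not part of the driver): once the attribute group
 * has been registered for a device, the attributes can be read from user
 * space; the bus ID 0.0.0480 below is hypothetical:
 *
 *	$ cat /sys/bus/ccw/devices/0.0.0480/state
 *	UNUSED
 *	$ cat /sys/bus/ccw/devices/0.0.0480/operation
 *	---
 *
 * Each read invokes the corresponding *_show function above with a
 * PAGE_SIZE buffer.
 */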
/*
 * Tape state functions
 */
void
tape_state_set(struct tape_device *device, enum tape_state newstate)
{
	const char *str;

	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(3, "ts_set err: not oper\n");
		return;
	}
	DBF_EVENT(4, "ts. dev:	%x\n", device->first_minor);
	if (device->tape_state < TS_SIZE && device->tape_state >= 0)
		str = tape_state_verbose[device->tape_state];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "old ts:	%s\n", str);
	if (newstate < TS_SIZE && newstate >= 0)
		str = tape_state_verbose[newstate];
	else
		str = "UNKNOWN TS";
	DBF_EVENT(4, "new ts:	%s\n", str);
	device->tape_state = newstate;
	wake_up(&device->state_change_wq);
}

void
tape_med_state_set(struct tape_device *device, enum tape_medium_state newstate)
{
	if (device->medium_state == newstate)
		return;
	switch (newstate) {
	case MS_UNLOADED:
		device->tape_generic_status |= GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape is unloaded\n",
			   device->cdev->dev.bus_id);
		break;
	case MS_LOADED:
		device->tape_generic_status &= ~GMT_DR_OPEN(~0);
		PRINT_INFO("(%s): Tape has been mounted\n",
			   device->cdev->dev.bus_id);
		break;
	default:
		/* print nothing */
		break;
	}
	device->medium_state = newstate;
	wake_up(&device->state_change_wq);
}

/*
 * Stop running ccw. Has to be called with the device lock held.
 */
static inline int
__tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int retries;
	int rc;

	/* Check if interrupt has already been processed */
	if (request->callback == NULL)
		return 0;

	rc = 0;
	for (retries = 0; retries < 5; retries++) {
		rc = ccw_device_clear(device->cdev, (long) request);

		switch (rc) {
		case 0:
			request->status = TAPE_REQUEST_DONE;
			return 0;
		case -EBUSY:
			request->status = TAPE_REQUEST_CANCEL;
			schedule_work(&device->tape_dnr);
			return 0;
		case -ENODEV:
			DBF_EXCEPTION(2, "device gone, retry\n");
			break;
		case -EIO:
			DBF_EXCEPTION(2, "I/O error, retry\n");
			break;
		default:
			BUG();
		}
	}

	return rc;
}

/*
 * Add device into the sorted list, giving it the first
 * available minor number.
 */
static int
tape_assign_minor(struct tape_device *device)
{
	struct tape_device *tmp;
	int minor;

	minor = 0;
	write_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (minor < tmp->first_minor)
			break;
		minor += TAPE_MINORS_PER_DEV;
	}
	if (minor >= 256) {
		write_unlock(&tape_device_lock);
		return -ENODEV;
	}
	device->first_minor = minor;
	list_add_tail(&device->node, &tmp->node);
	write_unlock(&tape_device_lock);
	return 0;
}

/* remove device from the list */
static void
tape_remove_minor(struct tape_device *device)
{
	write_lock(&tape_device_lock);
	list_del_init(&device->node);
	device->first_minor = -1;
	write_unlock(&tape_device_lock);
}
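
/*
 * Worked example for tape_assign_minor (illustrative only; assume
 * TAPE_MINORS_PER_DEV is 2): if the sorted list currently holds devices
 * with first_minor 0 and 4, the loop advances minor in steps of 2 and
 * stops at the first gap:
 *
 *	minor = 0  ->  not below 0, taken; advance to 2
 *	minor = 2  ->  2 < 4, gap found
 *
 * so the new device gets first_minor 2 and is inserted in front of the
 * device owning minor 4, keeping the list sorted. The 256-minor limit
 * bounds the number of tape devices per major number.
 */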
/*
 * Set a device online.
 *
 * This function is called by the common I/O layer to move a device from the
 * detected but offline into the online state.
 * If we return an error (RC < 0) the device remains in the offline state. This
 * can happen if the device is assigned somewhere else, for example.
 */
int
tape_generic_online(struct tape_device *device,
		   struct tape_discipline *discipline)
{
	int rc;

	DBF_LH(6, "tape_enable_device(%p, %p)\n", device, discipline);

	if (device->tape_state != TS_INIT) {
		DBF_LH(3, "Tapestate not INIT (%d)\n", device->tape_state);
		return -EINVAL;
	}

	/* Let the discipline have a go at the device. */
	device->discipline = discipline;
	if (!try_module_get(discipline->owner)) {
		PRINT_ERR("Cannot get module. Module gone.\n");
		return -EINVAL;
	}

	rc = discipline->setup_device(device);
	if (rc)
		goto out;
	rc = tape_assign_minor(device);
	if (rc)
		goto out_discipline;

	rc = tapechar_setup_device(device);
	if (rc)
		goto out_minor;
	rc = tapeblock_setup_device(device);
	if (rc)
		goto out_char;

	tape_state_set(device, TS_UNUSED);

	DBF_LH(3, "(%08x): Drive set online\n", device->cdev_id);

	return 0;

out_char:
	tapechar_cleanup_device(device);
out_minor:
	tape_remove_minor(device);
out_discipline:
	device->discipline->cleanup_device(device);
	device->discipline = NULL;
out:
	module_put(discipline->owner);
	return rc;
}

static inline void
tape_cleanup_device(struct tape_device *device)
{
	tapeblock_cleanup_device(device);
	tapechar_cleanup_device(device);
	device->discipline->cleanup_device(device);
	module_put(device->discipline->owner);
	tape_remove_minor(device);
	tape_med_state_set(device, MS_UNKNOWN);
}

/*
 * Set device offline.
 *
 * Called by the common I/O layer if the drive should be set offline on user
 * request. We may prevent this by returning an error.
 * Manual offline is only allowed while the drive is not in use.
 */
int
tape_generic_offline(struct tape_device *device)
{
	if (!device) {
		PRINT_ERR("tape_generic_offline: no such device\n");
		return -ENODEV;
	}

	DBF_LH(3, "(%08x): tape_generic_offline(%p)\n",
		device->cdev_id, device);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
	case TS_NOT_OPER:
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		tape_state_set(device, TS_INIT);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		DBF_EVENT(3, "(%08x): Set offline failed "
			"- drive in use.\n",
			device->cdev_id);
		PRINT_WARN("(%s): Set offline failed "
			"- drive in use.\n",
			device->cdev->dev.bus_id);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return -EBUSY;
	}

	DBF_LH(3, "(%08x): Drive set offline.\n", device->cdev_id);
	return 0;
}
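
/*
 * Illustrative summary (not part of the driver): tape_generic_online
 * acquires its resources in the order
 *
 *	module_get -> setup_device -> assign_minor -> char setup -> block setup
 *
 * and the error labels unwind whatever has been acquired so far in exactly
 * the reverse order, which is also the order tape_cleanup_device uses for
 * a fully set up device.
 */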
/*
 * Allocate memory for a new device structure.
 */
static struct tape_device *
tape_alloc_device(void)
{
	struct tape_device *device;

	device = (struct tape_device *)
		kmalloc(sizeof(struct tape_device), GFP_KERNEL);
	if (device == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO ("can't allocate memory for "
			    "tape info structure\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(device, 0, sizeof(struct tape_device));
	device->modeset_byte = (char *) kmalloc(1, GFP_KERNEL | GFP_DMA);
	if (device->modeset_byte == NULL) {
		DBF_EXCEPTION(2, "ti:no mem\n");
		PRINT_INFO("can't allocate memory for modeset byte\n");
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&device->req_queue);
	INIT_LIST_HEAD(&device->node);
	init_waitqueue_head(&device->state_change_wq);
	device->tape_state = TS_INIT;
	device->medium_state = MS_UNKNOWN;
	*device->modeset_byte = 0;
	device->first_minor = -1;
	atomic_set(&device->ref_count, 1);
	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);

	return device;
}

/*
 * Get a reference to an existing device structure. This will automatically
 * increment the reference count.
 */
struct tape_device *
tape_get_device_reference(struct tape_device *device)
{
	DBF_EVENT(4, "tape_get_device_reference(%p) = %i\n", device,
		atomic_inc_return(&device->ref_count));

	return device;
}

/*
 * Decrease the reference counter of a device structure. If the
 * reference counter reaches zero free the device structure.
 * The function returns a NULL pointer to be used by the caller
 * for clearing reference pointers.
 */
struct tape_device *
tape_put_device(struct tape_device *device)
{
	int remain;

	remain = atomic_dec_return(&device->ref_count);
	if (remain > 0) {
		DBF_EVENT(4, "tape_put_device(%p) -> %i\n", device, remain);
	} else {
		if (remain < 0) {
			DBF_EVENT(4, "put device without reference\n");
			PRINT_ERR("put device without reference\n");
		} else {
			DBF_EVENT(4, "tape_free_device(%p)\n", device);
			kfree(device->modeset_byte);
			kfree(device);
		}
	}

	return NULL;
}

/*
 * Find tape device by a device index.
 */
struct tape_device *
tape_get_device(int devindex)
{
	struct tape_device *device, *tmp;

	device = ERR_PTR(-ENODEV);
	read_lock(&tape_device_lock);
	list_for_each_entry(tmp, &tape_device_list, node) {
		if (tmp->first_minor / TAPE_MINORS_PER_DEV == devindex) {
			device = tape_get_device_reference(tmp);
			break;
		}
	}
	read_unlock(&tape_device_lock);
	return device;
}
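
/*
 * A minimal usage sketch for the reference counting pair above
 * (illustrative only; 'my_ref' is a hypothetical local):
 *
 *	struct tape_device *my_ref;
 *
 *	my_ref = tape_get_device_reference(device);
 *	... use my_ref ...
 *	my_ref = tape_put_device(my_ref);	// returns NULL
 *
 * Assigning the NULL return value of tape_put_device back to the
 * reference pointer is the intended pattern for clearing stale pointers.
 */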
/*
 * Driverfs tape probe function.
 */
int
tape_generic_probe(struct ccw_device *cdev)
{
	struct tape_device *device;

	device = tape_alloc_device();
	if (IS_ERR(device))
		return -ENODEV;
	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
	cdev->dev.driver_data = device;
	device->cdev = cdev;
	device->cdev_id = busid_to_int(cdev->dev.bus_id);
	cdev->handler = __tape_do_irq;

	ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	sysfs_create_group(&cdev->dev.kobj, &tape_attr_group);

	return 0;
}

static inline void
__tape_discard_requests(struct tape_device *device)
{
	struct tape_request *	request;
	struct list_head *	l, *n;

	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);
		if (request->status == TAPE_REQUEST_IN_IO)
			request->status = TAPE_REQUEST_DONE;
		list_del(&request->list);

		/* Decrease ref_count for removed request. */
		request->device = tape_put_device(device);
		request->rc = -EIO;
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}
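
/*
 * Illustrative consequence (not part of the driver): after
 * __tape_discard_requests, a process blocked in tape_do_io sees its
 * __tape_wake_up callback run and returns request->rc, i.e.
 *
 *	rc = tape_do_io(device, request);	// rc == -EIO here
 *
 * since every discarded request is completed with -EIO.
 */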
/*
 * Driverfs tape remove function.
 *
 * This function is called whenever the common I/O layer detects the device
 * gone. This can happen at any time and we cannot refuse.
 */
void
tape_generic_remove(struct ccw_device *cdev)
{
	struct tape_device *	device;

	device = cdev->dev.driver_data;
	if (!device) {
		PRINT_ERR("No device pointer in tape_generic_remove!\n");
		return;
	}
	DBF_LH(3, "(%08x): tape_generic_remove(%p)\n", device->cdev_id, cdev);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	switch (device->tape_state) {
	case TS_INIT:
		tape_state_set(device, TS_NOT_OPER);
		/* fall through */
	case TS_NOT_OPER:
		/*
		 * Nothing to do.
		 */
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		break;
	case TS_UNUSED:
		/*
		 * Need only to release the device.
		 */
		tape_state_set(device, TS_NOT_OPER);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
		break;
	default:
		/*
		 * There may be requests on the queue. We will not get
		 * an interrupt for a request that was running. So we
		 * just post them all as I/O errors.
		 */
		DBF_EVENT(3, "(%08x): Drive in use vanished!\n",
			device->cdev_id);
		PRINT_WARN("(%s): Drive in use vanished - "
			"expect trouble!\n",
			device->cdev->dev.bus_id);
		PRINT_WARN("State was %i\n", device->tape_state);
		tape_state_set(device, TS_NOT_OPER);
		__tape_discard_requests(device);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		tape_cleanup_device(device);
	}

	if (cdev->dev.driver_data != NULL) {
		sysfs_remove_group(&cdev->dev.kobj, &tape_attr_group);
		cdev->dev.driver_data = tape_put_device(cdev->dev.driver_data);
	}
}

/*
 * Allocate a new tape ccw request.
 */
struct tape_request *
tape_alloc_request(int cplength, int datasize)
{
	struct tape_request *request;

	if (datasize > PAGE_SIZE || (cplength*sizeof(struct ccw1)) > PAGE_SIZE)
		BUG();

	DBF_LH(6, "tape_alloc_request(%d, %d)\n", cplength, datasize);

	request = (struct tape_request *) kmalloc(sizeof(struct tape_request),
						  GFP_KERNEL);
	if (request == NULL) {
		DBF_EXCEPTION(1, "cqra nomem\n");
		return ERR_PTR(-ENOMEM);
	}
	memset(request, 0, sizeof(struct tape_request));
	/* allocate channel program */
	if (cplength > 0) {
		request->cpaddr = kmalloc(cplength*sizeof(struct ccw1),
					  GFP_ATOMIC | GFP_DMA);
		if (request->cpaddr == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
		memset(request->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	/* alloc small kernel buffer */
	if (datasize > 0) {
		request->cpdata = kmalloc(datasize, GFP_KERNEL | GFP_DMA);
		if (request->cpdata == NULL) {
			DBF_EXCEPTION(1, "cqra nomem\n");
			kfree(request->cpaddr);
			kfree(request);
			return ERR_PTR(-ENOMEM);
		}
		memset(request->cpdata, 0, datasize);
	}
	DBF_LH(6, "New request %p(%p/%p)\n", request, request->cpaddr,
		request->cpdata);

	return request;
}

/*
 * Free tape ccw request.
 */
void
tape_free_request (struct tape_request * request)
{
	DBF_LH(6, "Free request %p\n", request);

	if (request->device != NULL) {
		request->device = tape_put_device(request->device);
	}
	kfree(request->cpdata);
	kfree(request->cpaddr);
	kfree(request);
}

static inline int
__tape_start_io(struct tape_device *device, struct tape_request *request)
{
	int rc;

#ifdef CONFIG_S390_TAPE_BLOCK
	if (request->op == TO_BLOCK)
		device->discipline->check_locate(device, request);
#endif
	rc = ccw_device_start(
		device->cdev,
		request->cpaddr,
		(unsigned long) request,
		0x00,
		request->options
	);
	if (rc == 0) {
		request->status = TAPE_REQUEST_IN_IO;
	} else if (rc == -EBUSY) {
		/* The common I/O subsystem is currently busy. Retry later. */
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);
		rc = 0;
	} else {
		/* Start failed. Remove request and indicate failure. */
		DBF_EVENT(1, "tape: start request failed with RC = %i\n", rc);
	}
	return rc;
}
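
/*
 * A minimal request-lifecycle sketch, modelled on how the disciplines use
 * the helpers from tape_std.h (illustrative only; the channel program
 * shown is a hypothetical no-op):
 *
 *	struct tape_request *request;
 *
 *	request = tape_alloc_request(2, 0);	// two ccws, no data buffer
 *	if (IS_ERR(request))
 *		return PTR_ERR(request);
 *	request->op = TO_NOP;
 *	tape_ccw_cc(request->cpaddr, MODE_SET_DB, 1, device->modeset_byte);
 *	tape_ccw_end(request->cpaddr + 1, NOP, 0, NULL);
 *	rc = tape_do_io(device, request);	// defined below; waits
 *	tape_free_request(request);
 *
 * tape_do_io takes its own device reference via __tape_start_request, so
 * tape_free_request can safely drop it afterwards.
 */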
static inline void
__tape_start_next_request(struct tape_device *device)
{
	struct list_head *l, *n;
	struct tape_request *request;
	int rc;

	DBF_LH(6, "__tape_start_next_request(%p)\n", device);
	/*
	 * Try to start each request on the request queue until one is
	 * started successfully.
	 */
	list_for_each_safe(l, n, &device->req_queue) {
		request = list_entry(l, struct tape_request, list);

		/*
		 * Avoid race condition if bottom-half was triggered more than
		 * once.
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
		 * was busy at that time. This can only happen if this
		 * function is called by delayed_next_request.
		 * Otherwise we start the next request on the queue.
		 */
		if (request->status == TAPE_REQUEST_CANCEL) {
			rc = __tape_cancel_io(device, request);
		} else {
			rc = __tape_start_io(device, request);
		}
		if (rc == 0)
			return;

		/* Set ending status. */
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}
}

static void
tape_delayed_next_request(void *data)
{
	struct tape_device *	device;

	device = (struct tape_device *) data;
	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	__tape_start_next_request(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
}

static inline void
__tape_end_request(
	struct tape_device *	device,
	struct tape_request *	request,
	int			rc)
{
	DBF_LH(6, "__tape_end_request(%p, %p, %i)\n", device, request, rc);
	if (request) {
		request->rc = rc;
		request->status = TAPE_REQUEST_DONE;

		/* Remove from request queue. */
		list_del(&request->list);

		/* Do callback. */
		if (request->callback != NULL)
			request->callback(request, request->callback_data);
	}

	/* Start next request. */
	if (!list_empty(&device->req_queue))
		__tape_start_next_request(device);
}

/*
 * Write sense data to console/dbf.
 */
void
tape_dump_sense(struct tape_device* device, struct tape_request *request,
		struct irb *irb)
{
	unsigned int *sptr;

	PRINT_INFO("-------------------------------------------------\n");
	PRINT_INFO("DSTAT : %02x  CSTAT: %02x  CPA: %04x\n",
		   irb->scsw.dstat, irb->scsw.cstat, irb->scsw.cpa);
	PRINT_INFO("DEVICE: %s\n", device->cdev->dev.bus_id);
	if (request != NULL)
		PRINT_INFO("OP	  : %s\n", tape_op_verbose[request->op]);

	sptr = (unsigned int *) irb->ecw;
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[0], sptr[1], sptr[2], sptr[3]);
	PRINT_INFO("Sense data: %08X %08X %08X %08X \n",
		   sptr[4], sptr[5], sptr[6], sptr[7]);
	PRINT_INFO("--------------------------------------------------\n");
}

/*
 * Write sense data to dbf.
 */
void
tape_dump_sense_dbf(struct tape_device *device, struct tape_request *request,
		    struct irb *irb)
{
	unsigned int *sptr;
	const char *op;

	if (request != NULL)
		op = tape_op_verbose[request->op];
	else
		op = "---";
	DBF_EVENT(3, "DSTAT : %02x   CSTAT: %02x\n",
		  irb->scsw.dstat, irb->scsw.cstat);
	DBF_EVENT(3, "DEVICE: %08x OP\t: %s\n", device->cdev_id, op);
	sptr = (unsigned int *) irb->ecw;
	DBF_EVENT(3, "%08x %08x\n", sptr[0], sptr[1]);
	DBF_EVENT(3, "%08x %08x\n", sptr[2], sptr[3]);
	DBF_EVENT(3, "%08x %08x\n", sptr[4], sptr[5]);
	DBF_EVENT(3, "%08x %08x\n", sptr[6], sptr[7]);
}
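
/*
 * Request state overview (illustrative summary, derived from the code
 * above):
 *
 *	TAPE_REQUEST_QUEUED  --__tape_start_io()--->  TAPE_REQUEST_IN_IO
 *	TAPE_REQUEST_IN_IO   --interrupt/cancel--->   TAPE_REQUEST_DONE
 *	TAPE_REQUEST_IN_IO   --cancel, cio busy--->   TAPE_REQUEST_CANCEL
 *	TAPE_REQUEST_CANCEL  --tape_dnr worker---->   TAPE_REQUEST_DONE
 *
 * A request leaves the queue with status DONE and its callback run,
 * either from __tape_end_request(), from a failed (re)start in
 * __tape_start_next_request(), or from __tape_discard_requests().
 */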
/*
 * I/O helper function. Adds the request to the request queue
 * and starts it if the tape is idle. Has to be called with
 * the device lock held.
 */
static inline int
__tape_start_request(struct tape_device *device, struct tape_request *request)
{
	int rc;

	switch (request->op) {
		case TO_MSEN:
		case TO_ASSIGN:
		case TO_UNASSIGN:
		case TO_READ_ATTMSG:
			if (device->tape_state == TS_INIT)
				break;
			if (device->tape_state == TS_UNUSED)
				break;
			/* fall through */
		default:
			if (device->tape_state == TS_BLKUSE)
				break;
			if (device->tape_state != TS_IN_USE)
				return -ENODEV;
	}

	/* Increase use count of device for the added request. */
	request->device = tape_get_device_reference(device);

	if (list_empty(&device->req_queue)) {
		/* No other requests are on the queue. Start this one. */
		rc = __tape_start_io(device, request);
		if (rc)
			return rc;

		DBF_LH(5, "Request %p added for execution.\n", request);
		list_add(&request->list, &device->req_queue);
	} else {
		DBF_LH(5, "Request %p added to queue.\n", request);
		request->status = TAPE_REQUEST_QUEUED;
		list_add_tail(&request->list, &device->req_queue);
	}
	return 0;
}

/*
 * Add the request to the request queue, try to start it if the
 * tape is idle. Return without waiting for end of i/o.
 */
int
tape_do_io_async(struct tape_device *device, struct tape_request *request)
{
	int rc;

	DBF_LH(6, "tape_do_io_async(%p, %p)\n", device, request);

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * tape_do_io/__tape_wake_up
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait uninterruptibly for its completion.
 */
static void
__tape_wake_up(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up((wait_queue_head_t *) data);
}

int
tape_do_io(struct tape_device *device, struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up;
	request->callback_data = &wq;
	/* Add request to request queue and try to start it. */
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	wait_event(wq, (request->callback == NULL));
	/* Get rc from request */
	return request->rc;
}
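
/*
 * A minimal asynchronous usage sketch (illustrative only; the callback
 * name 'mydisc_done' and its body are hypothetical):
 *
 *	static void
 *	mydisc_done(struct tape_request *request, void *data)
 *	{
 *		// runs under the ccw device lock, from interrupt context
 *		if (request->rc)
 *			DBF_EVENT(3, "async rc: %d\n", request->rc);
 *		tape_free_request(request);
 *	}
 *
 *	request->callback = mydisc_done;
 *	request->callback_data = NULL;
 *	rc = tape_do_io_async(device, request);
 *
 * Unlike tape_do_io, the caller returns immediately; completion is
 * reported solely through the callback. Freeing the request in the
 * callback is safe because __tape_end_request removes it from the
 * queue before calling back.
 */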
/*
 * tape_do_io_interruptible/__tape_wake_up_interruptible
 * Add the request to the request queue, try to start it if the
 * tape is idle and wait interruptibly for its completion.
 */
static void
__tape_wake_up_interruptible(struct tape_request *request, void *data)
{
	request->callback = NULL;
	wake_up_interruptible((wait_queue_head_t *) data);
}

int
tape_do_io_interruptible(struct tape_device *device,
			 struct tape_request *request)
{
	wait_queue_head_t wq;
	int rc;

	init_waitqueue_head(&wq);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Setup callback */
	request->callback = __tape_wake_up_interruptible;
	request->callback_data = &wq;
	rc = __tape_start_request(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc)
		return rc;
	/* Request added to the queue. Wait for its completion. */
	rc = wait_event_interruptible(wq, (request->callback == NULL));
	if (rc != -ERESTARTSYS)
		/* Request finished normally. */
		return request->rc;

	/* Interrupted by a signal. We have to stop the current request. */
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (rc == 0) {
		/* Wait for the interrupt that acknowledges the halt. */
		do {
			rc = wait_event_interruptible(
				wq,
				(request->callback == NULL)
			);
		} while (rc == -ERESTARTSYS);

		DBF_EVENT(3, "IO stopped on %08x\n", device->cdev_id);
		rc = -ERESTARTSYS;
	}
	return rc;
}
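
/*
 * Illustrative timeline (derived from the code above): if the waiting
 * task catches a signal, the request is cancelled via __tape_cancel_io
 * and the function then waits, ignoring further signals, until the halt
 * is acknowledged by an interrupt. On a successful cancel the caller
 * sees -ERESTARTSYS rather than a partial result.
 */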
/*
 * Tape interrupt routine, called from the ccw_device layer
 */
static void
__tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct tape_device *device;
	struct tape_request *request;
	int rc;

	device = (struct tape_device *) cdev->dev.driver_data;
	if (device == NULL) {
		PRINT_ERR("could not get device structure for %s "
			  "in interrupt\n", cdev->dev.bus_id);
		return;
	}
	request = (struct tape_request *) intparm;

	DBF_LH(6, "__tape_do_irq(device=%p, request=%p)\n", device, request);

	/* On special conditions irb is an error pointer */
	if (IS_ERR(irb)) {
		/* FIXME: What to do with the request? */
		switch (PTR_ERR(irb)) {
		case -ETIMEDOUT:
			PRINT_WARN("(%s): Request timed out\n",
				cdev->dev.bus_id);
			/* fall through */
		case -EIO:
			__tape_end_request(device, request, -EIO);
			break;
		default:
			PRINT_ERR("(%s): Unexpected i/o error %li\n",
				cdev->dev.bus_id,
				PTR_ERR(irb));
		}
		return;
	}

	/*
	 * If the condition code is not zero and the start function bit is
	 * still set, this is a deferred error and the last start I/O did
	 * not succeed. Restart the request now.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
		PRINT_WARN("(%s): deferred cc=%i. restarting\n",
			cdev->dev.bus_id,
			irb->scsw.cc);
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		return;
	}

	/* May be an unsolicited irq */
	if (request != NULL)
		request->rescnt = irb->scsw.count;

	/* 0x0c is channel end + device end */
	if (irb->scsw.dstat != 0x0c) {
		/* Set the 'ONLINE' flag depending on sense byte 1 */
		if (*(((__u8 *) irb->ecw) + 1) & SENSE_DRIVE_ONLINE)
			device->tape_generic_status |= GMT_ONLINE(~0);
		else
			device->tape_generic_status &= ~GMT_ONLINE(~0);

		/*
		 * Any request that does not come back with channel end
		 * and device end is unusual. Log the sense data.
		 */
		DBF_EVENT(3,"-- Tape Interrupthandler --\n");
		tape_dump_sense_dbf(device, request, irb);
	} else {
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
	}
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "tape:device is not operational\n");
		return;
	}

	/*
	 * Requests that were canceled still come back with an interrupt.
	 * To detect these requests the state will be set to
	 * TAPE_REQUEST_DONE.
	 */
	if (request != NULL && request->status == TAPE_REQUEST_DONE) {
		__tape_end_request(device, request, -EIO);
		return;
	}

	rc = device->discipline->irq(device, request, irb);
	/*
	 * rc < 0 : request finished unsuccessfully.
	 * rc == TAPE_IO_SUCCESS: request finished successfully.
	 * rc == TAPE_IO_PENDING: request is still running. Ignore rc.
	 * rc == TAPE_IO_RETRY: request finished but needs another go.
	 * rc == TAPE_IO_STOP: request needs to get terminated.
	 */
	switch (rc) {
	case TAPE_IO_SUCCESS:
		/* Upon normal completion the device _is_ online */
		device->tape_generic_status |= GMT_ONLINE(~0);
		__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_PENDING:
		break;
	case TAPE_IO_RETRY:
		rc = __tape_start_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	case TAPE_IO_STOP:
		rc = __tape_cancel_io(device, request);
		if (rc)
			__tape_end_request(device, request, rc);
		break;
	default:
		if (rc > 0) {
			DBF_EVENT(6, "xunknownrc\n");
			PRINT_ERR("Invalid return code from discipline "
				  "interrupt function.\n");
			__tape_end_request(device, request, -EIO);
		} else {
			__tape_end_request(device, request, rc);
		}
		break;
	}
}

/*
 * Tape device open function used by tape_char & tape_block frontends.
 */
int
tape_open(struct tape_device *device)
{
	int rc;

	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_NOT_OPER) {
		DBF_EVENT(6, "TAPE:nodev\n");
		rc = -ENODEV;
	} else if (device->tape_state == TS_IN_USE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->tape_state == TS_BLKUSE) {
		DBF_EVENT(6, "TAPE:dbusy\n");
		rc = -EBUSY;
	} else if (device->discipline != NULL &&
		   !try_module_get(device->discipline->owner)) {
		DBF_EVENT(6, "TAPE:nodisc\n");
		rc = -ENODEV;
	} else {
		tape_state_set(device, TS_IN_USE);
		rc = 0;
	}
	spin_unlock(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Tape device release function used by tape_char & tape_block frontends.
 */
int
tape_release(struct tape_device *device)
{
	spin_lock(get_ccwdev_lock(device->cdev));
	if (device->tape_state == TS_IN_USE)
		tape_state_set(device, TS_UNUSED);
	module_put(device->discipline->owner);
	spin_unlock(get_ccwdev_lock(device->cdev));
	return 0;
}
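
/*
 * A minimal open/release usage sketch, as a frontend would use it
 * (illustrative only):
 *
 *	rc = tape_open(device);		// takes TS_UNUSED -> TS_IN_USE
 *	if (rc)
 *		return rc;		// -EBUSY or -ENODEV
 *	... issue requests with tape_do_io() ...
 *	tape_release(device);		// back to TS_UNUSED
 *
 * Note that tape_open also pins the discipline module via try_module_get,
 * which tape_release undoes with module_put.
 */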
/*
 * Execute a magnetic tape command a number of times.
 */
int
tape_mtop(struct tape_device *device, int mt_op, int mt_count)
{
	tape_mtop_fn fn;
	int rc;

	DBF_EVENT(6, "TAPE:mtio\n");
	DBF_EVENT(6, "TAPE:ioop: %x\n", mt_op);
	DBF_EVENT(6, "TAPE:arg:  %x\n", mt_count);

	if (mt_op < 0 || mt_op >= TAPE_NR_MTOPS)
		return -EINVAL;
	fn = device->discipline->mtop_array[mt_op];
	if (fn == NULL)
		return -EINVAL;

	/* We assume that the backends can handle count up to 500. */
	if (mt_op == MTBSR  || mt_op == MTFSR  || mt_op == MTFSF  ||
	    mt_op == MTBSF  || mt_op == MTFSFM || mt_op == MTBSFM) {
		rc = 0;
		for (; mt_count > 500; mt_count -= 500)
			if ((rc = fn(device, 500)) != 0)
				break;
		if (rc == 0)
			rc = fn(device, mt_count);
	} else
		rc = fn(device, mt_count);
	return rc;
}

/*
 * Tape init function.
 */
static int
tape_init (void)
{
	TAPE_DBF_AREA = debug_register ( "tape", 2, 2, 4*sizeof(long));
	debug_register_view(TAPE_DBF_AREA, &debug_sprintf_view);
#ifdef DBF_LIKE_HELL
	debug_set_level(TAPE_DBF_AREA, 6);
#endif
	DBF_EVENT(3, "tape init: ($Revision: 1.54 $)\n");
	tape_proc_init();
	tapechar_init ();
	tapeblock_init ();
	return 0;
}

/*
 * Tape exit function.
 */
static void
tape_exit(void)
{
	DBF_EVENT(6, "tape exit\n");

	/* Get rid of the frontends */
	tapechar_exit();
	tapeblock_exit();
	tape_proc_cleanup();
	debug_unregister (TAPE_DBF_AREA);
}

MODULE_AUTHOR("(C) 2001 IBM Deutschland Entwicklung GmbH by Carsten Otte and "
	      "Michael Holzheu (cotte@de.ibm.com,holzheu@de.ibm.com)");
MODULE_DESCRIPTION("Linux on zSeries channel attached "
		   "tape device driver ($Revision: 1.54 $)");
MODULE_LICENSE("GPL");

module_init(tape_init);
module_exit(tape_exit);

EXPORT_SYMBOL(tape_generic_remove);
EXPORT_SYMBOL(tape_generic_probe);
EXPORT_SYMBOL(tape_generic_online);
EXPORT_SYMBOL(tape_generic_offline);
EXPORT_SYMBOL(tape_put_device);
EXPORT_SYMBOL(tape_get_device_reference);
EXPORT_SYMBOL(tape_state_verbose);
EXPORT_SYMBOL(tape_op_verbose);
EXPORT_SYMBOL(tape_state_set);
EXPORT_SYMBOL(tape_med_state_set);
EXPORT_SYMBOL(tape_alloc_request);
EXPORT_SYMBOL(tape_free_request);
EXPORT_SYMBOL(tape_dump_sense);
EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_mtop);
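
/*
 * Worked example for the chunking in tape_mtop (illustrative only): a
 * request to space forward over 1200 files (MTFSF, mt_count == 1200) is
 * split into fn(device, 500), fn(device, 500) and finally fn(device, 200).
 * If any chunk fails, its return code is passed back to the caller and
 * the remaining chunks are skipped.
 */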