/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format.
 * The completion interrupt for this detection ccw uses the kernel
 * event daemon to trigger the call to dasd_change_state. All this is
 * done in the discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				goto out;
			}
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
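/*
 * Reference handling for the deferred work helpers below: the public
 * dasd_*_device() wrappers take an extra device reference before queueing
 * the work item and drop it again if the work was already pending, while
 * the work functions themselves drop the reference once they have run.
 */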
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until the state change has completed.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */
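
/*
 * dasd_profile_counter() maps a value to one of 32 roughly logarithmic
 * histogram buckets: values below 4 land in bucket 0, 4-7 in bucket 1,
 * 8-15 in bucket 2, and so on; the index is the smallest i for which
 * value >> (2 + i) becomes zero, capped at 31.
 */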
#define dasd_profile_counter(value, index)			     \
{								     \
	for (index = 0; index < 31 && value >> (2+index); index++)  \
		;						     \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	/* TOD clock deltas shifted by 12 yield microseconds. */
	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

static int dasd_hosts_open(struct inode *inode, struct file *file)
{
	struct dasd_device *device = inode->i_private;

	return single_open(file, dasd_hosts_show, device);
}

static const struct file_operations dasd_hosts_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_hosts_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
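/*
 * Illustrative use only (not called from this file): a discipline would
 * typically build a request roughly like
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return cqr;
 *	... fill in cqr->cpaddr[] and cqr->data ...
 *
 * and release it again with dasd_sfree_request() (or dasd_kfree_request()
 * for requests obtained from dasd_kmalloc_request()).
 */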
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_kmalloc_request);

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kfree_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
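/*
 * Note: callers such as __dasd_device_start_head() below act on the return
 * code: 0 means the request was accepted and a timer is armed for
 * cqr->expires, while transient errors (e.g. -EBUSY) leave the request
 * queued and a short retry timer is set. The submission result is also
 * recorded in cqr->intrc.
 */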
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = (struct dasd_device *) cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		device->discipline->check_attention(device, irb->esw.esw1.lpum);
		dasd_put_device(device);
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * Return 1 when the device is not eligible for IO.
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* dasd is being set offline. */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
*/ 1963 return 1; 1964 } 1965 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1966 /* CQR is not able to change device to 1967 * operational. */ 1968 return 1; 1969 } 1970 /* CQR required to get device operational. */ 1971 } 1972 return 0; 1973 } 1974 1975 /* 1976 * Take a look at the first request on the ccw queue and check 1977 * if it needs to be started. 1978 */ 1979 static void __dasd_device_start_head(struct dasd_device *device) 1980 { 1981 struct dasd_ccw_req *cqr; 1982 int rc; 1983 1984 if (list_empty(&device->ccw_queue)) 1985 return; 1986 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1987 if (cqr->status != DASD_CQR_QUEUED) 1988 return; 1989 /* if device is not usable return request to upper layer */ 1990 if (__dasd_device_is_unusable(device, cqr)) { 1991 cqr->intrc = -EAGAIN; 1992 cqr->status = DASD_CQR_CLEARED; 1993 dasd_schedule_device_bh(device); 1994 return; 1995 } 1996 1997 rc = device->discipline->start_IO(cqr); 1998 if (rc == 0) 1999 dasd_device_set_timer(device, cqr->expires); 2000 else if (rc == -EACCES) { 2001 dasd_schedule_device_bh(device); 2002 } else 2003 /* Hmpf, try again in 1/2 sec */ 2004 dasd_device_set_timer(device, 50); 2005 } 2006 2007 static void __dasd_device_check_path_events(struct dasd_device *device) 2008 { 2009 int rc; 2010 2011 if (device->path_data.tbvpm) { 2012 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 2013 DASD_UNRESUMED_PM)) 2014 return; 2015 rc = device->discipline->verify_path( 2016 device, device->path_data.tbvpm); 2017 if (rc) 2018 dasd_device_set_timer(device, 50); 2019 else 2020 device->path_data.tbvpm = 0; 2021 } 2022 }; 2023 2024 /* 2025 * Go through all request on the dasd_device request queue, 2026 * terminate them on the cdev if necessary, and return them to the 2027 * submitting layer via callback. 2028 * Note: 2029 * Make sure that all 'submitting layers' still exist when 2030 * this function is called!. In other words, when 'device' is a base 2031 * device then all block layer requests must have been removed before 2032 * via dasd_flush_block_queue. 2033 */ 2034 int dasd_flush_device_queue(struct dasd_device *device) 2035 { 2036 struct dasd_ccw_req *cqr, *n; 2037 int rc; 2038 struct list_head flush_queue; 2039 2040 INIT_LIST_HEAD(&flush_queue); 2041 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2042 rc = 0; 2043 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2044 /* Check status and move request to flush_queue */ 2045 switch (cqr->status) { 2046 case DASD_CQR_IN_IO: 2047 rc = device->discipline->term_IO(cqr); 2048 if (rc) { 2049 /* unable to terminate requeust */ 2050 dev_err(&device->cdev->dev, 2051 "Flushing the DASD request queue " 2052 "failed for request %p\n", cqr); 2053 /* stop flush processing */ 2054 goto finished; 2055 } 2056 break; 2057 case DASD_CQR_QUEUED: 2058 cqr->stopclk = get_tod_clock(); 2059 cqr->status = DASD_CQR_CLEARED; 2060 break; 2061 default: /* no need to modify the others */ 2062 break; 2063 } 2064 list_move_tail(&cqr->devlist, &flush_queue); 2065 } 2066 finished: 2067 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2068 /* 2069 * After this point all requests must be in state CLEAR_PENDING, 2070 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2071 * one of the others. 
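 * The clear interrupt moves a request from CLEAR_PENDING to CLEARED
 * and wakes up dasd_flush_wq, which ends the wait below.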
2072 */ 2073 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2074 wait_event(dasd_flush_wq, 2075 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2076 /* 2077 * Now set each request back to TERMINATED, DONE or NEED_ERP 2078 * and call the callback function of flushed requests 2079 */ 2080 __dasd_device_process_final_queue(device, &flush_queue); 2081 return rc; 2082 } 2083 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2084 2085 /* 2086 * Acquire the device lock and process queues for the device. 2087 */ 2088 static void dasd_device_tasklet(struct dasd_device *device) 2089 { 2090 struct list_head final_queue; 2091 2092 atomic_set (&device->tasklet_scheduled, 0); 2093 INIT_LIST_HEAD(&final_queue); 2094 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2095 /* Check expire time of first request on the ccw queue. */ 2096 __dasd_device_check_expire(device); 2097 /* find final requests on ccw queue */ 2098 __dasd_device_process_ccw_queue(device, &final_queue); 2099 __dasd_device_check_path_events(device); 2100 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2101 /* Now call the callback function of requests with final status */ 2102 __dasd_device_process_final_queue(device, &final_queue); 2103 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2104 /* Now check if the head of the ccw queue needs to be started. */ 2105 __dasd_device_start_head(device); 2106 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2107 if (waitqueue_active(&shutdown_waitq)) 2108 wake_up(&shutdown_waitq); 2109 dasd_put_device(device); 2110 } 2111 2112 /* 2113 * Schedules a call to dasd_tasklet over the device tasklet. 2114 */ 2115 void dasd_schedule_device_bh(struct dasd_device *device) 2116 { 2117 /* Protect against rescheduling. */ 2118 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2119 return; 2120 dasd_get_device(device); 2121 tasklet_hi_schedule(&device->tasklet); 2122 } 2123 EXPORT_SYMBOL(dasd_schedule_device_bh); 2124 2125 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2126 { 2127 device->stopped |= bits; 2128 } 2129 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2130 2131 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2132 { 2133 device->stopped &= ~bits; 2134 if (!device->stopped) 2135 wake_up(&generic_waitq); 2136 } 2137 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2138 2139 /* 2140 * Queue a request to the head of the device ccw_queue. 2141 * Start the I/O if possible. 2142 */ 2143 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2144 { 2145 struct dasd_device *device; 2146 unsigned long flags; 2147 2148 device = cqr->startdev; 2149 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2150 cqr->status = DASD_CQR_QUEUED; 2151 list_add(&cqr->devlist, &device->ccw_queue); 2152 /* let the bh start the request to keep them in order */ 2153 dasd_schedule_device_bh(device); 2154 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2155 } 2156 EXPORT_SYMBOL(dasd_add_request_head); 2157 2158 /* 2159 * Queue a request to the tail of the device ccw_queue. 2160 * Start the I/O if possible. 
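 *
 * A minimal sketch of the usual calling pattern, taken from the
 * sleep_on path below (error handling omitted):
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *
 * The request is not started here but by the device tasklet, so
 * ordering on the ccw_queue is preserved.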
2161 */ 2162 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2163 { 2164 struct dasd_device *device; 2165 unsigned long flags; 2166 2167 device = cqr->startdev; 2168 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2169 cqr->status = DASD_CQR_QUEUED; 2170 list_add_tail(&cqr->devlist, &device->ccw_queue); 2171 /* let the bh start the request to keep them in order */ 2172 dasd_schedule_device_bh(device); 2173 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2174 } 2175 EXPORT_SYMBOL(dasd_add_request_tail); 2176 2177 /* 2178 * Wakeup helper for the 'sleep_on' functions. 2179 */ 2180 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2181 { 2182 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2183 cqr->callback_data = DASD_SLEEPON_END_TAG; 2184 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2185 wake_up(&generic_waitq); 2186 } 2187 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2188 2189 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2190 { 2191 struct dasd_device *device; 2192 int rc; 2193 2194 device = cqr->startdev; 2195 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2196 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2197 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2198 return rc; 2199 } 2200 2201 /* 2202 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2203 */ 2204 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2205 { 2206 struct dasd_device *device; 2207 dasd_erp_fn_t erp_fn; 2208 2209 if (cqr->status == DASD_CQR_FILLED) 2210 return 0; 2211 device = cqr->startdev; 2212 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2213 if (cqr->status == DASD_CQR_TERMINATED) { 2214 device->discipline->handle_terminated_request(cqr); 2215 return 1; 2216 } 2217 if (cqr->status == DASD_CQR_NEED_ERP) { 2218 erp_fn = device->discipline->erp_action(cqr); 2219 erp_fn(cqr); 2220 return 1; 2221 } 2222 if (cqr->status == DASD_CQR_FAILED) 2223 dasd_log_sense(cqr, &cqr->irb); 2224 if (cqr->refers) { 2225 __dasd_process_erp(device, cqr); 2226 return 1; 2227 } 2228 } 2229 return 0; 2230 } 2231 2232 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2233 { 2234 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2235 if (cqr->refers) /* erp is not done yet */ 2236 return 1; 2237 return ((cqr->status != DASD_CQR_DONE) && 2238 (cqr->status != DASD_CQR_FAILED)); 2239 } else 2240 return (cqr->status == DASD_CQR_FILLED); 2241 } 2242 2243 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2244 { 2245 struct dasd_device *device; 2246 int rc; 2247 struct list_head ccw_queue; 2248 struct dasd_ccw_req *cqr; 2249 2250 INIT_LIST_HEAD(&ccw_queue); 2251 maincqr->status = DASD_CQR_FILLED; 2252 device = maincqr->startdev; 2253 list_add(&maincqr->blocklist, &ccw_queue); 2254 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2255 cqr = list_first_entry(&ccw_queue, 2256 struct dasd_ccw_req, blocklist)) { 2257 2258 if (__dasd_sleep_on_erp(cqr)) 2259 continue; 2260 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2261 continue; 2262 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2263 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2264 cqr->status = DASD_CQR_FAILED; 2265 cqr->intrc = -EPERM; 2266 continue; 2267 } 2268 /* Non-temporary stop condition will trigger fail fast */ 2269 if (device->stopped & ~DASD_STOPPED_PENDING && 2270 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2271 (!dasd_eer_enabled(device))) { 2272 cqr->status = DASD_CQR_FAILED; 2273 cqr->intrc = -ENOLINK; 
2274 continue; 2275 } 2276 /* 2277 * Don't try to start requests if device is stopped 2278 * except path verification requests 2279 */ 2280 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2281 if (interruptible) { 2282 rc = wait_event_interruptible( 2283 generic_waitq, !(device->stopped)); 2284 if (rc == -ERESTARTSYS) { 2285 cqr->status = DASD_CQR_FAILED; 2286 maincqr->intrc = rc; 2287 continue; 2288 } 2289 } else 2290 wait_event(generic_waitq, !(device->stopped)); 2291 } 2292 if (!cqr->callback) 2293 cqr->callback = dasd_wakeup_cb; 2294 2295 cqr->callback_data = DASD_SLEEPON_START_TAG; 2296 dasd_add_request_tail(cqr); 2297 if (interruptible) { 2298 rc = wait_event_interruptible( 2299 generic_waitq, _wait_for_wakeup(cqr)); 2300 if (rc == -ERESTARTSYS) { 2301 dasd_cancel_req(cqr); 2302 /* wait (non-interruptible) for final status */ 2303 wait_event(generic_waitq, 2304 _wait_for_wakeup(cqr)); 2305 cqr->status = DASD_CQR_FAILED; 2306 maincqr->intrc = rc; 2307 continue; 2308 } 2309 } else 2310 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2311 } 2312 2313 maincqr->endclk = get_tod_clock(); 2314 if ((maincqr->status != DASD_CQR_DONE) && 2315 (maincqr->intrc != -ERESTARTSYS)) 2316 dasd_log_sense(maincqr, &maincqr->irb); 2317 if (maincqr->status == DASD_CQR_DONE) 2318 rc = 0; 2319 else if (maincqr->intrc) 2320 rc = maincqr->intrc; 2321 else 2322 rc = -EIO; 2323 return rc; 2324 } 2325 2326 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2327 { 2328 struct dasd_ccw_req *cqr; 2329 2330 list_for_each_entry(cqr, ccw_queue, blocklist) { 2331 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2332 return 0; 2333 } 2334 2335 return 1; 2336 } 2337 2338 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2339 { 2340 struct dasd_device *device; 2341 struct dasd_ccw_req *cqr, *n; 2342 u8 *sense = NULL; 2343 int rc; 2344 2345 retry: 2346 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2347 device = cqr->startdev; 2348 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2349 continue; 2350 2351 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2352 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2353 cqr->status = DASD_CQR_FAILED; 2354 cqr->intrc = -EPERM; 2355 continue; 2356 } 2357 /*Non-temporary stop condition will trigger fail fast*/ 2358 if (device->stopped & ~DASD_STOPPED_PENDING && 2359 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2360 !dasd_eer_enabled(device)) { 2361 cqr->status = DASD_CQR_FAILED; 2362 cqr->intrc = -EAGAIN; 2363 continue; 2364 } 2365 2366 /*Don't try to start requests if device is stopped*/ 2367 if (interruptible) { 2368 rc = wait_event_interruptible( 2369 generic_waitq, !device->stopped); 2370 if (rc == -ERESTARTSYS) { 2371 cqr->status = DASD_CQR_FAILED; 2372 cqr->intrc = rc; 2373 continue; 2374 } 2375 } else 2376 wait_event(generic_waitq, !(device->stopped)); 2377 2378 if (!cqr->callback) 2379 cqr->callback = dasd_wakeup_cb; 2380 cqr->callback_data = DASD_SLEEPON_START_TAG; 2381 dasd_add_request_tail(cqr); 2382 } 2383 2384 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2385 2386 rc = 0; 2387 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2388 /* 2389 * In some cases the 'File Protected' or 'Incorrect Length' 2390 * error might be expected and error recovery would be 2391 * unnecessary in these cases. Check if the according suppress 2392 * bit is set. 
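 * DASD_CQR_SUPPRESS_FP covers the 'File Protected' sense
 * (SNS1_FILE_PROTECTED in sense byte 1), DASD_CQR_SUPPRESS_IL the
 * 'Incorrect Length' channel status (cstat 0x40).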
2393 */ 2394 sense = dasd_get_sense(&cqr->irb); 2395 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2396 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2397 continue; 2398 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2399 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2400 continue; 2401 2402 /* 2403 * for alias devices simplify error recovery and 2404 * return to upper layer 2405 * do not skip ERP requests 2406 */ 2407 if (cqr->startdev != cqr->basedev && !cqr->refers && 2408 (cqr->status == DASD_CQR_TERMINATED || 2409 cqr->status == DASD_CQR_NEED_ERP)) 2410 return -EAGAIN; 2411 2412 /* normal recovery for basedev IO */ 2413 if (__dasd_sleep_on_erp(cqr)) 2414 /* handle erp first */ 2415 goto retry; 2416 } 2417 2418 return 0; 2419 } 2420 2421 /* 2422 * Queue a request to the tail of the device ccw_queue and wait for 2423 * it's completion. 2424 */ 2425 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2426 { 2427 return _dasd_sleep_on(cqr, 0); 2428 } 2429 EXPORT_SYMBOL(dasd_sleep_on); 2430 2431 /* 2432 * Start requests from a ccw_queue and wait for their completion. 2433 */ 2434 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2435 { 2436 return _dasd_sleep_on_queue(ccw_queue, 0); 2437 } 2438 EXPORT_SYMBOL(dasd_sleep_on_queue); 2439 2440 /* 2441 * Queue a request to the tail of the device ccw_queue and wait 2442 * interruptible for it's completion. 2443 */ 2444 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2445 { 2446 return _dasd_sleep_on(cqr, 1); 2447 } 2448 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2449 2450 /* 2451 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2452 * for eckd devices) the currently running request has to be terminated 2453 * and be put back to status queued, before the special request is added 2454 * to the head of the queue. Then the special request is waited on normally. 2455 */ 2456 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2457 { 2458 struct dasd_ccw_req *cqr; 2459 int rc; 2460 2461 if (list_empty(&device->ccw_queue)) 2462 return 0; 2463 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2464 rc = device->discipline->term_IO(cqr); 2465 if (!rc) 2466 /* 2467 * CQR terminated because a more important request is pending. 2468 * Undo decreasing of retry counter because this is 2469 * not an error case. 
2470 */ 2471 cqr->retries++; 2472 return rc; 2473 } 2474 2475 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2476 { 2477 struct dasd_device *device; 2478 int rc; 2479 2480 device = cqr->startdev; 2481 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2482 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2483 cqr->status = DASD_CQR_FAILED; 2484 cqr->intrc = -EPERM; 2485 return -EIO; 2486 } 2487 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2488 rc = _dasd_term_running_cqr(device); 2489 if (rc) { 2490 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2491 return rc; 2492 } 2493 cqr->callback = dasd_wakeup_cb; 2494 cqr->callback_data = DASD_SLEEPON_START_TAG; 2495 cqr->status = DASD_CQR_QUEUED; 2496 /* 2497 * add new request as second 2498 * first the terminated cqr needs to be finished 2499 */ 2500 list_add(&cqr->devlist, device->ccw_queue.next); 2501 2502 /* let the bh start the request to keep them in order */ 2503 dasd_schedule_device_bh(device); 2504 2505 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2506 2507 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2508 2509 if (cqr->status == DASD_CQR_DONE) 2510 rc = 0; 2511 else if (cqr->intrc) 2512 rc = cqr->intrc; 2513 else 2514 rc = -EIO; 2515 2516 /* kick tasklets */ 2517 dasd_schedule_device_bh(device); 2518 if (device->block) 2519 dasd_schedule_block_bh(device->block); 2520 2521 return rc; 2522 } 2523 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2524 2525 /* 2526 * Cancels a request that was started with dasd_sleep_on_req. 2527 * This is useful to timeout requests. The request will be 2528 * terminated if it is currently in i/o. 2529 * Returns 0 if request termination was successful 2530 * negative error code if termination failed 2531 * Cancellation of a request is an asynchronous operation! The calling 2532 * function has to wait until the request is properly returned via callback. 2533 */ 2534 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2535 { 2536 struct dasd_device *device = cqr->startdev; 2537 unsigned long flags; 2538 int rc; 2539 2540 rc = 0; 2541 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2542 switch (cqr->status) { 2543 case DASD_CQR_QUEUED: 2544 /* request was not started - just set to cleared */ 2545 cqr->status = DASD_CQR_CLEARED; 2546 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2547 cqr->callback_data = DASD_SLEEPON_END_TAG; 2548 break; 2549 case DASD_CQR_IN_IO: 2550 /* request in IO - terminate IO and release again */ 2551 rc = device->discipline->term_IO(cqr); 2552 if (rc) { 2553 dev_err(&device->cdev->dev, 2554 "Cancelling request %p failed with rc=%d\n", 2555 cqr, rc); 2556 } else { 2557 cqr->stopclk = get_tod_clock(); 2558 } 2559 break; 2560 default: /* already finished or clear pending - do nothing */ 2561 break; 2562 } 2563 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2564 dasd_schedule_device_bh(device); 2565 return rc; 2566 } 2567 EXPORT_SYMBOL(dasd_cancel_req); 2568 2569 /* 2570 * SECTION: Operations of the dasd_block layer. 2571 */ 2572 2573 /* 2574 * Timeout function for dasd_block. This is used when the block layer 2575 * is waiting for something that may not come reliably, (e.g. 
a state 2576 * change interrupt) 2577 */ 2578 static void dasd_block_timeout(unsigned long ptr) 2579 { 2580 unsigned long flags; 2581 struct dasd_block *block; 2582 2583 block = (struct dasd_block *) ptr; 2584 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2585 /* re-activate request queue */ 2586 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2587 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2588 dasd_schedule_block_bh(block); 2589 } 2590 2591 /* 2592 * Setup timeout for a dasd_block in jiffies. 2593 */ 2594 void dasd_block_set_timer(struct dasd_block *block, int expires) 2595 { 2596 if (expires == 0) 2597 del_timer(&block->timer); 2598 else 2599 mod_timer(&block->timer, jiffies + expires); 2600 } 2601 EXPORT_SYMBOL(dasd_block_set_timer); 2602 2603 /* 2604 * Clear timeout for a dasd_block. 2605 */ 2606 void dasd_block_clear_timer(struct dasd_block *block) 2607 { 2608 del_timer(&block->timer); 2609 } 2610 EXPORT_SYMBOL(dasd_block_clear_timer); 2611 2612 /* 2613 * Process finished error recovery ccw. 2614 */ 2615 static void __dasd_process_erp(struct dasd_device *device, 2616 struct dasd_ccw_req *cqr) 2617 { 2618 dasd_erp_fn_t erp_fn; 2619 2620 if (cqr->status == DASD_CQR_DONE) 2621 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2622 else 2623 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2624 erp_fn = device->discipline->erp_postaction(cqr); 2625 erp_fn(cqr); 2626 } 2627 2628 /* 2629 * Fetch requests from the block device queue. 2630 */ 2631 static void __dasd_process_request_queue(struct dasd_block *block) 2632 { 2633 struct request_queue *queue; 2634 struct request *req; 2635 struct dasd_ccw_req *cqr; 2636 struct dasd_device *basedev; 2637 unsigned long flags; 2638 queue = block->request_queue; 2639 basedev = block->base; 2640 /* No queue ? Then there is nothing to do. */ 2641 if (queue == NULL) 2642 return; 2643 2644 /* 2645 * We requeue request from the block device queue to the ccw 2646 * queue only in two states. In state DASD_STATE_READY the 2647 * partition detection is done and we need to requeue requests 2648 * for that. State DASD_STATE_ONLINE is normal block device 2649 * operation. 
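 * In any earlier state the block device queue is simply drained and
 * all requests are ended with -EIO, see below.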
2650 */ 2651 if (basedev->state < DASD_STATE_READY) { 2652 while ((req = blk_fetch_request(block->request_queue))) 2653 __blk_end_request_all(req, -EIO); 2654 return; 2655 } 2656 2657 /* 2658 * if device is stopped do not fetch new requests 2659 * except failfast is active which will let requests fail 2660 * immediately in __dasd_block_start_head() 2661 */ 2662 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) 2663 return; 2664 2665 /* Now we try to fetch requests from the request queue */ 2666 while ((req = blk_peek_request(queue))) { 2667 if (basedev->features & DASD_FEATURE_READONLY && 2668 rq_data_dir(req) == WRITE) { 2669 DBF_DEV_EVENT(DBF_ERR, basedev, 2670 "Rejecting write request %p", 2671 req); 2672 blk_start_request(req); 2673 __blk_end_request_all(req, -EIO); 2674 continue; 2675 } 2676 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2677 (basedev->features & DASD_FEATURE_FAILFAST || 2678 blk_noretry_request(req))) { 2679 DBF_DEV_EVENT(DBF_ERR, basedev, 2680 "Rejecting failfast request %p", 2681 req); 2682 blk_start_request(req); 2683 __blk_end_request_all(req, -ETIMEDOUT); 2684 continue; 2685 } 2686 cqr = basedev->discipline->build_cp(basedev, block, req); 2687 if (IS_ERR(cqr)) { 2688 if (PTR_ERR(cqr) == -EBUSY) 2689 break; /* normal end condition */ 2690 if (PTR_ERR(cqr) == -ENOMEM) 2691 break; /* terminate request queue loop */ 2692 if (PTR_ERR(cqr) == -EAGAIN) { 2693 /* 2694 * The current request cannot be build right 2695 * now, we have to try later. If this request 2696 * is the head-of-queue we stop the device 2697 * for 1/2 second. 2698 */ 2699 if (!list_empty(&block->ccw_queue)) 2700 break; 2701 spin_lock_irqsave( 2702 get_ccwdev_lock(basedev->cdev), flags); 2703 dasd_device_set_stop_bits(basedev, 2704 DASD_STOPPED_PENDING); 2705 spin_unlock_irqrestore( 2706 get_ccwdev_lock(basedev->cdev), flags); 2707 dasd_block_set_timer(block, HZ/2); 2708 break; 2709 } 2710 DBF_DEV_EVENT(DBF_ERR, basedev, 2711 "CCW creation failed (rc=%ld) " 2712 "on request %p", 2713 PTR_ERR(cqr), req); 2714 blk_start_request(req); 2715 __blk_end_request_all(req, -EIO); 2716 continue; 2717 } 2718 /* 2719 * Note: callback is set to dasd_return_cqr_cb in 2720 * __dasd_block_start_head to cover erp requests as well 2721 */ 2722 cqr->callback_data = (void *) req; 2723 cqr->status = DASD_CQR_FILLED; 2724 req->completion_data = cqr; 2725 blk_start_request(req); 2726 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2727 INIT_LIST_HEAD(&cqr->devlist); 2728 dasd_profile_start(block, cqr, req); 2729 } 2730 } 2731 2732 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2733 { 2734 struct request *req; 2735 int status; 2736 int error = 0; 2737 2738 req = (struct request *) cqr->callback_data; 2739 dasd_profile_end(cqr->block, cqr, req); 2740 status = cqr->block->base->discipline->free_cp(cqr, req); 2741 if (status < 0) 2742 error = status; 2743 else if (status == 0) { 2744 if (cqr->intrc == -EPERM) 2745 error = -EBADE; 2746 else if (cqr->intrc == -ENOLINK || 2747 cqr->intrc == -ETIMEDOUT) 2748 error = cqr->intrc; 2749 else 2750 error = -EIO; 2751 } 2752 __blk_end_request_all(req, error); 2753 } 2754 2755 /* 2756 * Process ccw request queue. 2757 */ 2758 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2759 struct list_head *final_queue) 2760 { 2761 struct list_head *l, *n; 2762 struct dasd_ccw_req *cqr; 2763 dasd_erp_fn_t erp_fn; 2764 unsigned long flags; 2765 struct dasd_device *base = block->base; 2766 2767 restart: 2768 /* Process request with final status. 
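 * Final on the block level means DONE, FAILED, NEED_ERP or
 * TERMINATED; everything else is skipped until its interrupt arrives.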
*/ 2769 list_for_each_safe(l, n, &block->ccw_queue) { 2770 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2771 if (cqr->status != DASD_CQR_DONE && 2772 cqr->status != DASD_CQR_FAILED && 2773 cqr->status != DASD_CQR_NEED_ERP && 2774 cqr->status != DASD_CQR_TERMINATED) 2775 continue; 2776 2777 if (cqr->status == DASD_CQR_TERMINATED) { 2778 base->discipline->handle_terminated_request(cqr); 2779 goto restart; 2780 } 2781 2782 /* Process requests that may be recovered */ 2783 if (cqr->status == DASD_CQR_NEED_ERP) { 2784 erp_fn = base->discipline->erp_action(cqr); 2785 if (IS_ERR(erp_fn(cqr))) 2786 continue; 2787 goto restart; 2788 } 2789 2790 /* log sense for fatal error */ 2791 if (cqr->status == DASD_CQR_FAILED) { 2792 dasd_log_sense(cqr, &cqr->irb); 2793 } 2794 2795 /* First of all call extended error reporting. */ 2796 if (dasd_eer_enabled(base) && 2797 cqr->status == DASD_CQR_FAILED) { 2798 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2799 2800 /* restart request */ 2801 cqr->status = DASD_CQR_FILLED; 2802 cqr->retries = 255; 2803 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2804 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2805 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2806 flags); 2807 goto restart; 2808 } 2809 2810 /* Process finished ERP request. */ 2811 if (cqr->refers) { 2812 __dasd_process_erp(base, cqr); 2813 goto restart; 2814 } 2815 2816 /* Rechain finished requests to final queue */ 2817 cqr->endclk = get_tod_clock(); 2818 list_move_tail(&cqr->blocklist, final_queue); 2819 } 2820 } 2821 2822 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2823 { 2824 dasd_schedule_block_bh(cqr->block); 2825 } 2826 2827 static void __dasd_block_start_head(struct dasd_block *block) 2828 { 2829 struct dasd_ccw_req *cqr; 2830 2831 if (list_empty(&block->ccw_queue)) 2832 return; 2833 /* We allways begin with the first requests on the queue, as some 2834 * of previously started requests have to be enqueued on a 2835 * dasd_device again for error recovery. 2836 */ 2837 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2838 if (cqr->status != DASD_CQR_FILLED) 2839 continue; 2840 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2841 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2842 cqr->status = DASD_CQR_FAILED; 2843 cqr->intrc = -EPERM; 2844 dasd_schedule_block_bh(block); 2845 continue; 2846 } 2847 /* Non-temporary stop condition will trigger fail fast */ 2848 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2849 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2850 (!dasd_eer_enabled(block->base))) { 2851 cqr->status = DASD_CQR_FAILED; 2852 cqr->intrc = -ENOLINK; 2853 dasd_schedule_block_bh(block); 2854 continue; 2855 } 2856 /* Don't try to start requests if device is stopped */ 2857 if (block->base->stopped) 2858 return; 2859 2860 /* just a fail safe check, should not happen */ 2861 if (!cqr->startdev) 2862 cqr->startdev = block->base; 2863 2864 /* make sure that the requests we submit find their way back */ 2865 cqr->callback = dasd_return_cqr_cb; 2866 2867 dasd_add_request_tail(cqr); 2868 } 2869 } 2870 2871 /* 2872 * Central dasd_block layer routine. Takes requests from the generic 2873 * block layer request queue, creates ccw requests, enqueues them on 2874 * a dasd_device and processes ccw requests that have been returned. 
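 * Runs in softirq context; block->queue_lock protects the ccw_queue
 * while request_queue_lock is the queue lock handed to the block
 * layer in dasd_alloc_queue.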
2875 */ 2876 static void dasd_block_tasklet(struct dasd_block *block) 2877 { 2878 struct list_head final_queue; 2879 struct list_head *l, *n; 2880 struct dasd_ccw_req *cqr; 2881 2882 atomic_set(&block->tasklet_scheduled, 0); 2883 INIT_LIST_HEAD(&final_queue); 2884 spin_lock(&block->queue_lock); 2885 /* Finish off requests on ccw queue */ 2886 __dasd_process_block_ccw_queue(block, &final_queue); 2887 spin_unlock(&block->queue_lock); 2888 /* Now call the callback function of requests with final status */ 2889 spin_lock_irq(&block->request_queue_lock); 2890 list_for_each_safe(l, n, &final_queue) { 2891 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2892 list_del_init(&cqr->blocklist); 2893 __dasd_cleanup_cqr(cqr); 2894 } 2895 spin_lock(&block->queue_lock); 2896 /* Get new request from the block device request queue */ 2897 __dasd_process_request_queue(block); 2898 /* Now check if the head of the ccw queue needs to be started. */ 2899 __dasd_block_start_head(block); 2900 spin_unlock(&block->queue_lock); 2901 spin_unlock_irq(&block->request_queue_lock); 2902 if (waitqueue_active(&shutdown_waitq)) 2903 wake_up(&shutdown_waitq); 2904 dasd_put_device(block->base); 2905 } 2906 2907 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2908 { 2909 wake_up(&dasd_flush_wq); 2910 } 2911 2912 /* 2913 * Requeue a request back to the block request queue 2914 * only works for block requests 2915 */ 2916 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2917 { 2918 struct dasd_block *block = cqr->block; 2919 struct request *req; 2920 unsigned long flags; 2921 2922 if (!block) 2923 return -EINVAL; 2924 spin_lock_irqsave(&block->queue_lock, flags); 2925 req = (struct request *) cqr->callback_data; 2926 blk_requeue_request(block->request_queue, req); 2927 spin_unlock_irqrestore(&block->queue_lock, flags); 2928 2929 return 0; 2930 } 2931 2932 /* 2933 * Go through all request on the dasd_block request queue, cancel them 2934 * on the respective dasd_device, and return them to the generic 2935 * block layer. 2936 */ 2937 static int dasd_flush_block_queue(struct dasd_block *block) 2938 { 2939 struct dasd_ccw_req *cqr, *n; 2940 int rc, i; 2941 struct list_head flush_queue; 2942 2943 INIT_LIST_HEAD(&flush_queue); 2944 spin_lock_bh(&block->queue_lock); 2945 rc = 0; 2946 restart: 2947 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2948 /* if this request currently owned by a dasd_device cancel it */ 2949 if (cqr->status >= DASD_CQR_QUEUED) 2950 rc = dasd_cancel_req(cqr); 2951 if (rc < 0) 2952 break; 2953 /* Rechain request (including erp chain) so it won't be 2954 * touched by the dasd_block_tasklet anymore. 2955 * Replace the callback so we notice when the request 2956 * is returned from the dasd_device layer. 2957 */ 2958 cqr->callback = _dasd_wake_block_flush_cb; 2959 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2960 list_move_tail(&cqr->blocklist, &flush_queue); 2961 if (i > 1) 2962 /* moved more than one request - need to restart */ 2963 goto restart; 2964 } 2965 spin_unlock_bh(&block->queue_lock); 2966 /* Now call the callback function of flushed requests */ 2967 restart_cb: 2968 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2969 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2970 /* Process finished ERP request. 
*/ 2971 if (cqr->refers) { 2972 spin_lock_bh(&block->queue_lock); 2973 __dasd_process_erp(block->base, cqr); 2974 spin_unlock_bh(&block->queue_lock); 2975 /* restart list_for_xx loop since dasd_process_erp 2976 * might remove multiple elements */ 2977 goto restart_cb; 2978 } 2979 /* call the callback function */ 2980 spin_lock_irq(&block->request_queue_lock); 2981 cqr->endclk = get_tod_clock(); 2982 list_del_init(&cqr->blocklist); 2983 __dasd_cleanup_cqr(cqr); 2984 spin_unlock_irq(&block->request_queue_lock); 2985 } 2986 return rc; 2987 } 2988 2989 /* 2990 * Schedules a call to dasd_tasklet over the device tasklet. 2991 */ 2992 void dasd_schedule_block_bh(struct dasd_block *block) 2993 { 2994 /* Protect against rescheduling. */ 2995 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2996 return; 2997 /* life cycle of block is bound to it's base device */ 2998 dasd_get_device(block->base); 2999 tasklet_hi_schedule(&block->tasklet); 3000 } 3001 EXPORT_SYMBOL(dasd_schedule_block_bh); 3002 3003 3004 /* 3005 * SECTION: external block device operations 3006 * (request queue handling, open, release, etc.) 3007 */ 3008 3009 /* 3010 * Dasd request queue function. Called from ll_rw_blk.c 3011 */ 3012 static void do_dasd_request(struct request_queue *queue) 3013 { 3014 struct dasd_block *block; 3015 3016 block = queue->queuedata; 3017 spin_lock(&block->queue_lock); 3018 /* Get new request from the block device request queue */ 3019 __dasd_process_request_queue(block); 3020 /* Now check if the head of the ccw queue needs to be started. */ 3021 __dasd_block_start_head(block); 3022 spin_unlock(&block->queue_lock); 3023 } 3024 3025 /* 3026 * Block timeout callback, called from the block layer 3027 * 3028 * request_queue lock is held on entry. 3029 * 3030 * Return values: 3031 * BLK_EH_RESET_TIMER if the request should be left running 3032 * BLK_EH_NOT_HANDLED if the request is handled or terminated 3033 * by the driver. 3034 */ 3035 enum blk_eh_timer_return dasd_times_out(struct request *req) 3036 { 3037 struct dasd_ccw_req *cqr = req->completion_data; 3038 struct dasd_block *block = req->q->queuedata; 3039 struct dasd_device *device; 3040 int rc = 0; 3041 3042 if (!cqr) 3043 return BLK_EH_NOT_HANDLED; 3044 3045 device = cqr->startdev ? 
cqr->startdev : block->base; 3046 if (!device->blk_timeout) 3047 return BLK_EH_RESET_TIMER; 3048 DBF_DEV_EVENT(DBF_WARNING, device, 3049 " dasd_times_out cqr %p status %x", 3050 cqr, cqr->status); 3051 3052 spin_lock(&block->queue_lock); 3053 spin_lock(get_ccwdev_lock(device->cdev)); 3054 cqr->retries = -1; 3055 cqr->intrc = -ETIMEDOUT; 3056 if (cqr->status >= DASD_CQR_QUEUED) { 3057 spin_unlock(get_ccwdev_lock(device->cdev)); 3058 rc = dasd_cancel_req(cqr); 3059 } else if (cqr->status == DASD_CQR_FILLED || 3060 cqr->status == DASD_CQR_NEED_ERP) { 3061 cqr->status = DASD_CQR_TERMINATED; 3062 spin_unlock(get_ccwdev_lock(device->cdev)); 3063 } else if (cqr->status == DASD_CQR_IN_ERP) { 3064 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3065 3066 list_for_each_entry_safe(searchcqr, nextcqr, 3067 &block->ccw_queue, blocklist) { 3068 tmpcqr = searchcqr; 3069 while (tmpcqr->refers) 3070 tmpcqr = tmpcqr->refers; 3071 if (tmpcqr != cqr) 3072 continue; 3073 /* searchcqr is an ERP request for cqr */ 3074 searchcqr->retries = -1; 3075 searchcqr->intrc = -ETIMEDOUT; 3076 if (searchcqr->status >= DASD_CQR_QUEUED) { 3077 spin_unlock(get_ccwdev_lock(device->cdev)); 3078 rc = dasd_cancel_req(searchcqr); 3079 spin_lock(get_ccwdev_lock(device->cdev)); 3080 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3081 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3082 searchcqr->status = DASD_CQR_TERMINATED; 3083 rc = 0; 3084 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3085 /* 3086 * Shouldn't happen; most recent ERP 3087 * request is at the front of queue 3088 */ 3089 continue; 3090 } 3091 break; 3092 } 3093 spin_unlock(get_ccwdev_lock(device->cdev)); 3094 } 3095 dasd_schedule_block_bh(block); 3096 spin_unlock(&block->queue_lock); 3097 3098 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 3099 } 3100 3101 /* 3102 * Allocate and initialize request queue and default I/O scheduler. 3103 */ 3104 static int dasd_alloc_queue(struct dasd_block *block) 3105 { 3106 block->request_queue = blk_init_queue(do_dasd_request, 3107 &block->request_queue_lock); 3108 if (block->request_queue == NULL) 3109 return -ENOMEM; 3110 3111 block->request_queue->queuedata = block; 3112 3113 return 0; 3114 } 3115 3116 /* 3117 * Allocate and initialize request queue. 3118 */ 3119 static void dasd_setup_queue(struct dasd_block *block) 3120 { 3121 int max; 3122 3123 if (block->base->features & DASD_FEATURE_USERAW) { 3124 /* 3125 * the max_blocks value for raw_track access is 256 3126 * it is higher than the native ECKD value because we 3127 * only need one ccw per track 3128 * so the max_hw_sectors are 3129 * 2048 x 512B = 1024kB = 16 tracks 3130 */ 3131 max = 2048; 3132 } else { 3133 max = block->base->discipline->max_blocks << block->s2b_shift; 3134 } 3135 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3136 block->request_queue->limits.max_dev_sectors = max; 3137 blk_queue_logical_block_size(block->request_queue, 3138 block->bp_block); 3139 blk_queue_max_hw_sectors(block->request_queue, max); 3140 blk_queue_max_segments(block->request_queue, -1L); 3141 /* with page sized segments we can translate each segement into 3142 * one idaw/tidaw 3143 */ 3144 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3145 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3146 } 3147 3148 /* 3149 * Deactivate and free request queue. 
3150 */ 3151 static void dasd_free_queue(struct dasd_block *block) 3152 { 3153 if (block->request_queue) { 3154 blk_cleanup_queue(block->request_queue); 3155 block->request_queue = NULL; 3156 } 3157 } 3158 3159 /* 3160 * Flush request on the request queue. 3161 */ 3162 static void dasd_flush_request_queue(struct dasd_block *block) 3163 { 3164 struct request *req; 3165 3166 if (!block->request_queue) 3167 return; 3168 3169 spin_lock_irq(&block->request_queue_lock); 3170 while ((req = blk_fetch_request(block->request_queue))) 3171 __blk_end_request_all(req, -EIO); 3172 spin_unlock_irq(&block->request_queue_lock); 3173 } 3174 3175 static int dasd_open(struct block_device *bdev, fmode_t mode) 3176 { 3177 struct dasd_device *base; 3178 int rc; 3179 3180 base = dasd_device_from_gendisk(bdev->bd_disk); 3181 if (!base) 3182 return -ENODEV; 3183 3184 atomic_inc(&base->block->open_count); 3185 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3186 rc = -ENODEV; 3187 goto unlock; 3188 } 3189 3190 if (!try_module_get(base->discipline->owner)) { 3191 rc = -EINVAL; 3192 goto unlock; 3193 } 3194 3195 if (dasd_probeonly) { 3196 dev_info(&base->cdev->dev, 3197 "Accessing the DASD failed because it is in " 3198 "probeonly mode\n"); 3199 rc = -EPERM; 3200 goto out; 3201 } 3202 3203 if (base->state <= DASD_STATE_BASIC) { 3204 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3205 " Cannot open unrecognized device"); 3206 rc = -ENODEV; 3207 goto out; 3208 } 3209 3210 if ((mode & FMODE_WRITE) && 3211 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3212 (base->features & DASD_FEATURE_READONLY))) { 3213 rc = -EROFS; 3214 goto out; 3215 } 3216 3217 dasd_put_device(base); 3218 return 0; 3219 3220 out: 3221 module_put(base->discipline->owner); 3222 unlock: 3223 atomic_dec(&base->block->open_count); 3224 dasd_put_device(base); 3225 return rc; 3226 } 3227 3228 static void dasd_release(struct gendisk *disk, fmode_t mode) 3229 { 3230 struct dasd_device *base = dasd_device_from_gendisk(disk); 3231 if (base) { 3232 atomic_dec(&base->block->open_count); 3233 module_put(base->discipline->owner); 3234 dasd_put_device(base); 3235 } 3236 } 3237 3238 /* 3239 * Return disk geometry. 
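 * The discipline fills in the CHS values; the partition start is
 * converted from 512 byte sectors to device blocks via s2b_shift.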
3240 */ 3241 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3242 { 3243 struct dasd_device *base; 3244 3245 base = dasd_device_from_gendisk(bdev->bd_disk); 3246 if (!base) 3247 return -ENODEV; 3248 3249 if (!base->discipline || 3250 !base->discipline->fill_geometry) { 3251 dasd_put_device(base); 3252 return -EINVAL; 3253 } 3254 base->discipline->fill_geometry(base->block, geo); 3255 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3256 dasd_put_device(base); 3257 return 0; 3258 } 3259 3260 const struct block_device_operations 3261 dasd_device_operations = { 3262 .owner = THIS_MODULE, 3263 .open = dasd_open, 3264 .release = dasd_release, 3265 .ioctl = dasd_ioctl, 3266 .compat_ioctl = dasd_ioctl, 3267 .getgeo = dasd_getgeo, 3268 }; 3269 3270 /******************************************************************************* 3271 * end of block device operations 3272 */ 3273 3274 static void 3275 dasd_exit(void) 3276 { 3277 #ifdef CONFIG_PROC_FS 3278 dasd_proc_exit(); 3279 #endif 3280 dasd_eer_exit(); 3281 if (dasd_page_cache != NULL) { 3282 kmem_cache_destroy(dasd_page_cache); 3283 dasd_page_cache = NULL; 3284 } 3285 dasd_gendisk_exit(); 3286 dasd_devmap_exit(); 3287 if (dasd_debug_area != NULL) { 3288 debug_unregister(dasd_debug_area); 3289 dasd_debug_area = NULL; 3290 } 3291 dasd_statistics_removeroot(); 3292 } 3293 3294 /* 3295 * SECTION: common functions for ccw_driver use 3296 */ 3297 3298 /* 3299 * Is the device read-only? 3300 * Note that this function does not report the setting of the 3301 * readonly device attribute, but how it is configured in z/VM. 3302 */ 3303 int dasd_device_is_ro(struct dasd_device *device) 3304 { 3305 struct ccw_dev_id dev_id; 3306 struct diag210 diag_data; 3307 int rc; 3308 3309 if (!MACHINE_IS_VM) 3310 return 0; 3311 ccw_device_get_id(device->cdev, &dev_id); 3312 memset(&diag_data, 0, sizeof(diag_data)); 3313 diag_data.vrdcdvno = dev_id.devno; 3314 diag_data.vrdclen = sizeof(diag_data); 3315 rc = diag210(&diag_data); 3316 if (rc == 0 || rc == 2) { 3317 return diag_data.vrdcvfla & 0x80; 3318 } else { 3319 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3320 dev_id.devno, rc); 3321 return 0; 3322 } 3323 } 3324 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3325 3326 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3327 { 3328 struct ccw_device *cdev = data; 3329 int ret; 3330 3331 ret = ccw_device_set_online(cdev); 3332 if (ret) 3333 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3334 dev_name(&cdev->dev), ret); 3335 } 3336 3337 /* 3338 * Initial attempt at a probe function. this can be simplified once 3339 * the other detection code is gone. 3340 */ 3341 int dasd_generic_probe(struct ccw_device *cdev, 3342 struct dasd_discipline *discipline) 3343 { 3344 int ret; 3345 3346 ret = dasd_add_sysfs_files(cdev); 3347 if (ret) { 3348 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3349 "dasd_generic_probe: could not add " 3350 "sysfs entries"); 3351 return ret; 3352 } 3353 cdev->handler = &dasd_int_handler; 3354 3355 /* 3356 * Automatically online either all dasd devices (dasd_autodetect) 3357 * or all devices specified with dasd= parameters during 3358 * initial probe. 
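 * The actual ccw_device_set_online call is deferred to the async
 * helper dasd_generic_auto_online so the probe itself does not block.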
3359 */ 3360 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3361 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3362 async_schedule(dasd_generic_auto_online, cdev); 3363 return 0; 3364 } 3365 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3366 3367 /* 3368 * This will one day be called from a global not_oper handler. 3369 * It is also used by driver_unregister during module unload. 3370 */ 3371 void dasd_generic_remove(struct ccw_device *cdev) 3372 { 3373 struct dasd_device *device; 3374 struct dasd_block *block; 3375 3376 cdev->handler = NULL; 3377 3378 device = dasd_device_from_cdev(cdev); 3379 if (IS_ERR(device)) { 3380 dasd_remove_sysfs_files(cdev); 3381 return; 3382 } 3383 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3384 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3385 /* Already doing offline processing */ 3386 dasd_put_device(device); 3387 dasd_remove_sysfs_files(cdev); 3388 return; 3389 } 3390 /* 3391 * This device is removed unconditionally. Set offline 3392 * flag to prevent dasd_open from opening it while it is 3393 * no quite down yet. 3394 */ 3395 dasd_set_target_state(device, DASD_STATE_NEW); 3396 /* dasd_delete_device destroys the device reference. */ 3397 block = device->block; 3398 dasd_delete_device(device); 3399 /* 3400 * life cycle of block is bound to device, so delete it after 3401 * device was safely removed 3402 */ 3403 if (block) 3404 dasd_free_block(block); 3405 3406 dasd_remove_sysfs_files(cdev); 3407 } 3408 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3409 3410 /* 3411 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3412 * the device is detected for the first time and is supposed to be used 3413 * or the user has started activation through sysfs. 3414 */ 3415 int dasd_generic_set_online(struct ccw_device *cdev, 3416 struct dasd_discipline *base_discipline) 3417 { 3418 struct dasd_discipline *discipline; 3419 struct dasd_device *device; 3420 int rc; 3421 3422 /* first online clears initial online feature flag */ 3423 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3424 device = dasd_create_device(cdev); 3425 if (IS_ERR(device)) 3426 return PTR_ERR(device); 3427 3428 discipline = base_discipline; 3429 if (device->features & DASD_FEATURE_USEDIAG) { 3430 if (!dasd_diag_discipline_pointer) { 3431 /* Try to load the required module. */ 3432 rc = request_module(DASD_DIAG_MOD); 3433 if (rc) { 3434 pr_warn("%s Setting the DASD online failed " 3435 "because the required module %s " 3436 "could not be loaded (rc=%d)\n", 3437 dev_name(&cdev->dev), DASD_DIAG_MOD, 3438 rc); 3439 dasd_delete_device(device); 3440 return -ENODEV; 3441 } 3442 } 3443 /* Module init could have failed, so check again here after 3444 * request_module(). 
*/ 3445 if (!dasd_diag_discipline_pointer) { 3446 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3447 dev_name(&cdev->dev)); 3448 dasd_delete_device(device); 3449 return -ENODEV; 3450 } 3451 discipline = dasd_diag_discipline_pointer; 3452 } 3453 if (!try_module_get(base_discipline->owner)) { 3454 dasd_delete_device(device); 3455 return -EINVAL; 3456 } 3457 if (!try_module_get(discipline->owner)) { 3458 module_put(base_discipline->owner); 3459 dasd_delete_device(device); 3460 return -EINVAL; 3461 } 3462 device->base_discipline = base_discipline; 3463 device->discipline = discipline; 3464 3465 /* check_device will allocate block device if necessary */ 3466 rc = discipline->check_device(device); 3467 if (rc) { 3468 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3469 dev_name(&cdev->dev), discipline->name, rc); 3470 module_put(discipline->owner); 3471 module_put(base_discipline->owner); 3472 dasd_delete_device(device); 3473 return rc; 3474 } 3475 3476 dasd_set_target_state(device, DASD_STATE_ONLINE); 3477 if (device->state <= DASD_STATE_KNOWN) { 3478 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3479 dev_name(&cdev->dev)); 3480 rc = -ENODEV; 3481 dasd_set_target_state(device, DASD_STATE_NEW); 3482 if (device->block) 3483 dasd_free_block(device->block); 3484 dasd_delete_device(device); 3485 } else 3486 pr_debug("dasd_generic device %s found\n", 3487 dev_name(&cdev->dev)); 3488 3489 wait_event(dasd_init_waitq, _wait_for_device(device)); 3490 3491 dasd_put_device(device); 3492 return rc; 3493 } 3494 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3495 3496 int dasd_generic_set_offline(struct ccw_device *cdev) 3497 { 3498 struct dasd_device *device; 3499 struct dasd_block *block; 3500 int max_count, open_count, rc; 3501 3502 rc = 0; 3503 device = dasd_device_from_cdev(cdev); 3504 if (IS_ERR(device)) 3505 return PTR_ERR(device); 3506 3507 /* 3508 * We must make sure that this device is currently not in use. 3509 * The open_count is increased for every opener, that includes 3510 * the blkdev_get in dasd_scan_partitions. We are only interested 3511 * in the other openers. 3512 */ 3513 if (device->block) { 3514 max_count = device->block->bdev ? 
0 : -1; 3515 open_count = atomic_read(&device->block->open_count); 3516 if (open_count > max_count) { 3517 if (open_count > 0) 3518 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3519 dev_name(&cdev->dev), open_count); 3520 else 3521 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3522 dev_name(&cdev->dev)); 3523 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3524 dasd_put_device(device); 3525 return -EBUSY; 3526 } 3527 } 3528 3529 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3530 /* 3531 * safe offline already running 3532 * could only be called by normal offline so safe_offline flag 3533 * needs to be removed to run normal offline and kill all I/O 3534 */ 3535 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3536 /* Already doing normal offline processing */ 3537 dasd_put_device(device); 3538 return -EBUSY; 3539 } else 3540 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3541 3542 } else 3543 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3544 /* Already doing offline processing */ 3545 dasd_put_device(device); 3546 return -EBUSY; 3547 } 3548 3549 /* 3550 * if safe_offline called set safe_offline_running flag and 3551 * clear safe_offline so that a call to normal offline 3552 * can overrun safe_offline processing 3553 */ 3554 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3555 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3556 /* 3557 * If we want to set the device safe offline all IO operations 3558 * should be finished before continuing the offline process 3559 * so sync bdev first and then wait for our queues to become 3560 * empty 3561 */ 3562 /* sync blockdev and partitions */ 3563 rc = fsync_bdev(device->block->bdev); 3564 if (rc != 0) 3565 goto interrupted; 3566 3567 /* schedule device tasklet and wait for completion */ 3568 dasd_schedule_device_bh(device); 3569 rc = wait_event_interruptible(shutdown_waitq, 3570 _wait_for_empty_queues(device)); 3571 if (rc != 0) 3572 goto interrupted; 3573 } 3574 3575 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3576 dasd_set_target_state(device, DASD_STATE_NEW); 3577 /* dasd_delete_device destroys the device reference. */ 3578 block = device->block; 3579 dasd_delete_device(device); 3580 /* 3581 * life cycle of block is bound to device, so delete it after 3582 * device was safely removed 3583 */ 3584 if (block) 3585 dasd_free_block(block); 3586 return 0; 3587 3588 interrupted: 3589 /* interrupted by signal */ 3590 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3591 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3592 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3593 dasd_put_device(device); 3594 return rc; 3595 } 3596 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3597 3598 int dasd_generic_last_path_gone(struct dasd_device *device) 3599 { 3600 struct dasd_ccw_req *cqr; 3601 3602 dev_warn(&device->cdev->dev, "No operational channel path is left " 3603 "for the device\n"); 3604 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3605 /* First of all call extended error reporting. */ 3606 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3607 3608 if (device->state < DASD_STATE_BASIC) 3609 return 0; 3610 /* Device is active. We want to keep it. 
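 * Requeue everything that is currently in flight so it can be
 * restarted once a path returns, then stop the device and let the
 * tasklet sort out the queue.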
*/ 3611 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3612 if ((cqr->status == DASD_CQR_IN_IO) || 3613 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3614 cqr->status = DASD_CQR_QUEUED; 3615 cqr->retries++; 3616 } 3617 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3618 dasd_device_clear_timer(device); 3619 dasd_schedule_device_bh(device); 3620 return 1; 3621 } 3622 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3623 3624 int dasd_generic_path_operational(struct dasd_device *device) 3625 { 3626 dev_info(&device->cdev->dev, "A channel path to the device has become " 3627 "operational\n"); 3628 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3629 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3630 if (device->stopped & DASD_UNRESUMED_PM) { 3631 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3632 dasd_restore_device(device); 3633 return 1; 3634 } 3635 dasd_schedule_device_bh(device); 3636 if (device->block) 3637 dasd_schedule_block_bh(device->block); 3638 3639 if (!device->stopped) 3640 wake_up(&generic_waitq); 3641 3642 return 1; 3643 } 3644 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3645 3646 int dasd_generic_notify(struct ccw_device *cdev, int event) 3647 { 3648 struct dasd_device *device; 3649 int ret; 3650 3651 device = dasd_device_from_cdev_locked(cdev); 3652 if (IS_ERR(device)) 3653 return 0; 3654 ret = 0; 3655 switch (event) { 3656 case CIO_GONE: 3657 case CIO_BOXED: 3658 case CIO_NO_PATH: 3659 device->path_data.opm = 0; 3660 device->path_data.ppm = 0; 3661 device->path_data.npm = 0; 3662 ret = dasd_generic_last_path_gone(device); 3663 break; 3664 case CIO_OPER: 3665 ret = 1; 3666 if (device->path_data.opm) 3667 ret = dasd_generic_path_operational(device); 3668 break; 3669 } 3670 dasd_put_device(device); 3671 return ret; 3672 } 3673 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3674 3675 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3676 { 3677 int chp; 3678 __u8 oldopm, eventlpm; 3679 struct dasd_device *device; 3680 3681 device = dasd_device_from_cdev_locked(cdev); 3682 if (IS_ERR(device)) 3683 return; 3684 for (chp = 0; chp < 8; chp++) { 3685 eventlpm = 0x80 >> chp; 3686 if (path_event[chp] & PE_PATH_GONE) { 3687 oldopm = device->path_data.opm; 3688 device->path_data.opm &= ~eventlpm; 3689 device->path_data.ppm &= ~eventlpm; 3690 device->path_data.npm &= ~eventlpm; 3691 if (oldopm && !device->path_data.opm) { 3692 dev_warn(&device->cdev->dev, 3693 "No verified channel paths remain " 3694 "for the device\n"); 3695 DBF_DEV_EVENT(DBF_WARNING, device, 3696 "%s", "last verified path gone"); 3697 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3698 dasd_device_set_stop_bits(device, 3699 DASD_STOPPED_DC_WAIT); 3700 } 3701 } 3702 if (path_event[chp] & PE_PATH_AVAILABLE) { 3703 device->path_data.opm &= ~eventlpm; 3704 device->path_data.ppm &= ~eventlpm; 3705 device->path_data.npm &= ~eventlpm; 3706 device->path_data.tbvpm |= eventlpm; 3707 dasd_schedule_device_bh(device); 3708 } 3709 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3710 if (!(device->path_data.opm & eventlpm) && 3711 !(device->path_data.tbvpm & eventlpm)) { 3712 /* 3713 * we can not establish a pathgroup on an 3714 * unavailable path, so trigger a path 3715 * verification first 3716 */ 3717 device->path_data.tbvpm |= eventlpm; 3718 dasd_schedule_device_bh(device); 3719 } 3720 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3721 "Pathgroup re-established\n"); 3722 if (device->discipline->kick_validate) 3723 device->discipline->kick_validate(device); 3724 
} 3725 } 3726 dasd_put_device(device); 3727 } 3728 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3729 3730 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3731 { 3732 if (!device->path_data.opm && lpm) { 3733 device->path_data.opm = lpm; 3734 dasd_generic_path_operational(device); 3735 } else 3736 device->path_data.opm |= lpm; 3737 return 0; 3738 } 3739 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3740 3741 3742 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3743 { 3744 struct dasd_device *device = dasd_device_from_cdev(cdev); 3745 struct list_head freeze_queue; 3746 struct dasd_ccw_req *cqr, *n; 3747 struct dasd_ccw_req *refers; 3748 int rc; 3749 3750 if (IS_ERR(device)) 3751 return PTR_ERR(device); 3752 3753 /* mark device as suspended */ 3754 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3755 3756 if (device->discipline->freeze) 3757 rc = device->discipline->freeze(device); 3758 3759 /* disallow new I/O */ 3760 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3761 3762 /* clear active requests and requeue them to block layer if possible */ 3763 INIT_LIST_HEAD(&freeze_queue); 3764 spin_lock_irq(get_ccwdev_lock(cdev)); 3765 rc = 0; 3766 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3767 /* Check status and move request to flush_queue */ 3768 if (cqr->status == DASD_CQR_IN_IO) { 3769 rc = device->discipline->term_IO(cqr); 3770 if (rc) { 3771 /* unable to terminate requeust */ 3772 dev_err(&device->cdev->dev, 3773 "Unable to terminate request %p " 3774 "on suspend\n", cqr); 3775 spin_unlock_irq(get_ccwdev_lock(cdev)); 3776 dasd_put_device(device); 3777 return rc; 3778 } 3779 } 3780 list_move_tail(&cqr->devlist, &freeze_queue); 3781 } 3782 spin_unlock_irq(get_ccwdev_lock(cdev)); 3783 3784 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3785 wait_event(dasd_flush_wq, 3786 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3787 if (cqr->status == DASD_CQR_CLEARED) 3788 cqr->status = DASD_CQR_QUEUED; 3789 3790 /* requeue requests to blocklayer will only work for 3791 block device requests */ 3792 if (_dasd_requeue_request(cqr)) 3793 continue; 3794 3795 /* remove requests from device and block queue */ 3796 list_del_init(&cqr->devlist); 3797 while (cqr->refers != NULL) { 3798 refers = cqr->refers; 3799 /* remove the request from the block queue */ 3800 list_del(&cqr->blocklist); 3801 /* free the finished erp request */ 3802 dasd_free_erp_request(cqr, cqr->memdev); 3803 cqr = refers; 3804 } 3805 if (cqr->block) 3806 list_del_init(&cqr->blocklist); 3807 cqr->block->base->discipline->free_cp( 3808 cqr, (struct request *) cqr->callback_data); 3809 } 3810 3811 /* 3812 * if requests remain then they are internal request 3813 * and go back to the device queue 3814 */ 3815 if (!list_empty(&freeze_queue)) { 3816 /* move freeze_queue to start of the ccw_queue */ 3817 spin_lock_irq(get_ccwdev_lock(cdev)); 3818 list_splice_tail(&freeze_queue, &device->ccw_queue); 3819 spin_unlock_irq(get_ccwdev_lock(cdev)); 3820 } 3821 dasd_put_device(device); 3822 return rc; 3823 } 3824 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3825 3826 int dasd_generic_restore_device(struct ccw_device *cdev) 3827 { 3828 struct dasd_device *device = dasd_device_from_cdev(cdev); 3829 int rc = 0; 3830 3831 if (IS_ERR(device)) 3832 return PTR_ERR(device); 3833 3834 /* allow new IO again */ 3835 dasd_device_remove_stop_bits(device, 3836 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3837 3838 dasd_schedule_device_bh(device); 3839 3840 /* 3841 * call discipline restore function 3842 * if device is stopped 

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10 * HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always an
 * array of 32 bytes, so we can unify the sense data access for
 * both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1: /* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);
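
/*
 * Illustrative sketch, not part of this driver: a typical consumer of
 * dasd_get_sense() is an interrupt handler or ERP routine that wants
 * the unified 32-byte sense buffer regardless of command or transport
 * mode. The bit tested below (0x80 in sense byte 0, command reject)
 * and the helper handle_command_reject() are hypothetical examples;
 * sense interpretation is discipline specific.
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense && (sense[0] & 0x80))
 *		handle_command_reject(device, cqr);
 */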

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
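
/*
 * Usage note (illustrative): dasd_parse(), called from dasd_init()
 * above, consumes the "dasd=" kernel/module parameter that names the
 * devices the driver should use, for example
 *
 *	dasd=0.0.0200-0.0.0207,0.0.0300(ro)
 *
 * The bus IDs, range and (ro) feature shown here are examples only;
 * the accepted syntax is defined in dasd_devmap.c.
 */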