/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int  dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

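/*
 * Editorial note (illustrative sketch only, not taken from the driver):
 * callers pair the two allocators above and check for ERR_PTR return
 * values, roughly like this:
 *
 *	struct dasd_device *device = dasd_alloc_device();
 *	struct dasd_block *block;
 *
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	block = dasd_alloc_block();
 *	if (IS_ERR(block)) {
 *		dasd_free_device(device);
 *		return PTR_ERR(block);
 *	}
 *	device->block = block;
 *	block->base = device;
 *
 * The actual setup is done by the devmap and discipline code; this is
 * only a simplified example of the intended pairing.
 */
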
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up the reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

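/*
 * Editorial note (illustrative, not part of the original driver comments):
 * the functions above move a device along the state ladder
 *
 *	DASD_STATE_NEW -> KNOWN -> BASIC -> READY (or UNFMT) -> ONLINE
 *
 * and back down again. Callers do not invoke the individual transition
 * helpers directly; they set device->target and let dasd_change_state()
 * walk the ladder, e.g. (sketch only, with device->state_mutex held):
 *
 *	device->target = DASD_STATE_ONLINE;
 *	dasd_change_state(device);
 *
 * The exported entry points that do exactly this are
 * dasd_set_target_state() and dasd_enable_device() further below.
 */
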
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the device to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			     \
{								     \
	for (index = 0; index < 31 && value >> (2+index); index++)  \
		;						     \
}

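/*
 * Editorial note (illustrative): dasd_profile_counter() maps a value to a
 * histogram bucket index by shifting until the value fits, i.e. roughly
 * index = max(0, floor(log2(value)) - 1), capped at 31. For example,
 * assuming the macro above:
 *
 *	value  0..3   -> index 0
 *	value  4..7   -> index 1
 *	value  8..15  -> index 2
 *	value 16..31  -> index 3
 *
 * so the bucket boundaries of the dasd_io_* histograms below grow as
 * powers of two.
 */
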
static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

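/*
 * Editorial note (illustrative usage, assuming debugfs is mounted at
 * /sys/kernel/debug): the write handler above accepts the keywords
 * "on", "off" and "reset", so profiling can be driven from user space
 * roughly like this:
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	cat          /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Per-device and per-block "statistics" files are created under the
 * corresponding /sys/kernel/debug/dasd/ subdirectories by
 * dasd_profile_init() below.
 */
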
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

static int dasd_hosts_open(struct inode *inode, struct file *file)
{
	struct dasd_device *device = inode->i_private;

	return single_open(file, dasd_hosts_show, device);
}

static const struct file_operations dasd_hosts_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_hosts_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_kmalloc_request);

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kfree_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

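/*
 * Editorial note (illustrative sketch only, not taken verbatim from a
 * discipline): a discipline typically allocates a request from the
 * per-device ccw memory, fills in the channel program and later frees
 * it again, roughly along these lines:
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	ccw = cqr->cpaddr;
 *	(build the channel program in ccw[0..cplength-1] and use
 *	 cqr->data for the datasize bytes of payload)
 *	cqr->startdev = device;
 *	cqr->memdev = device;
 *	cqr->retries = 256;
 *	cqr->buildclk = get_tod_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	...
 *	dasd_sfree_request(cqr, cqr->memdev);
 *
 * dasd_kmalloc_request()/dasd_kfree_request() are the kmalloc-backed
 * counterparts for requests that do not come from the static chunks.
 */
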
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = (struct dasd_device *) cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the corresponding suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		device->discipline->check_attention(device, irb->esw.esw1.lpum);
		dasd_put_device(device);
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status*/
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * return 1 when device is not eligible for IO
 */
static int __dasd_device_is_unusable(struct dasd_device *device,
				     struct dasd_ccw_req *cqr)
{
	int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM);

	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* dasd is being set offline. */
		return 1;
	}
	if (device->stopped) {
		if (device->stopped & mask) {
			/* stopped and CQR will not change that. */
*/ 1957 return 1; 1958 } 1959 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1960 /* CQR is not able to change device to 1961 * operational. */ 1962 return 1; 1963 } 1964 /* CQR required to get device operational. */ 1965 } 1966 return 0; 1967 } 1968 1969 /* 1970 * Take a look at the first request on the ccw queue and check 1971 * if it needs to be started. 1972 */ 1973 static void __dasd_device_start_head(struct dasd_device *device) 1974 { 1975 struct dasd_ccw_req *cqr; 1976 int rc; 1977 1978 if (list_empty(&device->ccw_queue)) 1979 return; 1980 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1981 if (cqr->status != DASD_CQR_QUEUED) 1982 return; 1983 /* if device is not usable return request to upper layer */ 1984 if (__dasd_device_is_unusable(device, cqr)) { 1985 cqr->intrc = -EAGAIN; 1986 cqr->status = DASD_CQR_CLEARED; 1987 dasd_schedule_device_bh(device); 1988 return; 1989 } 1990 1991 rc = device->discipline->start_IO(cqr); 1992 if (rc == 0) 1993 dasd_device_set_timer(device, cqr->expires); 1994 else if (rc == -EACCES) { 1995 dasd_schedule_device_bh(device); 1996 } else 1997 /* Hmpf, try again in 1/2 sec */ 1998 dasd_device_set_timer(device, 50); 1999 } 2000 2001 static void __dasd_device_check_path_events(struct dasd_device *device) 2002 { 2003 int rc; 2004 2005 if (device->path_data.tbvpm) { 2006 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 2007 DASD_UNRESUMED_PM)) 2008 return; 2009 rc = device->discipline->verify_path( 2010 device, device->path_data.tbvpm); 2011 if (rc) 2012 dasd_device_set_timer(device, 50); 2013 else 2014 device->path_data.tbvpm = 0; 2015 } 2016 }; 2017 2018 /* 2019 * Go through all request on the dasd_device request queue, 2020 * terminate them on the cdev if necessary, and return them to the 2021 * submitting layer via callback. 2022 * Note: 2023 * Make sure that all 'submitting layers' still exist when 2024 * this function is called!. In other words, when 'device' is a base 2025 * device then all block layer requests must have been removed before 2026 * via dasd_flush_block_queue. 2027 */ 2028 int dasd_flush_device_queue(struct dasd_device *device) 2029 { 2030 struct dasd_ccw_req *cqr, *n; 2031 int rc; 2032 struct list_head flush_queue; 2033 2034 INIT_LIST_HEAD(&flush_queue); 2035 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2036 rc = 0; 2037 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2038 /* Check status and move request to flush_queue */ 2039 switch (cqr->status) { 2040 case DASD_CQR_IN_IO: 2041 rc = device->discipline->term_IO(cqr); 2042 if (rc) { 2043 /* unable to terminate requeust */ 2044 dev_err(&device->cdev->dev, 2045 "Flushing the DASD request queue " 2046 "failed for request %p\n", cqr); 2047 /* stop flush processing */ 2048 goto finished; 2049 } 2050 break; 2051 case DASD_CQR_QUEUED: 2052 cqr->stopclk = get_tod_clock(); 2053 cqr->status = DASD_CQR_CLEARED; 2054 break; 2055 default: /* no need to modify the others */ 2056 break; 2057 } 2058 list_move_tail(&cqr->devlist, &flush_queue); 2059 } 2060 finished: 2061 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2062 /* 2063 * After this point all requests must be in state CLEAR_PENDING, 2064 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2065 * one of the others. 
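 * The interrupt handler turns CLEAR_PENDING into CLEARED and wakes
 * dasd_flush_wq once the clear completes.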
2066 */ 2067 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2068 wait_event(dasd_flush_wq, 2069 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2070 /* 2071 * Now set each request back to TERMINATED, DONE or NEED_ERP 2072 * and call the callback function of flushed requests 2073 */ 2074 __dasd_device_process_final_queue(device, &flush_queue); 2075 return rc; 2076 } 2077 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2078 2079 /* 2080 * Acquire the device lock and process queues for the device. 2081 */ 2082 static void dasd_device_tasklet(struct dasd_device *device) 2083 { 2084 struct list_head final_queue; 2085 2086 atomic_set (&device->tasklet_scheduled, 0); 2087 INIT_LIST_HEAD(&final_queue); 2088 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2089 /* Check expire time of first request on the ccw queue. */ 2090 __dasd_device_check_expire(device); 2091 /* find final requests on ccw queue */ 2092 __dasd_device_process_ccw_queue(device, &final_queue); 2093 __dasd_device_check_path_events(device); 2094 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2095 /* Now call the callback function of requests with final status */ 2096 __dasd_device_process_final_queue(device, &final_queue); 2097 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2098 /* Now check if the head of the ccw queue needs to be started. */ 2099 __dasd_device_start_head(device); 2100 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2101 if (waitqueue_active(&shutdown_waitq)) 2102 wake_up(&shutdown_waitq); 2103 dasd_put_device(device); 2104 } 2105 2106 /* 2107 * Schedules a call to dasd_tasklet over the device tasklet. 2108 */ 2109 void dasd_schedule_device_bh(struct dasd_device *device) 2110 { 2111 /* Protect against rescheduling. */ 2112 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2113 return; 2114 dasd_get_device(device); 2115 tasklet_hi_schedule(&device->tasklet); 2116 } 2117 EXPORT_SYMBOL(dasd_schedule_device_bh); 2118 2119 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2120 { 2121 device->stopped |= bits; 2122 } 2123 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2124 2125 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2126 { 2127 device->stopped &= ~bits; 2128 if (!device->stopped) 2129 wake_up(&generic_waitq); 2130 } 2131 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2132 2133 /* 2134 * Queue a request to the head of the device ccw_queue. 2135 * Start the I/O if possible. 2136 */ 2137 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2138 { 2139 struct dasd_device *device; 2140 unsigned long flags; 2141 2142 device = cqr->startdev; 2143 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2144 cqr->status = DASD_CQR_QUEUED; 2145 list_add(&cqr->devlist, &device->ccw_queue); 2146 /* let the bh start the request to keep them in order */ 2147 dasd_schedule_device_bh(device); 2148 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2149 } 2150 EXPORT_SYMBOL(dasd_add_request_head); 2151 2152 /* 2153 * Queue a request to the tail of the device ccw_queue. 2154 * Start the I/O if possible. 
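 * The request is actually started from the bottom half to keep the
 * queue in order.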
2155 */ 2156 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2157 { 2158 struct dasd_device *device; 2159 unsigned long flags; 2160 2161 device = cqr->startdev; 2162 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2163 cqr->status = DASD_CQR_QUEUED; 2164 list_add_tail(&cqr->devlist, &device->ccw_queue); 2165 /* let the bh start the request to keep them in order */ 2166 dasd_schedule_device_bh(device); 2167 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2168 } 2169 EXPORT_SYMBOL(dasd_add_request_tail); 2170 2171 /* 2172 * Wakeup helper for the 'sleep_on' functions. 2173 */ 2174 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2175 { 2176 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2177 cqr->callback_data = DASD_SLEEPON_END_TAG; 2178 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2179 wake_up(&generic_waitq); 2180 } 2181 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2182 2183 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2184 { 2185 struct dasd_device *device; 2186 int rc; 2187 2188 device = cqr->startdev; 2189 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2190 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2191 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2192 return rc; 2193 } 2194 2195 /* 2196 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2197 */ 2198 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2199 { 2200 struct dasd_device *device; 2201 dasd_erp_fn_t erp_fn; 2202 2203 if (cqr->status == DASD_CQR_FILLED) 2204 return 0; 2205 device = cqr->startdev; 2206 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2207 if (cqr->status == DASD_CQR_TERMINATED) { 2208 device->discipline->handle_terminated_request(cqr); 2209 return 1; 2210 } 2211 if (cqr->status == DASD_CQR_NEED_ERP) { 2212 erp_fn = device->discipline->erp_action(cqr); 2213 erp_fn(cqr); 2214 return 1; 2215 } 2216 if (cqr->status == DASD_CQR_FAILED) 2217 dasd_log_sense(cqr, &cqr->irb); 2218 if (cqr->refers) { 2219 __dasd_process_erp(device, cqr); 2220 return 1; 2221 } 2222 } 2223 return 0; 2224 } 2225 2226 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2227 { 2228 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2229 if (cqr->refers) /* erp is not done yet */ 2230 return 1; 2231 return ((cqr->status != DASD_CQR_DONE) && 2232 (cqr->status != DASD_CQR_FAILED)); 2233 } else 2234 return (cqr->status == DASD_CQR_FILLED); 2235 } 2236 2237 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2238 { 2239 struct dasd_device *device; 2240 int rc; 2241 struct list_head ccw_queue; 2242 struct dasd_ccw_req *cqr; 2243 2244 INIT_LIST_HEAD(&ccw_queue); 2245 maincqr->status = DASD_CQR_FILLED; 2246 device = maincqr->startdev; 2247 list_add(&maincqr->blocklist, &ccw_queue); 2248 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2249 cqr = list_first_entry(&ccw_queue, 2250 struct dasd_ccw_req, blocklist)) { 2251 2252 if (__dasd_sleep_on_erp(cqr)) 2253 continue; 2254 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2255 continue; 2256 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2257 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2258 cqr->status = DASD_CQR_FAILED; 2259 cqr->intrc = -EPERM; 2260 continue; 2261 } 2262 /* Non-temporary stop condition will trigger fail fast */ 2263 if (device->stopped & ~DASD_STOPPED_PENDING && 2264 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2265 (!dasd_eer_enabled(device))) { 2266 cqr->status = DASD_CQR_FAILED; 2267 cqr->intrc = -ENOLINK; 
2268 continue; 2269 } 2270 /* 2271 * Don't try to start requests if device is in 2272 * offline processing, it might wait forever 2273 */ 2274 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2275 cqr->status = DASD_CQR_FAILED; 2276 cqr->intrc = -ENODEV; 2277 continue; 2278 } 2279 /* 2280 * Don't try to start requests if device is stopped 2281 * except path verification requests 2282 */ 2283 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2284 if (interruptible) { 2285 rc = wait_event_interruptible( 2286 generic_waitq, !(device->stopped)); 2287 if (rc == -ERESTARTSYS) { 2288 cqr->status = DASD_CQR_FAILED; 2289 maincqr->intrc = rc; 2290 continue; 2291 } 2292 } else 2293 wait_event(generic_waitq, !(device->stopped)); 2294 } 2295 if (!cqr->callback) 2296 cqr->callback = dasd_wakeup_cb; 2297 2298 cqr->callback_data = DASD_SLEEPON_START_TAG; 2299 dasd_add_request_tail(cqr); 2300 if (interruptible) { 2301 rc = wait_event_interruptible( 2302 generic_waitq, _wait_for_wakeup(cqr)); 2303 if (rc == -ERESTARTSYS) { 2304 dasd_cancel_req(cqr); 2305 /* wait (non-interruptible) for final status */ 2306 wait_event(generic_waitq, 2307 _wait_for_wakeup(cqr)); 2308 cqr->status = DASD_CQR_FAILED; 2309 maincqr->intrc = rc; 2310 continue; 2311 } 2312 } else 2313 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2314 } 2315 2316 maincqr->endclk = get_tod_clock(); 2317 if ((maincqr->status != DASD_CQR_DONE) && 2318 (maincqr->intrc != -ERESTARTSYS)) 2319 dasd_log_sense(maincqr, &maincqr->irb); 2320 if (maincqr->status == DASD_CQR_DONE) 2321 rc = 0; 2322 else if (maincqr->intrc) 2323 rc = maincqr->intrc; 2324 else 2325 rc = -EIO; 2326 return rc; 2327 } 2328 2329 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2330 { 2331 struct dasd_ccw_req *cqr; 2332 2333 list_for_each_entry(cqr, ccw_queue, blocklist) { 2334 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2335 return 0; 2336 } 2337 2338 return 1; 2339 } 2340 2341 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2342 { 2343 struct dasd_device *device; 2344 struct dasd_ccw_req *cqr, *n; 2345 u8 *sense = NULL; 2346 int rc; 2347 2348 retry: 2349 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2350 device = cqr->startdev; 2351 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2352 continue; 2353 2354 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2355 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2356 cqr->status = DASD_CQR_FAILED; 2357 cqr->intrc = -EPERM; 2358 continue; 2359 } 2360 /*Non-temporary stop condition will trigger fail fast*/ 2361 if (device->stopped & ~DASD_STOPPED_PENDING && 2362 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2363 !dasd_eer_enabled(device)) { 2364 cqr->status = DASD_CQR_FAILED; 2365 cqr->intrc = -EAGAIN; 2366 continue; 2367 } 2368 2369 /*Don't try to start requests if device is stopped*/ 2370 if (interruptible) { 2371 rc = wait_event_interruptible( 2372 generic_waitq, !device->stopped); 2373 if (rc == -ERESTARTSYS) { 2374 cqr->status = DASD_CQR_FAILED; 2375 cqr->intrc = rc; 2376 continue; 2377 } 2378 } else 2379 wait_event(generic_waitq, !(device->stopped)); 2380 2381 if (!cqr->callback) 2382 cqr->callback = dasd_wakeup_cb; 2383 cqr->callback_data = DASD_SLEEPON_START_TAG; 2384 dasd_add_request_tail(cqr); 2385 } 2386 2387 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2388 2389 rc = 0; 2390 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2391 /* 2392 * In some cases the 'File Protected' or 'Incorrect Length' 2393 * error might be 
expected and error recovery would be 2394 * unnecessary in these cases. Check if the according suppress 2395 * bit is set. 2396 */ 2397 sense = dasd_get_sense(&cqr->irb); 2398 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2399 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2400 continue; 2401 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2402 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2403 continue; 2404 2405 /* 2406 * for alias devices simplify error recovery and 2407 * return to upper layer 2408 * do not skip ERP requests 2409 */ 2410 if (cqr->startdev != cqr->basedev && !cqr->refers && 2411 (cqr->status == DASD_CQR_TERMINATED || 2412 cqr->status == DASD_CQR_NEED_ERP)) 2413 return -EAGAIN; 2414 2415 /* normal recovery for basedev IO */ 2416 if (__dasd_sleep_on_erp(cqr)) 2417 /* handle erp first */ 2418 goto retry; 2419 } 2420 2421 return 0; 2422 } 2423 2424 /* 2425 * Queue a request to the tail of the device ccw_queue and wait for 2426 * it's completion. 2427 */ 2428 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2429 { 2430 return _dasd_sleep_on(cqr, 0); 2431 } 2432 EXPORT_SYMBOL(dasd_sleep_on); 2433 2434 /* 2435 * Start requests from a ccw_queue and wait for their completion. 2436 */ 2437 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2438 { 2439 return _dasd_sleep_on_queue(ccw_queue, 0); 2440 } 2441 EXPORT_SYMBOL(dasd_sleep_on_queue); 2442 2443 /* 2444 * Queue a request to the tail of the device ccw_queue and wait 2445 * interruptible for it's completion. 2446 */ 2447 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2448 { 2449 return _dasd_sleep_on(cqr, 1); 2450 } 2451 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2452 2453 /* 2454 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2455 * for eckd devices) the currently running request has to be terminated 2456 * and be put back to status queued, before the special request is added 2457 * to the head of the queue. Then the special request is waited on normally. 2458 */ 2459 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2460 { 2461 struct dasd_ccw_req *cqr; 2462 int rc; 2463 2464 if (list_empty(&device->ccw_queue)) 2465 return 0; 2466 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2467 rc = device->discipline->term_IO(cqr); 2468 if (!rc) 2469 /* 2470 * CQR terminated because a more important request is pending. 2471 * Undo decreasing of retry counter because this is 2472 * not an error case. 
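 * (term_IO decremented the counter when it cancelled the running request)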
2473 */ 2474 cqr->retries++; 2475 return rc; 2476 } 2477 2478 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2479 { 2480 struct dasd_device *device; 2481 int rc; 2482 2483 device = cqr->startdev; 2484 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2485 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2486 cqr->status = DASD_CQR_FAILED; 2487 cqr->intrc = -EPERM; 2488 return -EIO; 2489 } 2490 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2491 rc = _dasd_term_running_cqr(device); 2492 if (rc) { 2493 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2494 return rc; 2495 } 2496 cqr->callback = dasd_wakeup_cb; 2497 cqr->callback_data = DASD_SLEEPON_START_TAG; 2498 cqr->status = DASD_CQR_QUEUED; 2499 /* 2500 * add new request as second 2501 * first the terminated cqr needs to be finished 2502 */ 2503 list_add(&cqr->devlist, device->ccw_queue.next); 2504 2505 /* let the bh start the request to keep them in order */ 2506 dasd_schedule_device_bh(device); 2507 2508 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2509 2510 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2511 2512 if (cqr->status == DASD_CQR_DONE) 2513 rc = 0; 2514 else if (cqr->intrc) 2515 rc = cqr->intrc; 2516 else 2517 rc = -EIO; 2518 2519 /* kick tasklets */ 2520 dasd_schedule_device_bh(device); 2521 if (device->block) 2522 dasd_schedule_block_bh(device->block); 2523 2524 return rc; 2525 } 2526 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2527 2528 /* 2529 * Cancels a request that was started with dasd_sleep_on_req. 2530 * This is useful to timeout requests. The request will be 2531 * terminated if it is currently in i/o. 2532 * Returns 0 if request termination was successful 2533 * negative error code if termination failed 2534 * Cancellation of a request is an asynchronous operation! The calling 2535 * function has to wait until the request is properly returned via callback. 2536 */ 2537 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2538 { 2539 struct dasd_device *device = cqr->startdev; 2540 unsigned long flags; 2541 int rc; 2542 2543 rc = 0; 2544 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2545 switch (cqr->status) { 2546 case DASD_CQR_QUEUED: 2547 /* request was not started - just set to cleared */ 2548 cqr->status = DASD_CQR_CLEARED; 2549 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2550 cqr->callback_data = DASD_SLEEPON_END_TAG; 2551 break; 2552 case DASD_CQR_IN_IO: 2553 /* request in IO - terminate IO and release again */ 2554 rc = device->discipline->term_IO(cqr); 2555 if (rc) { 2556 dev_err(&device->cdev->dev, 2557 "Cancelling request %p failed with rc=%d\n", 2558 cqr, rc); 2559 } else { 2560 cqr->stopclk = get_tod_clock(); 2561 } 2562 break; 2563 default: /* already finished or clear pending - do nothing */ 2564 break; 2565 } 2566 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2567 dasd_schedule_device_bh(device); 2568 return rc; 2569 } 2570 EXPORT_SYMBOL(dasd_cancel_req); 2571 2572 /* 2573 * SECTION: Operations of the dasd_block layer. 2574 */ 2575 2576 /* 2577 * Timeout function for dasd_block. This is used when the block layer 2578 * is waiting for something that may not come reliably, (e.g. 
a state 2579 * change interrupt) 2580 */ 2581 static void dasd_block_timeout(unsigned long ptr) 2582 { 2583 unsigned long flags; 2584 struct dasd_block *block; 2585 2586 block = (struct dasd_block *) ptr; 2587 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2588 /* re-activate request queue */ 2589 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2590 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2591 dasd_schedule_block_bh(block); 2592 } 2593 2594 /* 2595 * Setup timeout for a dasd_block in jiffies. 2596 */ 2597 void dasd_block_set_timer(struct dasd_block *block, int expires) 2598 { 2599 if (expires == 0) 2600 del_timer(&block->timer); 2601 else 2602 mod_timer(&block->timer, jiffies + expires); 2603 } 2604 EXPORT_SYMBOL(dasd_block_set_timer); 2605 2606 /* 2607 * Clear timeout for a dasd_block. 2608 */ 2609 void dasd_block_clear_timer(struct dasd_block *block) 2610 { 2611 del_timer(&block->timer); 2612 } 2613 EXPORT_SYMBOL(dasd_block_clear_timer); 2614 2615 /* 2616 * Process finished error recovery ccw. 2617 */ 2618 static void __dasd_process_erp(struct dasd_device *device, 2619 struct dasd_ccw_req *cqr) 2620 { 2621 dasd_erp_fn_t erp_fn; 2622 2623 if (cqr->status == DASD_CQR_DONE) 2624 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2625 else 2626 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2627 erp_fn = device->discipline->erp_postaction(cqr); 2628 erp_fn(cqr); 2629 } 2630 2631 /* 2632 * Fetch requests from the block device queue. 2633 */ 2634 static void __dasd_process_request_queue(struct dasd_block *block) 2635 { 2636 struct request_queue *queue; 2637 struct request *req; 2638 struct dasd_ccw_req *cqr; 2639 struct dasd_device *basedev; 2640 unsigned long flags; 2641 queue = block->request_queue; 2642 basedev = block->base; 2643 /* No queue ? Then there is nothing to do. */ 2644 if (queue == NULL) 2645 return; 2646 2647 /* 2648 * We requeue request from the block device queue to the ccw 2649 * queue only in two states. In state DASD_STATE_READY the 2650 * partition detection is done and we need to requeue requests 2651 * for that. State DASD_STATE_ONLINE is normal block device 2652 * operation. 
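 * In any earlier state incoming requests are failed with -EIO.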
2653 */ 2654 if (basedev->state < DASD_STATE_READY) { 2655 while ((req = blk_fetch_request(block->request_queue))) 2656 __blk_end_request_all(req, -EIO); 2657 return; 2658 } 2659 2660 /* 2661 * if device is stopped do not fetch new requests 2662 * except failfast is active which will let requests fail 2663 * immediately in __dasd_block_start_head() 2664 */ 2665 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) 2666 return; 2667 2668 /* Now we try to fetch requests from the request queue */ 2669 while ((req = blk_peek_request(queue))) { 2670 if (basedev->features & DASD_FEATURE_READONLY && 2671 rq_data_dir(req) == WRITE) { 2672 DBF_DEV_EVENT(DBF_ERR, basedev, 2673 "Rejecting write request %p", 2674 req); 2675 blk_start_request(req); 2676 __blk_end_request_all(req, -EIO); 2677 continue; 2678 } 2679 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2680 (basedev->features & DASD_FEATURE_FAILFAST || 2681 blk_noretry_request(req))) { 2682 DBF_DEV_EVENT(DBF_ERR, basedev, 2683 "Rejecting failfast request %p", 2684 req); 2685 blk_start_request(req); 2686 __blk_end_request_all(req, -ETIMEDOUT); 2687 continue; 2688 } 2689 cqr = basedev->discipline->build_cp(basedev, block, req); 2690 if (IS_ERR(cqr)) { 2691 if (PTR_ERR(cqr) == -EBUSY) 2692 break; /* normal end condition */ 2693 if (PTR_ERR(cqr) == -ENOMEM) 2694 break; /* terminate request queue loop */ 2695 if (PTR_ERR(cqr) == -EAGAIN) { 2696 /* 2697 * The current request cannot be build right 2698 * now, we have to try later. If this request 2699 * is the head-of-queue we stop the device 2700 * for 1/2 second. 2701 */ 2702 if (!list_empty(&block->ccw_queue)) 2703 break; 2704 spin_lock_irqsave( 2705 get_ccwdev_lock(basedev->cdev), flags); 2706 dasd_device_set_stop_bits(basedev, 2707 DASD_STOPPED_PENDING); 2708 spin_unlock_irqrestore( 2709 get_ccwdev_lock(basedev->cdev), flags); 2710 dasd_block_set_timer(block, HZ/2); 2711 break; 2712 } 2713 DBF_DEV_EVENT(DBF_ERR, basedev, 2714 "CCW creation failed (rc=%ld) " 2715 "on request %p", 2716 PTR_ERR(cqr), req); 2717 blk_start_request(req); 2718 __blk_end_request_all(req, -EIO); 2719 continue; 2720 } 2721 /* 2722 * Note: callback is set to dasd_return_cqr_cb in 2723 * __dasd_block_start_head to cover erp requests as well 2724 */ 2725 cqr->callback_data = (void *) req; 2726 cqr->status = DASD_CQR_FILLED; 2727 req->completion_data = cqr; 2728 blk_start_request(req); 2729 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2730 INIT_LIST_HEAD(&cqr->devlist); 2731 dasd_profile_start(block, cqr, req); 2732 } 2733 } 2734 2735 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2736 { 2737 struct request *req; 2738 int status; 2739 int error = 0; 2740 2741 req = (struct request *) cqr->callback_data; 2742 dasd_profile_end(cqr->block, cqr, req); 2743 status = cqr->block->base->discipline->free_cp(cqr, req); 2744 if (status < 0) 2745 error = status; 2746 else if (status == 0) { 2747 if (cqr->intrc == -EPERM) 2748 error = -EBADE; 2749 else if (cqr->intrc == -ENOLINK || 2750 cqr->intrc == -ETIMEDOUT) 2751 error = cqr->intrc; 2752 else 2753 error = -EIO; 2754 } 2755 __blk_end_request_all(req, error); 2756 } 2757 2758 /* 2759 * Process ccw request queue. 2760 */ 2761 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2762 struct list_head *final_queue) 2763 { 2764 struct list_head *l, *n; 2765 struct dasd_ccw_req *cqr; 2766 dasd_erp_fn_t erp_fn; 2767 unsigned long flags; 2768 struct dasd_device *base = block->base; 2769 2770 restart: 2771 /* Process request with final status. 
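 * Final means DONE, FAILED, NEED_ERP or TERMINATED; anything else stays
 * on the queue.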
*/ 2772 list_for_each_safe(l, n, &block->ccw_queue) { 2773 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2774 if (cqr->status != DASD_CQR_DONE && 2775 cqr->status != DASD_CQR_FAILED && 2776 cqr->status != DASD_CQR_NEED_ERP && 2777 cqr->status != DASD_CQR_TERMINATED) 2778 continue; 2779 2780 if (cqr->status == DASD_CQR_TERMINATED) { 2781 base->discipline->handle_terminated_request(cqr); 2782 goto restart; 2783 } 2784 2785 /* Process requests that may be recovered */ 2786 if (cqr->status == DASD_CQR_NEED_ERP) { 2787 erp_fn = base->discipline->erp_action(cqr); 2788 if (IS_ERR(erp_fn(cqr))) 2789 continue; 2790 goto restart; 2791 } 2792 2793 /* log sense for fatal error */ 2794 if (cqr->status == DASD_CQR_FAILED) { 2795 dasd_log_sense(cqr, &cqr->irb); 2796 } 2797 2798 /* First of all call extended error reporting. */ 2799 if (dasd_eer_enabled(base) && 2800 cqr->status == DASD_CQR_FAILED) { 2801 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2802 2803 /* restart request */ 2804 cqr->status = DASD_CQR_FILLED; 2805 cqr->retries = 255; 2806 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2807 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2808 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2809 flags); 2810 goto restart; 2811 } 2812 2813 /* Process finished ERP request. */ 2814 if (cqr->refers) { 2815 __dasd_process_erp(base, cqr); 2816 goto restart; 2817 } 2818 2819 /* Rechain finished requests to final queue */ 2820 cqr->endclk = get_tod_clock(); 2821 list_move_tail(&cqr->blocklist, final_queue); 2822 } 2823 } 2824 2825 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2826 { 2827 dasd_schedule_block_bh(cqr->block); 2828 } 2829 2830 static void __dasd_block_start_head(struct dasd_block *block) 2831 { 2832 struct dasd_ccw_req *cqr; 2833 2834 if (list_empty(&block->ccw_queue)) 2835 return; 2836 /* We allways begin with the first requests on the queue, as some 2837 * of previously started requests have to be enqueued on a 2838 * dasd_device again for error recovery. 2839 */ 2840 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2841 if (cqr->status != DASD_CQR_FILLED) 2842 continue; 2843 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2844 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2845 cqr->status = DASD_CQR_FAILED; 2846 cqr->intrc = -EPERM; 2847 dasd_schedule_block_bh(block); 2848 continue; 2849 } 2850 /* Non-temporary stop condition will trigger fail fast */ 2851 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2852 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2853 (!dasd_eer_enabled(block->base))) { 2854 cqr->status = DASD_CQR_FAILED; 2855 cqr->intrc = -ENOLINK; 2856 dasd_schedule_block_bh(block); 2857 continue; 2858 } 2859 /* Don't try to start requests if device is stopped */ 2860 if (block->base->stopped) 2861 return; 2862 2863 /* just a fail safe check, should not happen */ 2864 if (!cqr->startdev) 2865 cqr->startdev = block->base; 2866 2867 /* make sure that the requests we submit find their way back */ 2868 cqr->callback = dasd_return_cqr_cb; 2869 2870 dasd_add_request_tail(cqr); 2871 } 2872 } 2873 2874 /* 2875 * Central dasd_block layer routine. Takes requests from the generic 2876 * block layer request queue, creates ccw requests, enqueues them on 2877 * a dasd_device and processes ccw requests that have been returned. 
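 * The ccw queue is handled under the block's queue_lock, while the
 * callbacks of finished requests run under the request_queue_lock.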
2878 */ 2879 static void dasd_block_tasklet(struct dasd_block *block) 2880 { 2881 struct list_head final_queue; 2882 struct list_head *l, *n; 2883 struct dasd_ccw_req *cqr; 2884 2885 atomic_set(&block->tasklet_scheduled, 0); 2886 INIT_LIST_HEAD(&final_queue); 2887 spin_lock(&block->queue_lock); 2888 /* Finish off requests on ccw queue */ 2889 __dasd_process_block_ccw_queue(block, &final_queue); 2890 spin_unlock(&block->queue_lock); 2891 /* Now call the callback function of requests with final status */ 2892 spin_lock_irq(&block->request_queue_lock); 2893 list_for_each_safe(l, n, &final_queue) { 2894 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2895 list_del_init(&cqr->blocklist); 2896 __dasd_cleanup_cqr(cqr); 2897 } 2898 spin_lock(&block->queue_lock); 2899 /* Get new request from the block device request queue */ 2900 __dasd_process_request_queue(block); 2901 /* Now check if the head of the ccw queue needs to be started. */ 2902 __dasd_block_start_head(block); 2903 spin_unlock(&block->queue_lock); 2904 spin_unlock_irq(&block->request_queue_lock); 2905 if (waitqueue_active(&shutdown_waitq)) 2906 wake_up(&shutdown_waitq); 2907 dasd_put_device(block->base); 2908 } 2909 2910 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2911 { 2912 wake_up(&dasd_flush_wq); 2913 } 2914 2915 /* 2916 * Requeue a request back to the block request queue 2917 * only works for block requests 2918 */ 2919 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2920 { 2921 struct dasd_block *block = cqr->block; 2922 struct request *req; 2923 unsigned long flags; 2924 2925 if (!block) 2926 return -EINVAL; 2927 spin_lock_irqsave(&block->queue_lock, flags); 2928 req = (struct request *) cqr->callback_data; 2929 blk_requeue_request(block->request_queue, req); 2930 spin_unlock_irqrestore(&block->queue_lock, flags); 2931 2932 return 0; 2933 } 2934 2935 /* 2936 * Go through all request on the dasd_block request queue, cancel them 2937 * on the respective dasd_device, and return them to the generic 2938 * block layer. 2939 */ 2940 static int dasd_flush_block_queue(struct dasd_block *block) 2941 { 2942 struct dasd_ccw_req *cqr, *n; 2943 int rc, i; 2944 struct list_head flush_queue; 2945 2946 INIT_LIST_HEAD(&flush_queue); 2947 spin_lock_bh(&block->queue_lock); 2948 rc = 0; 2949 restart: 2950 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2951 /* if this request currently owned by a dasd_device cancel it */ 2952 if (cqr->status >= DASD_CQR_QUEUED) 2953 rc = dasd_cancel_req(cqr); 2954 if (rc < 0) 2955 break; 2956 /* Rechain request (including erp chain) so it won't be 2957 * touched by the dasd_block_tasklet anymore. 2958 * Replace the callback so we notice when the request 2959 * is returned from the dasd_device layer. 2960 */ 2961 cqr->callback = _dasd_wake_block_flush_cb; 2962 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2963 list_move_tail(&cqr->blocklist, &flush_queue); 2964 if (i > 1) 2965 /* moved more than one request - need to restart */ 2966 goto restart; 2967 } 2968 spin_unlock_bh(&block->queue_lock); 2969 /* Now call the callback function of flushed requests */ 2970 restart_cb: 2971 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2972 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2973 /* Process finished ERP request. 
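 * An ERP request still references the original request via cqr->refers.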
*/ 2974 if (cqr->refers) { 2975 spin_lock_bh(&block->queue_lock); 2976 __dasd_process_erp(block->base, cqr); 2977 spin_unlock_bh(&block->queue_lock); 2978 /* restart list_for_xx loop since dasd_process_erp 2979 * might remove multiple elements */ 2980 goto restart_cb; 2981 } 2982 /* call the callback function */ 2983 spin_lock_irq(&block->request_queue_lock); 2984 cqr->endclk = get_tod_clock(); 2985 list_del_init(&cqr->blocklist); 2986 __dasd_cleanup_cqr(cqr); 2987 spin_unlock_irq(&block->request_queue_lock); 2988 } 2989 return rc; 2990 } 2991 2992 /* 2993 * Schedules a call to dasd_tasklet over the device tasklet. 2994 */ 2995 void dasd_schedule_block_bh(struct dasd_block *block) 2996 { 2997 /* Protect against rescheduling. */ 2998 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2999 return; 3000 /* life cycle of block is bound to it's base device */ 3001 dasd_get_device(block->base); 3002 tasklet_hi_schedule(&block->tasklet); 3003 } 3004 EXPORT_SYMBOL(dasd_schedule_block_bh); 3005 3006 3007 /* 3008 * SECTION: external block device operations 3009 * (request queue handling, open, release, etc.) 3010 */ 3011 3012 /* 3013 * Dasd request queue function. Called from ll_rw_blk.c 3014 */ 3015 static void do_dasd_request(struct request_queue *queue) 3016 { 3017 struct dasd_block *block; 3018 3019 block = queue->queuedata; 3020 spin_lock(&block->queue_lock); 3021 /* Get new request from the block device request queue */ 3022 __dasd_process_request_queue(block); 3023 /* Now check if the head of the ccw queue needs to be started. */ 3024 __dasd_block_start_head(block); 3025 spin_unlock(&block->queue_lock); 3026 } 3027 3028 /* 3029 * Block timeout callback, called from the block layer 3030 * 3031 * request_queue lock is held on entry. 3032 * 3033 * Return values: 3034 * BLK_EH_RESET_TIMER if the request should be left running 3035 * BLK_EH_NOT_HANDLED if the request is handled or terminated 3036 * by the driver. 3037 */ 3038 enum blk_eh_timer_return dasd_times_out(struct request *req) 3039 { 3040 struct dasd_ccw_req *cqr = req->completion_data; 3041 struct dasd_block *block = req->q->queuedata; 3042 struct dasd_device *device; 3043 int rc = 0; 3044 3045 if (!cqr) 3046 return BLK_EH_NOT_HANDLED; 3047 3048 device = cqr->startdev ? 
cqr->startdev : block->base; 3049 if (!device->blk_timeout) 3050 return BLK_EH_RESET_TIMER; 3051 DBF_DEV_EVENT(DBF_WARNING, device, 3052 " dasd_times_out cqr %p status %x", 3053 cqr, cqr->status); 3054 3055 spin_lock(&block->queue_lock); 3056 spin_lock(get_ccwdev_lock(device->cdev)); 3057 cqr->retries = -1; 3058 cqr->intrc = -ETIMEDOUT; 3059 if (cqr->status >= DASD_CQR_QUEUED) { 3060 spin_unlock(get_ccwdev_lock(device->cdev)); 3061 rc = dasd_cancel_req(cqr); 3062 } else if (cqr->status == DASD_CQR_FILLED || 3063 cqr->status == DASD_CQR_NEED_ERP) { 3064 cqr->status = DASD_CQR_TERMINATED; 3065 spin_unlock(get_ccwdev_lock(device->cdev)); 3066 } else if (cqr->status == DASD_CQR_IN_ERP) { 3067 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3068 3069 list_for_each_entry_safe(searchcqr, nextcqr, 3070 &block->ccw_queue, blocklist) { 3071 tmpcqr = searchcqr; 3072 while (tmpcqr->refers) 3073 tmpcqr = tmpcqr->refers; 3074 if (tmpcqr != cqr) 3075 continue; 3076 /* searchcqr is an ERP request for cqr */ 3077 searchcqr->retries = -1; 3078 searchcqr->intrc = -ETIMEDOUT; 3079 if (searchcqr->status >= DASD_CQR_QUEUED) { 3080 spin_unlock(get_ccwdev_lock(device->cdev)); 3081 rc = dasd_cancel_req(searchcqr); 3082 spin_lock(get_ccwdev_lock(device->cdev)); 3083 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3084 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3085 searchcqr->status = DASD_CQR_TERMINATED; 3086 rc = 0; 3087 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3088 /* 3089 * Shouldn't happen; most recent ERP 3090 * request is at the front of queue 3091 */ 3092 continue; 3093 } 3094 break; 3095 } 3096 spin_unlock(get_ccwdev_lock(device->cdev)); 3097 } 3098 dasd_schedule_block_bh(block); 3099 spin_unlock(&block->queue_lock); 3100 3101 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 3102 } 3103 3104 /* 3105 * Allocate and initialize request queue and default I/O scheduler. 3106 */ 3107 static int dasd_alloc_queue(struct dasd_block *block) 3108 { 3109 block->request_queue = blk_init_queue(do_dasd_request, 3110 &block->request_queue_lock); 3111 if (block->request_queue == NULL) 3112 return -ENOMEM; 3113 3114 block->request_queue->queuedata = block; 3115 3116 return 0; 3117 } 3118 3119 /* 3120 * Allocate and initialize request queue. 3121 */ 3122 static void dasd_setup_queue(struct dasd_block *block) 3123 { 3124 int max; 3125 3126 if (block->base->features & DASD_FEATURE_USERAW) { 3127 /* 3128 * the max_blocks value for raw_track access is 256 3129 * it is higher than the native ECKD value because we 3130 * only need one ccw per track 3131 * so the max_hw_sectors are 3132 * 2048 x 512B = 1024kB = 16 tracks 3133 */ 3134 max = 2048; 3135 } else { 3136 max = block->base->discipline->max_blocks << block->s2b_shift; 3137 } 3138 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue); 3139 block->request_queue->limits.max_dev_sectors = max; 3140 blk_queue_logical_block_size(block->request_queue, 3141 block->bp_block); 3142 blk_queue_max_hw_sectors(block->request_queue, max); 3143 blk_queue_max_segments(block->request_queue, -1L); 3144 /* with page sized segments we can translate each segement into 3145 * one idaw/tidaw 3146 */ 3147 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3148 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3149 } 3150 3151 /* 3152 * Deactivate and free request queue. 
3153 */ 3154 static void dasd_free_queue(struct dasd_block *block) 3155 { 3156 if (block->request_queue) { 3157 blk_cleanup_queue(block->request_queue); 3158 block->request_queue = NULL; 3159 } 3160 } 3161 3162 /* 3163 * Flush request on the request queue. 3164 */ 3165 static void dasd_flush_request_queue(struct dasd_block *block) 3166 { 3167 struct request *req; 3168 3169 if (!block->request_queue) 3170 return; 3171 3172 spin_lock_irq(&block->request_queue_lock); 3173 while ((req = blk_fetch_request(block->request_queue))) 3174 __blk_end_request_all(req, -EIO); 3175 spin_unlock_irq(&block->request_queue_lock); 3176 } 3177 3178 static int dasd_open(struct block_device *bdev, fmode_t mode) 3179 { 3180 struct dasd_device *base; 3181 int rc; 3182 3183 base = dasd_device_from_gendisk(bdev->bd_disk); 3184 if (!base) 3185 return -ENODEV; 3186 3187 atomic_inc(&base->block->open_count); 3188 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3189 rc = -ENODEV; 3190 goto unlock; 3191 } 3192 3193 if (!try_module_get(base->discipline->owner)) { 3194 rc = -EINVAL; 3195 goto unlock; 3196 } 3197 3198 if (dasd_probeonly) { 3199 dev_info(&base->cdev->dev, 3200 "Accessing the DASD failed because it is in " 3201 "probeonly mode\n"); 3202 rc = -EPERM; 3203 goto out; 3204 } 3205 3206 if (base->state <= DASD_STATE_BASIC) { 3207 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3208 " Cannot open unrecognized device"); 3209 rc = -ENODEV; 3210 goto out; 3211 } 3212 3213 if ((mode & FMODE_WRITE) && 3214 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3215 (base->features & DASD_FEATURE_READONLY))) { 3216 rc = -EROFS; 3217 goto out; 3218 } 3219 3220 dasd_put_device(base); 3221 return 0; 3222 3223 out: 3224 module_put(base->discipline->owner); 3225 unlock: 3226 atomic_dec(&base->block->open_count); 3227 dasd_put_device(base); 3228 return rc; 3229 } 3230 3231 static void dasd_release(struct gendisk *disk, fmode_t mode) 3232 { 3233 struct dasd_device *base = dasd_device_from_gendisk(disk); 3234 if (base) { 3235 atomic_dec(&base->block->open_count); 3236 module_put(base->discipline->owner); 3237 dasd_put_device(base); 3238 } 3239 } 3240 3241 /* 3242 * Return disk geometry. 
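 * The geometry itself is filled in by the discipline's fill_geometry
 * callback.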
3243 */ 3244 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3245 { 3246 struct dasd_device *base; 3247 3248 base = dasd_device_from_gendisk(bdev->bd_disk); 3249 if (!base) 3250 return -ENODEV; 3251 3252 if (!base->discipline || 3253 !base->discipline->fill_geometry) { 3254 dasd_put_device(base); 3255 return -EINVAL; 3256 } 3257 base->discipline->fill_geometry(base->block, geo); 3258 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3259 dasd_put_device(base); 3260 return 0; 3261 } 3262 3263 const struct block_device_operations 3264 dasd_device_operations = { 3265 .owner = THIS_MODULE, 3266 .open = dasd_open, 3267 .release = dasd_release, 3268 .ioctl = dasd_ioctl, 3269 .compat_ioctl = dasd_ioctl, 3270 .getgeo = dasd_getgeo, 3271 }; 3272 3273 /******************************************************************************* 3274 * end of block device operations 3275 */ 3276 3277 static void 3278 dasd_exit(void) 3279 { 3280 #ifdef CONFIG_PROC_FS 3281 dasd_proc_exit(); 3282 #endif 3283 dasd_eer_exit(); 3284 if (dasd_page_cache != NULL) { 3285 kmem_cache_destroy(dasd_page_cache); 3286 dasd_page_cache = NULL; 3287 } 3288 dasd_gendisk_exit(); 3289 dasd_devmap_exit(); 3290 if (dasd_debug_area != NULL) { 3291 debug_unregister(dasd_debug_area); 3292 dasd_debug_area = NULL; 3293 } 3294 dasd_statistics_removeroot(); 3295 } 3296 3297 /* 3298 * SECTION: common functions for ccw_driver use 3299 */ 3300 3301 /* 3302 * Is the device read-only? 3303 * Note that this function does not report the setting of the 3304 * readonly device attribute, but how it is configured in z/VM. 3305 */ 3306 int dasd_device_is_ro(struct dasd_device *device) 3307 { 3308 struct ccw_dev_id dev_id; 3309 struct diag210 diag_data; 3310 int rc; 3311 3312 if (!MACHINE_IS_VM) 3313 return 0; 3314 ccw_device_get_id(device->cdev, &dev_id); 3315 memset(&diag_data, 0, sizeof(diag_data)); 3316 diag_data.vrdcdvno = dev_id.devno; 3317 diag_data.vrdclen = sizeof(diag_data); 3318 rc = diag210(&diag_data); 3319 if (rc == 0 || rc == 2) { 3320 return diag_data.vrdcvfla & 0x80; 3321 } else { 3322 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3323 dev_id.devno, rc); 3324 return 0; 3325 } 3326 } 3327 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3328 3329 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3330 { 3331 struct ccw_device *cdev = data; 3332 int ret; 3333 3334 ret = ccw_device_set_online(cdev); 3335 if (ret) 3336 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3337 dev_name(&cdev->dev), ret); 3338 } 3339 3340 /* 3341 * Initial attempt at a probe function. this can be simplified once 3342 * the other detection code is gone. 3343 */ 3344 int dasd_generic_probe(struct ccw_device *cdev, 3345 struct dasd_discipline *discipline) 3346 { 3347 int ret; 3348 3349 ret = dasd_add_sysfs_files(cdev); 3350 if (ret) { 3351 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3352 "dasd_generic_probe: could not add " 3353 "sysfs entries"); 3354 return ret; 3355 } 3356 cdev->handler = &dasd_int_handler; 3357 3358 /* 3359 * Automatically online either all dasd devices (dasd_autodetect) 3360 * or all devices specified with dasd= parameters during 3361 * initial probe. 
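 * The online processing itself runs asynchronously via
 * dasd_generic_auto_online.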
3362 */ 3363 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3364 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3365 async_schedule(dasd_generic_auto_online, cdev); 3366 return 0; 3367 } 3368 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3369 3370 void dasd_generic_free_discipline(struct dasd_device *device) 3371 { 3372 /* Forget the discipline information. */ 3373 if (device->discipline) { 3374 if (device->discipline->uncheck_device) 3375 device->discipline->uncheck_device(device); 3376 module_put(device->discipline->owner); 3377 device->discipline = NULL; 3378 } 3379 if (device->base_discipline) { 3380 module_put(device->base_discipline->owner); 3381 device->base_discipline = NULL; 3382 } 3383 } 3384 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3385 3386 /* 3387 * This will one day be called from a global not_oper handler. 3388 * It is also used by driver_unregister during module unload. 3389 */ 3390 void dasd_generic_remove(struct ccw_device *cdev) 3391 { 3392 struct dasd_device *device; 3393 struct dasd_block *block; 3394 3395 cdev->handler = NULL; 3396 3397 device = dasd_device_from_cdev(cdev); 3398 if (IS_ERR(device)) { 3399 dasd_remove_sysfs_files(cdev); 3400 return; 3401 } 3402 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3403 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3404 /* Already doing offline processing */ 3405 dasd_put_device(device); 3406 dasd_remove_sysfs_files(cdev); 3407 return; 3408 } 3409 /* 3410 * This device is removed unconditionally. Set offline 3411 * flag to prevent dasd_open from opening it while it is 3412 * no quite down yet. 3413 */ 3414 dasd_set_target_state(device, DASD_STATE_NEW); 3415 /* dasd_delete_device destroys the device reference. */ 3416 block = device->block; 3417 dasd_delete_device(device); 3418 /* 3419 * life cycle of block is bound to device, so delete it after 3420 * device was safely removed 3421 */ 3422 if (block) 3423 dasd_free_block(block); 3424 3425 dasd_remove_sysfs_files(cdev); 3426 } 3427 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3428 3429 /* 3430 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3431 * the device is detected for the first time and is supposed to be used 3432 * or the user has started activation through sysfs. 3433 */ 3434 int dasd_generic_set_online(struct ccw_device *cdev, 3435 struct dasd_discipline *base_discipline) 3436 { 3437 struct dasd_discipline *discipline; 3438 struct dasd_device *device; 3439 int rc; 3440 3441 /* first online clears initial online feature flag */ 3442 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3443 device = dasd_create_device(cdev); 3444 if (IS_ERR(device)) 3445 return PTR_ERR(device); 3446 3447 discipline = base_discipline; 3448 if (device->features & DASD_FEATURE_USEDIAG) { 3449 if (!dasd_diag_discipline_pointer) { 3450 /* Try to load the required module. */ 3451 rc = request_module(DASD_DIAG_MOD); 3452 if (rc) { 3453 pr_warn("%s Setting the DASD online failed " 3454 "because the required module %s " 3455 "could not be loaded (rc=%d)\n", 3456 dev_name(&cdev->dev), DASD_DIAG_MOD, 3457 rc); 3458 dasd_delete_device(device); 3459 return -ENODEV; 3460 } 3461 } 3462 /* Module init could have failed, so check again here after 3463 * request_module(). 
*/ 3464 if (!dasd_diag_discipline_pointer) { 3465 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3466 dev_name(&cdev->dev)); 3467 dasd_delete_device(device); 3468 return -ENODEV; 3469 } 3470 discipline = dasd_diag_discipline_pointer; 3471 } 3472 if (!try_module_get(base_discipline->owner)) { 3473 dasd_delete_device(device); 3474 return -EINVAL; 3475 } 3476 if (!try_module_get(discipline->owner)) { 3477 module_put(base_discipline->owner); 3478 dasd_delete_device(device); 3479 return -EINVAL; 3480 } 3481 device->base_discipline = base_discipline; 3482 device->discipline = discipline; 3483 3484 /* check_device will allocate block device if necessary */ 3485 rc = discipline->check_device(device); 3486 if (rc) { 3487 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3488 dev_name(&cdev->dev), discipline->name, rc); 3489 module_put(discipline->owner); 3490 module_put(base_discipline->owner); 3491 dasd_delete_device(device); 3492 return rc; 3493 } 3494 3495 dasd_set_target_state(device, DASD_STATE_ONLINE); 3496 if (device->state <= DASD_STATE_KNOWN) { 3497 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3498 dev_name(&cdev->dev)); 3499 rc = -ENODEV; 3500 dasd_set_target_state(device, DASD_STATE_NEW); 3501 if (device->block) 3502 dasd_free_block(device->block); 3503 dasd_delete_device(device); 3504 } else 3505 pr_debug("dasd_generic device %s found\n", 3506 dev_name(&cdev->dev)); 3507 3508 wait_event(dasd_init_waitq, _wait_for_device(device)); 3509 3510 dasd_put_device(device); 3511 return rc; 3512 } 3513 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3514 3515 int dasd_generic_set_offline(struct ccw_device *cdev) 3516 { 3517 struct dasd_device *device; 3518 struct dasd_block *block; 3519 int max_count, open_count, rc; 3520 3521 rc = 0; 3522 device = dasd_device_from_cdev(cdev); 3523 if (IS_ERR(device)) 3524 return PTR_ERR(device); 3525 3526 /* 3527 * We must make sure that this device is currently not in use. 3528 * The open_count is increased for every opener, that includes 3529 * the blkdev_get in dasd_scan_partitions. We are only interested 3530 * in the other openers. 3531 */ 3532 if (device->block) { 3533 max_count = device->block->bdev ? 
0 : -1; 3534 open_count = atomic_read(&device->block->open_count); 3535 if (open_count > max_count) { 3536 if (open_count > 0) 3537 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3538 dev_name(&cdev->dev), open_count); 3539 else 3540 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3541 dev_name(&cdev->dev)); 3542 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3543 dasd_put_device(device); 3544 return -EBUSY; 3545 } 3546 } 3547 3548 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3549 /* 3550 * safe offline already running 3551 * could only be called by normal offline so safe_offline flag 3552 * needs to be removed to run normal offline and kill all I/O 3553 */ 3554 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3555 /* Already doing normal offline processing */ 3556 dasd_put_device(device); 3557 return -EBUSY; 3558 } else 3559 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3560 3561 } else 3562 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3563 /* Already doing offline processing */ 3564 dasd_put_device(device); 3565 return -EBUSY; 3566 } 3567 3568 /* 3569 * if safe_offline called set safe_offline_running flag and 3570 * clear safe_offline so that a call to normal offline 3571 * can overrun safe_offline processing 3572 */ 3573 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3574 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3575 /* 3576 * If we want to set the device safe offline all IO operations 3577 * should be finished before continuing the offline process 3578 * so sync bdev first and then wait for our queues to become 3579 * empty 3580 */ 3581 /* sync blockdev and partitions */ 3582 rc = fsync_bdev(device->block->bdev); 3583 if (rc != 0) 3584 goto interrupted; 3585 3586 /* schedule device tasklet and wait for completion */ 3587 dasd_schedule_device_bh(device); 3588 rc = wait_event_interruptible(shutdown_waitq, 3589 _wait_for_empty_queues(device)); 3590 if (rc != 0) 3591 goto interrupted; 3592 } 3593 3594 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3595 dasd_set_target_state(device, DASD_STATE_NEW); 3596 /* dasd_delete_device destroys the device reference. */ 3597 block = device->block; 3598 dasd_delete_device(device); 3599 /* 3600 * life cycle of block is bound to device, so delete it after 3601 * device was safely removed 3602 */ 3603 if (block) 3604 dasd_free_block(block); 3605 return 0; 3606 3607 interrupted: 3608 /* interrupted by signal */ 3609 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3610 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3611 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3612 dasd_put_device(device); 3613 return rc; 3614 } 3615 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3616 3617 int dasd_generic_last_path_gone(struct dasd_device *device) 3618 { 3619 struct dasd_ccw_req *cqr; 3620 3621 dev_warn(&device->cdev->dev, "No operational channel path is left " 3622 "for the device\n"); 3623 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3624 /* First of all call extended error reporting. */ 3625 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3626 3627 if (device->state < DASD_STATE_BASIC) 3628 return 0; 3629 /* Device is active. We want to keep it. 
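 * Requeue requests that were in I/O so they can be retried once a path
 * comes back.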
*/ 3630 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3631 if ((cqr->status == DASD_CQR_IN_IO) || 3632 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3633 cqr->status = DASD_CQR_QUEUED; 3634 cqr->retries++; 3635 } 3636 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3637 dasd_device_clear_timer(device); 3638 dasd_schedule_device_bh(device); 3639 return 1; 3640 } 3641 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3642 3643 int dasd_generic_path_operational(struct dasd_device *device) 3644 { 3645 dev_info(&device->cdev->dev, "A channel path to the device has become " 3646 "operational\n"); 3647 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3648 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3649 if (device->stopped & DASD_UNRESUMED_PM) { 3650 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3651 dasd_restore_device(device); 3652 return 1; 3653 } 3654 dasd_schedule_device_bh(device); 3655 if (device->block) 3656 dasd_schedule_block_bh(device->block); 3657 3658 if (!device->stopped) 3659 wake_up(&generic_waitq); 3660 3661 return 1; 3662 } 3663 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3664 3665 int dasd_generic_notify(struct ccw_device *cdev, int event) 3666 { 3667 struct dasd_device *device; 3668 int ret; 3669 3670 device = dasd_device_from_cdev_locked(cdev); 3671 if (IS_ERR(device)) 3672 return 0; 3673 ret = 0; 3674 switch (event) { 3675 case CIO_GONE: 3676 case CIO_BOXED: 3677 case CIO_NO_PATH: 3678 device->path_data.opm = 0; 3679 device->path_data.ppm = 0; 3680 device->path_data.npm = 0; 3681 ret = dasd_generic_last_path_gone(device); 3682 break; 3683 case CIO_OPER: 3684 ret = 1; 3685 if (device->path_data.opm) 3686 ret = dasd_generic_path_operational(device); 3687 break; 3688 } 3689 dasd_put_device(device); 3690 return ret; 3691 } 3692 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3693 3694 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3695 { 3696 int chp; 3697 __u8 oldopm, eventlpm; 3698 struct dasd_device *device; 3699 3700 device = dasd_device_from_cdev_locked(cdev); 3701 if (IS_ERR(device)) 3702 return; 3703 for (chp = 0; chp < 8; chp++) { 3704 eventlpm = 0x80 >> chp; 3705 if (path_event[chp] & PE_PATH_GONE) { 3706 oldopm = device->path_data.opm; 3707 device->path_data.opm &= ~eventlpm; 3708 device->path_data.ppm &= ~eventlpm; 3709 device->path_data.npm &= ~eventlpm; 3710 if (oldopm && !device->path_data.opm) { 3711 dev_warn(&device->cdev->dev, 3712 "No verified channel paths remain " 3713 "for the device\n"); 3714 DBF_DEV_EVENT(DBF_WARNING, device, 3715 "%s", "last verified path gone"); 3716 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3717 dasd_device_set_stop_bits(device, 3718 DASD_STOPPED_DC_WAIT); 3719 } 3720 } 3721 if (path_event[chp] & PE_PATH_AVAILABLE) { 3722 device->path_data.opm &= ~eventlpm; 3723 device->path_data.ppm &= ~eventlpm; 3724 device->path_data.npm &= ~eventlpm; 3725 device->path_data.tbvpm |= eventlpm; 3726 dasd_schedule_device_bh(device); 3727 } 3728 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3729 if (!(device->path_data.opm & eventlpm) && 3730 !(device->path_data.tbvpm & eventlpm)) { 3731 /* 3732 * we can not establish a pathgroup on an 3733 * unavailable path, so trigger a path 3734 * verification first 3735 */ 3736 device->path_data.tbvpm |= eventlpm; 3737 dasd_schedule_device_bh(device); 3738 } 3739 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3740 "Pathgroup re-established\n"); 3741 if (device->discipline->kick_validate) 3742 device->discipline->kick_validate(device); 3743 
} 3744 } 3745 dasd_put_device(device); 3746 } 3747 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3748 3749 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3750 { 3751 if (!device->path_data.opm && lpm) { 3752 device->path_data.opm = lpm; 3753 dasd_generic_path_operational(device); 3754 } else 3755 device->path_data.opm |= lpm; 3756 return 0; 3757 } 3758 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3759 3760 3761 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3762 { 3763 struct dasd_device *device = dasd_device_from_cdev(cdev); 3764 struct list_head freeze_queue; 3765 struct dasd_ccw_req *cqr, *n; 3766 struct dasd_ccw_req *refers; 3767 int rc; 3768 3769 if (IS_ERR(device)) 3770 return PTR_ERR(device); 3771 3772 /* mark device as suspended */ 3773 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3774 3775 if (device->discipline->freeze) 3776 rc = device->discipline->freeze(device); 3777 3778 /* disallow new I/O */ 3779 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3780 3781 /* clear active requests and requeue them to block layer if possible */ 3782 INIT_LIST_HEAD(&freeze_queue); 3783 spin_lock_irq(get_ccwdev_lock(cdev)); 3784 rc = 0; 3785 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3786 /* Check status and move request to flush_queue */ 3787 if (cqr->status == DASD_CQR_IN_IO) { 3788 rc = device->discipline->term_IO(cqr); 3789 if (rc) { 3790 /* unable to terminate requeust */ 3791 dev_err(&device->cdev->dev, 3792 "Unable to terminate request %p " 3793 "on suspend\n", cqr); 3794 spin_unlock_irq(get_ccwdev_lock(cdev)); 3795 dasd_put_device(device); 3796 return rc; 3797 } 3798 } 3799 list_move_tail(&cqr->devlist, &freeze_queue); 3800 } 3801 spin_unlock_irq(get_ccwdev_lock(cdev)); 3802 3803 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3804 wait_event(dasd_flush_wq, 3805 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3806 if (cqr->status == DASD_CQR_CLEARED) 3807 cqr->status = DASD_CQR_QUEUED; 3808 3809 /* requeue requests to blocklayer will only work for 3810 block device requests */ 3811 if (_dasd_requeue_request(cqr)) 3812 continue; 3813 3814 /* remove requests from device and block queue */ 3815 list_del_init(&cqr->devlist); 3816 while (cqr->refers != NULL) { 3817 refers = cqr->refers; 3818 /* remove the request from the block queue */ 3819 list_del(&cqr->blocklist); 3820 /* free the finished erp request */ 3821 dasd_free_erp_request(cqr, cqr->memdev); 3822 cqr = refers; 3823 } 3824 if (cqr->block) 3825 list_del_init(&cqr->blocklist); 3826 cqr->block->base->discipline->free_cp( 3827 cqr, (struct request *) cqr->callback_data); 3828 } 3829 3830 /* 3831 * if requests remain then they are internal request 3832 * and go back to the device queue 3833 */ 3834 if (!list_empty(&freeze_queue)) { 3835 /* move freeze_queue to start of the ccw_queue */ 3836 spin_lock_irq(get_ccwdev_lock(cdev)); 3837 list_splice_tail(&freeze_queue, &device->ccw_queue); 3838 spin_unlock_irq(get_ccwdev_lock(cdev)); 3839 } 3840 dasd_put_device(device); 3841 return rc; 3842 } 3843 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3844 3845 int dasd_generic_restore_device(struct ccw_device *cdev) 3846 { 3847 struct dasd_device *device = dasd_device_from_cdev(cdev); 3848 int rc = 0; 3849 3850 if (IS_ERR(device)) 3851 return PTR_ERR(device); 3852 3853 /* allow new IO again */ 3854 dasd_device_remove_stop_bits(device, 3855 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3856 3857 dasd_schedule_device_bh(device); 3858 3859 /* 3860 * call discipline restore function 3861 * if device is stopped 

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	struct list_head freeze_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	/* clear active requests and requeue them to block layer if possible */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to freeze_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;

		/* requeueing requests to the block layer only works for
		   block device requests */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}
		if (cqr->block)
			list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * if requests remain then they are internal requests
	 * and go back to the device queue
	 */
	if (!list_empty(&freeze_queue)) {
		/* move freeze_queue to start of the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(cdev));
		list_splice_tail(&freeze_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(cdev));
	}
	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call the discipline restore function;
	 * if the device is stopped do nothing, e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
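
/*
 * Illustrative sketch, not part of the original driver: a discipline
 * typically calls dasd_generic_read_dev_chars() while a device is set
 * online, passing its own magic and a per-device buffer that receives the
 * Read Device Characteristics data.  Hypothetical caller; the function
 * name, DASD_FOO_MAGIC and private->rdc_data are assumptions:
 *
 *	static int dasd_foo_check_characteristics(struct dasd_device *device)
 *	{
 *		struct dasd_foo_private *private = device->private;
 *		int rc;
 *
 *		rc = dasd_generic_read_dev_chars(device, DASD_FOO_MAGIC,
 *						 &private->rdc_data, 64);
 *		if (rc)
 *			dev_warn(&device->cdev->dev,
 *				 "Read device characteristics failed, rc=%d\n",
 *				 rc);
 *		return rc;
 *	}
 */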

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
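
/*
 * Illustrative sketch, not part of the original file: the generic suspend,
 * resume and shutdown helpers defined above are meant to be hooked into a
 * discipline's struct ccw_driver, so that the common code handles power
 * management and shutdown the same way for every DASD discipline.
 * Hypothetical discipline driver; all dasd_foo_* names are assumptions:
 *
 *	static struct ccw_driver dasd_foo_driver = {
 *		.driver = {
 *			.name  = "dasd-foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids         = dasd_foo_ids,
 *		.probe       = dasd_foo_probe,
 *		.remove      = dasd_generic_remove,
 *		.set_online  = dasd_foo_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *		.shutdown    = dasd_generic_shutdown,
 *		.freeze      = dasd_generic_pm_freeze,
 *		.thaw        = dasd_generic_restore_device,
 *		.restore     = dasd_generic_restore_device,
 *	};
 */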