1 /* 2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Carsten Otte <Cotte@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Copyright IBM Corp. 1999, 2009 8 */ 9 10 #define KMSG_COMPONENT "dasd" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/kmod.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/ctype.h> 17 #include <linux/major.h> 18 #include <linux/slab.h> 19 #include <linux/hdreg.h> 20 #include <linux/async.h> 21 #include <linux/mutex.h> 22 #include <linux/debugfs.h> 23 #include <linux/seq_file.h> 24 #include <linux/vmalloc.h> 25 26 #include <asm/ccwdev.h> 27 #include <asm/ebcdic.h> 28 #include <asm/idals.h> 29 #include <asm/itcw.h> 30 #include <asm/diag.h> 31 32 /* This is ugly... */ 33 #define PRINTK_HEADER "dasd:" 34 35 #include "dasd_int.h" 36 /* 37 * SECTION: Constant definitions to be used within this file 38 */ 39 #define DASD_CHANQ_MAX_SIZE 4 40 41 /* 42 * SECTION: exported variables of dasd.c 43 */ 44 debug_info_t *dasd_debug_area; 45 EXPORT_SYMBOL(dasd_debug_area); 46 static struct dentry *dasd_debugfs_root_entry; 47 struct dasd_discipline *dasd_diag_discipline_pointer; 48 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 49 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 50 51 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 52 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 53 " Copyright IBM Corp. 2000"); 54 MODULE_SUPPORTED_DEVICE("dasd"); 55 MODULE_LICENSE("GPL"); 56 57 /* 58 * SECTION: prototypes for static functions of dasd.c 59 */ 60 static int dasd_alloc_queue(struct dasd_block *); 61 static void dasd_setup_queue(struct dasd_block *); 62 static void dasd_free_queue(struct dasd_block *); 63 static void dasd_flush_request_queue(struct dasd_block *); 64 static int dasd_flush_block_queue(struct dasd_block *); 65 static void dasd_device_tasklet(struct dasd_device *); 66 static void dasd_block_tasklet(struct dasd_block *); 67 static void do_kick_device(struct work_struct *); 68 static void do_restore_device(struct work_struct *); 69 static void do_reload_device(struct work_struct *); 70 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 71 static void dasd_device_timeout(unsigned long); 72 static void dasd_block_timeout(unsigned long); 73 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 74 static void dasd_profile_init(struct dasd_profile *, struct dentry *); 75 static void dasd_profile_exit(struct dasd_profile *); 76 77 /* 78 * SECTION: Operations on the device structure. 79 */ 80 static wait_queue_head_t dasd_init_waitq; 81 static wait_queue_head_t dasd_flush_wq; 82 static wait_queue_head_t generic_waitq; 83 static wait_queue_head_t shutdown_waitq; 84 85 /* 86 * Allocate memory for a new device structure. 87 */ 88 struct dasd_device *dasd_alloc_device(void) 89 { 90 struct dasd_device *device; 91 92 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 93 if (!device) 94 return ERR_PTR(-ENOMEM); 95 96 /* Get two pages for normal block device operations. */ 97 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 98 if (!device->ccw_mem) { 99 kfree(device); 100 return ERR_PTR(-ENOMEM); 101 } 102 /* Get one page for error recovery. 
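 * Both the ccw pages and this erp page are handed to the driver's own
 * chunk allocator via dasd_init_chunklist() below, so request memory can
 * later be carved out of these static, DMA-capable buffers (see
 * dasd_smalloc_request()) without further allocations in the I/O path.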
*/ 103 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 104 if (!device->erp_mem) { 105 free_pages((unsigned long) device->ccw_mem, 1); 106 kfree(device); 107 return ERR_PTR(-ENOMEM); 108 } 109 110 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 111 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 112 spin_lock_init(&device->mem_lock); 113 atomic_set(&device->tasklet_scheduled, 0); 114 tasklet_init(&device->tasklet, 115 (void (*)(unsigned long)) dasd_device_tasklet, 116 (unsigned long) device); 117 INIT_LIST_HEAD(&device->ccw_queue); 118 init_timer(&device->timer); 119 device->timer.function = dasd_device_timeout; 120 device->timer.data = (unsigned long) device; 121 INIT_WORK(&device->kick_work, do_kick_device); 122 INIT_WORK(&device->restore_device, do_restore_device); 123 INIT_WORK(&device->reload_device, do_reload_device); 124 device->state = DASD_STATE_NEW; 125 device->target = DASD_STATE_NEW; 126 mutex_init(&device->state_mutex); 127 spin_lock_init(&device->profile.lock); 128 return device; 129 } 130 131 /* 132 * Free memory of a device structure. 133 */ 134 void dasd_free_device(struct dasd_device *device) 135 { 136 kfree(device->private); 137 free_page((unsigned long) device->erp_mem); 138 free_pages((unsigned long) device->ccw_mem, 1); 139 kfree(device); 140 } 141 142 /* 143 * Allocate memory for a new device structure. 144 */ 145 struct dasd_block *dasd_alloc_block(void) 146 { 147 struct dasd_block *block; 148 149 block = kzalloc(sizeof(*block), GFP_ATOMIC); 150 if (!block) 151 return ERR_PTR(-ENOMEM); 152 /* open_count = 0 means device online but not in use */ 153 atomic_set(&block->open_count, -1); 154 155 spin_lock_init(&block->request_queue_lock); 156 atomic_set(&block->tasklet_scheduled, 0); 157 tasklet_init(&block->tasklet, 158 (void (*)(unsigned long)) dasd_block_tasklet, 159 (unsigned long) block); 160 INIT_LIST_HEAD(&block->ccw_queue); 161 spin_lock_init(&block->queue_lock); 162 init_timer(&block->timer); 163 block->timer.function = dasd_block_timeout; 164 block->timer.data = (unsigned long) block; 165 spin_lock_init(&block->profile.lock); 166 167 return block; 168 } 169 EXPORT_SYMBOL_GPL(dasd_alloc_block); 170 171 /* 172 * Free memory of a device structure. 173 */ 174 void dasd_free_block(struct dasd_block *block) 175 { 176 kfree(block); 177 } 178 EXPORT_SYMBOL_GPL(dasd_free_block); 179 180 /* 181 * Make a new device known to the system. 182 */ 183 static int dasd_state_new_to_known(struct dasd_device *device) 184 { 185 int rc; 186 187 /* 188 * As long as the device is not in state DASD_STATE_NEW we want to 189 * keep the reference count > 0. 190 */ 191 dasd_get_device(device); 192 193 if (device->block) { 194 rc = dasd_alloc_queue(device->block); 195 if (rc) { 196 dasd_put_device(device); 197 return rc; 198 } 199 } 200 device->state = DASD_STATE_KNOWN; 201 return 0; 202 } 203 204 /* 205 * Let the system forget about a device. 206 */ 207 static int dasd_state_known_to_new(struct dasd_device *device) 208 { 209 /* Disable extended error reporting for this device. */ 210 dasd_eer_disable(device); 211 /* Forget the discipline information. 
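 * Dropping the discipline below also releases the module references that
 * were taken when the (base) discipline was bound to the device, so the
 * discipline module can be unloaded again once the device is back in
 * DASD_STATE_NEW.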
*/ 212 if (device->discipline) { 213 if (device->discipline->uncheck_device) 214 device->discipline->uncheck_device(device); 215 module_put(device->discipline->owner); 216 } 217 device->discipline = NULL; 218 if (device->base_discipline) 219 module_put(device->base_discipline->owner); 220 device->base_discipline = NULL; 221 device->state = DASD_STATE_NEW; 222 223 if (device->block) 224 dasd_free_queue(device->block); 225 226 /* Give up reference we took in dasd_state_new_to_known. */ 227 dasd_put_device(device); 228 return 0; 229 } 230 231 static struct dentry *dasd_debugfs_setup(const char *name, 232 struct dentry *base_dentry) 233 { 234 struct dentry *pde; 235 236 if (!base_dentry) 237 return NULL; 238 pde = debugfs_create_dir(name, base_dentry); 239 if (!pde || IS_ERR(pde)) 240 return NULL; 241 return pde; 242 } 243 244 /* 245 * Request the irq line for the device. 246 */ 247 static int dasd_state_known_to_basic(struct dasd_device *device) 248 { 249 struct dasd_block *block = device->block; 250 int rc = 0; 251 252 /* Allocate and register gendisk structure. */ 253 if (block) { 254 rc = dasd_gendisk_alloc(block); 255 if (rc) 256 return rc; 257 block->debugfs_dentry = 258 dasd_debugfs_setup(block->gdp->disk_name, 259 dasd_debugfs_root_entry); 260 dasd_profile_init(&block->profile, block->debugfs_dentry); 261 if (dasd_global_profile_level == DASD_PROFILE_ON) 262 dasd_profile_on(&device->block->profile); 263 } 264 device->debugfs_dentry = 265 dasd_debugfs_setup(dev_name(&device->cdev->dev), 266 dasd_debugfs_root_entry); 267 dasd_profile_init(&device->profile, device->debugfs_dentry); 268 269 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 270 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 271 8 * sizeof(long)); 272 debug_register_view(device->debug_area, &debug_sprintf_view); 273 debug_set_level(device->debug_area, DBF_WARNING); 274 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 275 276 device->state = DASD_STATE_BASIC; 277 278 return rc; 279 } 280 281 /* 282 * Release the irq line for the device. Terminate any running i/o. 283 */ 284 static int dasd_state_basic_to_known(struct dasd_device *device) 285 { 286 int rc; 287 288 if (device->discipline->basic_to_known) { 289 rc = device->discipline->basic_to_known(device); 290 if (rc) 291 return rc; 292 } 293 294 if (device->block) { 295 dasd_profile_exit(&device->block->profile); 296 debugfs_remove(device->block->debugfs_dentry); 297 dasd_gendisk_free(device->block); 298 dasd_block_clear_timer(device->block); 299 } 300 rc = dasd_flush_device_queue(device); 301 if (rc) 302 return rc; 303 dasd_device_clear_timer(device); 304 dasd_profile_exit(&device->profile); 305 debugfs_remove(device->debugfs_dentry); 306 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 307 if (device->debug_area != NULL) { 308 debug_unregister(device->debug_area); 309 device->debug_area = NULL; 310 } 311 device->state = DASD_STATE_KNOWN; 312 return 0; 313 } 314 315 /* 316 * Do the initial analysis. The do_analysis function may return 317 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 318 * until the discipline decides to continue the startup sequence 319 * by calling the function dasd_change_state. The eckd disciplines 320 * uses this to start a ccw that detects the format. The completion 321 * interrupt for this detection ccw uses the kernel event daemon to 322 * trigger the call to dasd_change_state. All this is done in the 323 * discipline code, see dasd_eckd.c. 
324 * After the analysis ccw is done (do_analysis returned 0) the block 325 * device is setup. 326 * In case the analysis returns an error, the device setup is stopped 327 * (a fake disk was already added to allow formatting). 328 */ 329 static int dasd_state_basic_to_ready(struct dasd_device *device) 330 { 331 int rc; 332 struct dasd_block *block; 333 334 rc = 0; 335 block = device->block; 336 /* make disk known with correct capacity */ 337 if (block) { 338 if (block->base->discipline->do_analysis != NULL) 339 rc = block->base->discipline->do_analysis(block); 340 if (rc) { 341 if (rc != -EAGAIN) { 342 device->state = DASD_STATE_UNFMT; 343 goto out; 344 } 345 return rc; 346 } 347 dasd_setup_queue(block); 348 set_capacity(block->gdp, 349 block->blocks << block->s2b_shift); 350 device->state = DASD_STATE_READY; 351 rc = dasd_scan_partitions(block); 352 if (rc) { 353 device->state = DASD_STATE_BASIC; 354 return rc; 355 } 356 } else { 357 device->state = DASD_STATE_READY; 358 } 359 out: 360 if (device->discipline->basic_to_ready) 361 rc = device->discipline->basic_to_ready(device); 362 return rc; 363 } 364 365 static inline 366 int _wait_for_empty_queues(struct dasd_device *device) 367 { 368 if (device->block) 369 return list_empty(&device->ccw_queue) && 370 list_empty(&device->block->ccw_queue); 371 else 372 return list_empty(&device->ccw_queue); 373 } 374 375 /* 376 * Remove device from block device layer. Destroy dirty buffers. 377 * Forget format information. Check if the target level is basic 378 * and if it is create fake disk for formatting. 379 */ 380 static int dasd_state_ready_to_basic(struct dasd_device *device) 381 { 382 int rc; 383 384 device->state = DASD_STATE_BASIC; 385 if (device->block) { 386 struct dasd_block *block = device->block; 387 rc = dasd_flush_block_queue(block); 388 if (rc) { 389 device->state = DASD_STATE_READY; 390 return rc; 391 } 392 dasd_flush_request_queue(block); 393 dasd_destroy_partitions(block); 394 block->blocks = 0; 395 block->bp_block = 0; 396 block->s2b_shift = 0; 397 } 398 return 0; 399 } 400 401 /* 402 * Back to basic. 403 */ 404 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 405 { 406 device->state = DASD_STATE_BASIC; 407 return 0; 408 } 409 410 /* 411 * Make the device online and schedule the bottom half to start 412 * the requeueing of requests from the linux request queue to the 413 * ccw queue. 414 */ 415 static int 416 dasd_state_ready_to_online(struct dasd_device * device) 417 { 418 struct gendisk *disk; 419 struct disk_part_iter piter; 420 struct hd_struct *part; 421 422 device->state = DASD_STATE_ONLINE; 423 if (device->block) { 424 dasd_schedule_block_bh(device->block); 425 if ((device->features & DASD_FEATURE_USERAW)) { 426 disk = device->block->gdp; 427 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); 428 return 0; 429 } 430 disk = device->block->bdev->bd_disk; 431 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 432 while ((part = disk_part_iter_next(&piter))) 433 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 434 disk_part_iter_exit(&piter); 435 } 436 return 0; 437 } 438 439 /* 440 * Stop the requeueing of requests again. 
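 * As in the ready-to-online transition above, a KOBJ_CHANGE uevent is
 * sent for the disk and each of its partitions (unless the device is in
 * raw access mode), so user space notices the state change.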
441 */ 442 static int dasd_state_online_to_ready(struct dasd_device *device) 443 { 444 int rc; 445 struct gendisk *disk; 446 struct disk_part_iter piter; 447 struct hd_struct *part; 448 449 if (device->discipline->online_to_ready) { 450 rc = device->discipline->online_to_ready(device); 451 if (rc) 452 return rc; 453 } 454 455 device->state = DASD_STATE_READY; 456 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 457 disk = device->block->bdev->bd_disk; 458 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 459 while ((part = disk_part_iter_next(&piter))) 460 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 461 disk_part_iter_exit(&piter); 462 } 463 return 0; 464 } 465 466 /* 467 * Device startup state changes. 468 */ 469 static int dasd_increase_state(struct dasd_device *device) 470 { 471 int rc; 472 473 rc = 0; 474 if (device->state == DASD_STATE_NEW && 475 device->target >= DASD_STATE_KNOWN) 476 rc = dasd_state_new_to_known(device); 477 478 if (!rc && 479 device->state == DASD_STATE_KNOWN && 480 device->target >= DASD_STATE_BASIC) 481 rc = dasd_state_known_to_basic(device); 482 483 if (!rc && 484 device->state == DASD_STATE_BASIC && 485 device->target >= DASD_STATE_READY) 486 rc = dasd_state_basic_to_ready(device); 487 488 if (!rc && 489 device->state == DASD_STATE_UNFMT && 490 device->target > DASD_STATE_UNFMT) 491 rc = -EPERM; 492 493 if (!rc && 494 device->state == DASD_STATE_READY && 495 device->target >= DASD_STATE_ONLINE) 496 rc = dasd_state_ready_to_online(device); 497 498 return rc; 499 } 500 501 /* 502 * Device shutdown state changes. 503 */ 504 static int dasd_decrease_state(struct dasd_device *device) 505 { 506 int rc; 507 508 rc = 0; 509 if (device->state == DASD_STATE_ONLINE && 510 device->target <= DASD_STATE_READY) 511 rc = dasd_state_online_to_ready(device); 512 513 if (!rc && 514 device->state == DASD_STATE_READY && 515 device->target <= DASD_STATE_BASIC) 516 rc = dasd_state_ready_to_basic(device); 517 518 if (!rc && 519 device->state == DASD_STATE_UNFMT && 520 device->target <= DASD_STATE_BASIC) 521 rc = dasd_state_unfmt_to_basic(device); 522 523 if (!rc && 524 device->state == DASD_STATE_BASIC && 525 device->target <= DASD_STATE_KNOWN) 526 rc = dasd_state_basic_to_known(device); 527 528 if (!rc && 529 device->state == DASD_STATE_KNOWN && 530 device->target <= DASD_STATE_NEW) 531 rc = dasd_state_known_to_new(device); 532 533 return rc; 534 } 535 536 /* 537 * This is the main startup/shutdown routine. 538 */ 539 static void dasd_change_state(struct dasd_device *device) 540 { 541 int rc; 542 543 if (device->state == device->target) 544 /* Already where we want to go today... */ 545 return; 546 if (device->state < device->target) 547 rc = dasd_increase_state(device); 548 else 549 rc = dasd_decrease_state(device); 550 if (rc == -EAGAIN) 551 return; 552 if (rc) 553 device->target = device->state; 554 555 /* let user-space know that the device status changed */ 556 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 557 558 if (device->state == device->target) 559 wake_up(&dasd_init_waitq); 560 } 561 562 /* 563 * Kick starter for devices that did not complete the startup/shutdown 564 * procedure or were sleeping because of a pending state. 565 * dasd_kick_device will schedule a call do do_kick_device to the kernel 566 * event daemon. 
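 * dasd_kick_device takes a reference on the device before queueing the
 * work; do_kick_device drops it again after the state change has run, so
 * the device cannot go away while the work item is pending.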
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
672 */ 673 674 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 675 676 #ifdef CONFIG_DASD_PROFILE 677 struct dasd_profile dasd_global_profile = { 678 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 679 }; 680 static struct dentry *dasd_debugfs_global_entry; 681 682 /* 683 * Add profiling information for cqr before execution. 684 */ 685 static void dasd_profile_start(struct dasd_block *block, 686 struct dasd_ccw_req *cqr, 687 struct request *req) 688 { 689 struct list_head *l; 690 unsigned int counter; 691 struct dasd_device *device; 692 693 /* count the length of the chanq for statistics */ 694 counter = 0; 695 if (dasd_global_profile_level || block->profile.data) 696 list_for_each(l, &block->ccw_queue) 697 if (++counter >= 31) 698 break; 699 700 spin_lock(&dasd_global_profile.lock); 701 if (dasd_global_profile.data) { 702 dasd_global_profile.data->dasd_io_nr_req[counter]++; 703 if (rq_data_dir(req) == READ) 704 dasd_global_profile.data->dasd_read_nr_req[counter]++; 705 } 706 spin_unlock(&dasd_global_profile.lock); 707 708 spin_lock(&block->profile.lock); 709 if (block->profile.data) { 710 block->profile.data->dasd_io_nr_req[counter]++; 711 if (rq_data_dir(req) == READ) 712 block->profile.data->dasd_read_nr_req[counter]++; 713 } 714 spin_unlock(&block->profile.lock); 715 716 /* 717 * We count the request for the start device, even though it may run on 718 * some other device due to error recovery. This way we make sure that 719 * we count each request only once. 720 */ 721 device = cqr->startdev; 722 if (device->profile.data) { 723 counter = 1; /* request is not yet queued on the start device */ 724 list_for_each(l, &device->ccw_queue) 725 if (++counter >= 31) 726 break; 727 } 728 spin_lock(&device->profile.lock); 729 if (device->profile.data) { 730 device->profile.data->dasd_io_nr_req[counter]++; 731 if (rq_data_dir(req) == READ) 732 device->profile.data->dasd_read_nr_req[counter]++; 733 } 734 spin_unlock(&device->profile.lock); 735 } 736 737 /* 738 * Add profiling information for cqr after execution. 
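 *
 * The dasd_profile_counter() macro below maps a value into one of 32
 * logarithmic histogram buckets: it yields the smallest index for which
 * value >> (2 + index) becomes zero (capped at 31), so bucket i covers
 * values from 2^(i+1) up to 2^(i+2)-1, with bucket 0 covering 0-3.
 * For example, 8 sectors land in bucket 2 and 4096 in bucket 11.
 * A rough, function-style sketch of the same computation (not part of
 * the driver):
 *
 *	static inline int profile_bucket(unsigned long value)
 *	{
 *		int index;
 *
 *		for (index = 0; index < 31 && value >> (2 + index); index++)
 *			;
 *		return index;
 *	}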
739 */ 740 741 #define dasd_profile_counter(value, index) \ 742 { \ 743 for (index = 0; index < 31 && value >> (2+index); index++) \ 744 ; \ 745 } 746 747 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 748 int is_alias, 749 int is_tpm, 750 int is_read, 751 long sectors, 752 int sectors_ind, 753 int tottime_ind, 754 int tottimeps_ind, 755 int strtime_ind, 756 int irqtime_ind, 757 int irqtimeps_ind, 758 int endtime_ind) 759 { 760 /* in case of an overflow, reset the whole profile */ 761 if (data->dasd_io_reqs == UINT_MAX) { 762 memset(data, 0, sizeof(*data)); 763 getnstimeofday(&data->starttod); 764 } 765 data->dasd_io_reqs++; 766 data->dasd_io_sects += sectors; 767 if (is_alias) 768 data->dasd_io_alias++; 769 if (is_tpm) 770 data->dasd_io_tpm++; 771 772 data->dasd_io_secs[sectors_ind]++; 773 data->dasd_io_times[tottime_ind]++; 774 data->dasd_io_timps[tottimeps_ind]++; 775 data->dasd_io_time1[strtime_ind]++; 776 data->dasd_io_time2[irqtime_ind]++; 777 data->dasd_io_time2ps[irqtimeps_ind]++; 778 data->dasd_io_time3[endtime_ind]++; 779 780 if (is_read) { 781 data->dasd_read_reqs++; 782 data->dasd_read_sects += sectors; 783 if (is_alias) 784 data->dasd_read_alias++; 785 if (is_tpm) 786 data->dasd_read_tpm++; 787 data->dasd_read_secs[sectors_ind]++; 788 data->dasd_read_times[tottime_ind]++; 789 data->dasd_read_time1[strtime_ind]++; 790 data->dasd_read_time2[irqtime_ind]++; 791 data->dasd_read_time3[endtime_ind]++; 792 } 793 } 794 795 static void dasd_profile_end(struct dasd_block *block, 796 struct dasd_ccw_req *cqr, 797 struct request *req) 798 { 799 long strtime, irqtime, endtime, tottime; /* in microseconds */ 800 long tottimeps, sectors; 801 struct dasd_device *device; 802 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 803 int irqtime_ind, irqtimeps_ind, endtime_ind; 804 805 device = cqr->startdev; 806 if (!(dasd_global_profile_level || 807 block->profile.data || 808 device->profile.data)) 809 return; 810 811 sectors = blk_rq_sectors(req); 812 if (!cqr->buildclk || !cqr->startclk || 813 !cqr->stopclk || !cqr->endclk || 814 !sectors) 815 return; 816 817 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 818 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 819 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 820 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 821 tottimeps = tottime / sectors; 822 823 dasd_profile_counter(sectors, sectors_ind); 824 dasd_profile_counter(tottime, tottime_ind); 825 dasd_profile_counter(tottimeps, tottimeps_ind); 826 dasd_profile_counter(strtime, strtime_ind); 827 dasd_profile_counter(irqtime, irqtime_ind); 828 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 829 dasd_profile_counter(endtime, endtime_ind); 830 831 spin_lock(&dasd_global_profile.lock); 832 if (dasd_global_profile.data) { 833 dasd_profile_end_add_data(dasd_global_profile.data, 834 cqr->startdev != block->base, 835 cqr->cpmode == 1, 836 rq_data_dir(req) == READ, 837 sectors, sectors_ind, tottime_ind, 838 tottimeps_ind, strtime_ind, 839 irqtime_ind, irqtimeps_ind, 840 endtime_ind); 841 } 842 spin_unlock(&dasd_global_profile.lock); 843 844 spin_lock(&block->profile.lock); 845 if (block->profile.data) 846 dasd_profile_end_add_data(block->profile.data, 847 cqr->startdev != block->base, 848 cqr->cpmode == 1, 849 rq_data_dir(req) == READ, 850 sectors, sectors_ind, tottime_ind, 851 tottimeps_ind, strtime_ind, 852 irqtime_ind, irqtimeps_ind, 853 endtime_ind); 854 spin_unlock(&block->profile.lock); 855 856 spin_lock(&device->profile.lock); 857 if 
(device->profile.data) 858 dasd_profile_end_add_data(device->profile.data, 859 cqr->startdev != block->base, 860 cqr->cpmode == 1, 861 rq_data_dir(req) == READ, 862 sectors, sectors_ind, tottime_ind, 863 tottimeps_ind, strtime_ind, 864 irqtime_ind, irqtimeps_ind, 865 endtime_ind); 866 spin_unlock(&device->profile.lock); 867 } 868 869 void dasd_profile_reset(struct dasd_profile *profile) 870 { 871 struct dasd_profile_info *data; 872 873 spin_lock_bh(&profile->lock); 874 data = profile->data; 875 if (!data) { 876 spin_unlock_bh(&profile->lock); 877 return; 878 } 879 memset(data, 0, sizeof(*data)); 880 getnstimeofday(&data->starttod); 881 spin_unlock_bh(&profile->lock); 882 } 883 884 int dasd_profile_on(struct dasd_profile *profile) 885 { 886 struct dasd_profile_info *data; 887 888 data = kzalloc(sizeof(*data), GFP_KERNEL); 889 if (!data) 890 return -ENOMEM; 891 spin_lock_bh(&profile->lock); 892 if (profile->data) { 893 spin_unlock_bh(&profile->lock); 894 kfree(data); 895 return 0; 896 } 897 getnstimeofday(&data->starttod); 898 profile->data = data; 899 spin_unlock_bh(&profile->lock); 900 return 0; 901 } 902 903 void dasd_profile_off(struct dasd_profile *profile) 904 { 905 spin_lock_bh(&profile->lock); 906 kfree(profile->data); 907 profile->data = NULL; 908 spin_unlock_bh(&profile->lock); 909 } 910 911 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 912 { 913 char *buffer; 914 915 buffer = vmalloc(user_len + 1); 916 if (buffer == NULL) 917 return ERR_PTR(-ENOMEM); 918 if (copy_from_user(buffer, user_buf, user_len) != 0) { 919 vfree(buffer); 920 return ERR_PTR(-EFAULT); 921 } 922 /* got the string, now strip linefeed. */ 923 if (buffer[user_len - 1] == '\n') 924 buffer[user_len - 1] = 0; 925 else 926 buffer[user_len] = 0; 927 return buffer; 928 } 929 930 static ssize_t dasd_stats_write(struct file *file, 931 const char __user *user_buf, 932 size_t user_len, loff_t *pos) 933 { 934 char *buffer, *str; 935 int rc; 936 struct seq_file *m = (struct seq_file *)file->private_data; 937 struct dasd_profile *prof = m->private; 938 939 if (user_len > 65536) 940 user_len = 65536; 941 buffer = dasd_get_user_string(user_buf, user_len); 942 if (IS_ERR(buffer)) 943 return PTR_ERR(buffer); 944 945 str = skip_spaces(buffer); 946 rc = user_len; 947 if (strncmp(str, "reset", 5) == 0) { 948 dasd_profile_reset(prof); 949 } else if (strncmp(str, "on", 2) == 0) { 950 rc = dasd_profile_on(prof); 951 if (rc) 952 goto out; 953 rc = user_len; 954 if (prof == &dasd_global_profile) { 955 dasd_profile_reset(prof); 956 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 957 } 958 } else if (strncmp(str, "off", 3) == 0) { 959 if (prof == &dasd_global_profile) 960 dasd_global_profile_level = DASD_PROFILE_OFF; 961 dasd_profile_off(prof); 962 } else 963 rc = -EINVAL; 964 out: 965 vfree(buffer); 966 return rc; 967 } 968 969 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 970 { 971 int i; 972 973 for (i = 0; i < 32; i++) 974 seq_printf(m, "%u ", array[i]); 975 seq_putc(m, '\n'); 976 } 977 978 static void dasd_stats_seq_print(struct seq_file *m, 979 struct dasd_profile_info *data) 980 { 981 seq_printf(m, "start_time %ld.%09ld\n", 982 data->starttod.tv_sec, data->starttod.tv_nsec); 983 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 984 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 985 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 986 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 987 seq_puts(m, "histogram_sectors "); 988 dasd_stats_array(m, 
data->dasd_io_secs); 989 seq_puts(m, "histogram_io_times "); 990 dasd_stats_array(m, data->dasd_io_times); 991 seq_puts(m, "histogram_io_times_weighted "); 992 dasd_stats_array(m, data->dasd_io_timps); 993 seq_puts(m, "histogram_time_build_to_ssch "); 994 dasd_stats_array(m, data->dasd_io_time1); 995 seq_puts(m, "histogram_time_ssch_to_irq "); 996 dasd_stats_array(m, data->dasd_io_time2); 997 seq_puts(m, "histogram_time_ssch_to_irq_weighted "); 998 dasd_stats_array(m, data->dasd_io_time2ps); 999 seq_puts(m, "histogram_time_irq_to_end "); 1000 dasd_stats_array(m, data->dasd_io_time3); 1001 seq_puts(m, "histogram_ccw_queue_length "); 1002 dasd_stats_array(m, data->dasd_io_nr_req); 1003 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 1004 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 1005 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 1006 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 1007 seq_puts(m, "histogram_read_sectors "); 1008 dasd_stats_array(m, data->dasd_read_secs); 1009 seq_puts(m, "histogram_read_times "); 1010 dasd_stats_array(m, data->dasd_read_times); 1011 seq_puts(m, "histogram_read_time_build_to_ssch "); 1012 dasd_stats_array(m, data->dasd_read_time1); 1013 seq_puts(m, "histogram_read_time_ssch_to_irq "); 1014 dasd_stats_array(m, data->dasd_read_time2); 1015 seq_puts(m, "histogram_read_time_irq_to_end "); 1016 dasd_stats_array(m, data->dasd_read_time3); 1017 seq_puts(m, "histogram_read_ccw_queue_length "); 1018 dasd_stats_array(m, data->dasd_read_nr_req); 1019 } 1020 1021 static int dasd_stats_show(struct seq_file *m, void *v) 1022 { 1023 struct dasd_profile *profile; 1024 struct dasd_profile_info *data; 1025 1026 profile = m->private; 1027 spin_lock_bh(&profile->lock); 1028 data = profile->data; 1029 if (!data) { 1030 spin_unlock_bh(&profile->lock); 1031 seq_puts(m, "disabled\n"); 1032 return 0; 1033 } 1034 dasd_stats_seq_print(m, data); 1035 spin_unlock_bh(&profile->lock); 1036 return 0; 1037 } 1038 1039 static int dasd_stats_open(struct inode *inode, struct file *file) 1040 { 1041 struct dasd_profile *profile = inode->i_private; 1042 return single_open(file, dasd_stats_show, profile); 1043 } 1044 1045 static const struct file_operations dasd_stats_raw_fops = { 1046 .owner = THIS_MODULE, 1047 .open = dasd_stats_open, 1048 .read = seq_read, 1049 .llseek = seq_lseek, 1050 .release = single_release, 1051 .write = dasd_stats_write, 1052 }; 1053 1054 static void dasd_profile_init(struct dasd_profile *profile, 1055 struct dentry *base_dentry) 1056 { 1057 umode_t mode; 1058 struct dentry *pde; 1059 1060 if (!base_dentry) 1061 return; 1062 profile->dentry = NULL; 1063 profile->data = NULL; 1064 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1065 pde = debugfs_create_file("statistics", mode, base_dentry, 1066 profile, &dasd_stats_raw_fops); 1067 if (pde && !IS_ERR(pde)) 1068 profile->dentry = pde; 1069 return; 1070 } 1071 1072 static void dasd_profile_exit(struct dasd_profile *profile) 1073 { 1074 dasd_profile_off(profile); 1075 debugfs_remove(profile->dentry); 1076 profile->dentry = NULL; 1077 } 1078 1079 static void dasd_statistics_removeroot(void) 1080 { 1081 dasd_global_profile_level = DASD_PROFILE_OFF; 1082 dasd_profile_exit(&dasd_global_profile); 1083 debugfs_remove(dasd_debugfs_global_entry); 1084 debugfs_remove(dasd_debugfs_root_entry); 1085 } 1086 1087 static void dasd_statistics_createroot(void) 1088 { 1089 struct dentry *pde; 1090 1091 dasd_debugfs_root_entry = NULL; 1092 pde = debugfs_create_dir("dasd", NULL); 1093 if 
(!pde || IS_ERR(pde)) 1094 goto error; 1095 dasd_debugfs_root_entry = pde; 1096 pde = debugfs_create_dir("global", dasd_debugfs_root_entry); 1097 if (!pde || IS_ERR(pde)) 1098 goto error; 1099 dasd_debugfs_global_entry = pde; 1100 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); 1101 return; 1102 1103 error: 1104 DBF_EVENT(DBF_ERR, "%s", 1105 "Creation of the dasd debugfs interface failed"); 1106 dasd_statistics_removeroot(); 1107 return; 1108 } 1109 1110 #else 1111 #define dasd_profile_start(block, cqr, req) do {} while (0) 1112 #define dasd_profile_end(block, cqr, req) do {} while (0) 1113 1114 static void dasd_statistics_createroot(void) 1115 { 1116 return; 1117 } 1118 1119 static void dasd_statistics_removeroot(void) 1120 { 1121 return; 1122 } 1123 1124 int dasd_stats_generic_show(struct seq_file *m, void *v) 1125 { 1126 seq_puts(m, "Statistics are not activated in this kernel\n"); 1127 return 0; 1128 } 1129 1130 static void dasd_profile_init(struct dasd_profile *profile, 1131 struct dentry *base_dentry) 1132 { 1133 return; 1134 } 1135 1136 static void dasd_profile_exit(struct dasd_profile *profile) 1137 { 1138 return; 1139 } 1140 1141 int dasd_profile_on(struct dasd_profile *profile) 1142 { 1143 return 0; 1144 } 1145 1146 #endif /* CONFIG_DASD_PROFILE */ 1147 1148 /* 1149 * Allocate memory for a channel program with 'cplength' channel 1150 * command words and 'datasize' additional space. There are two 1151 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed 1152 * memory and 2) dasd_smalloc_request uses the static ccw memory 1153 * that gets allocated for each device. 1154 */ 1155 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, 1156 int datasize, 1157 struct dasd_device *device) 1158 { 1159 struct dasd_ccw_req *cqr; 1160 1161 /* Sanity checks */ 1162 BUG_ON(datasize > PAGE_SIZE || 1163 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 1164 1165 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 1166 if (cqr == NULL) 1167 return ERR_PTR(-ENOMEM); 1168 cqr->cpaddr = NULL; 1169 if (cplength > 0) { 1170 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 1171 GFP_ATOMIC | GFP_DMA); 1172 if (cqr->cpaddr == NULL) { 1173 kfree(cqr); 1174 return ERR_PTR(-ENOMEM); 1175 } 1176 } 1177 cqr->data = NULL; 1178 if (datasize > 0) { 1179 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 1180 if (cqr->data == NULL) { 1181 kfree(cqr->cpaddr); 1182 kfree(cqr); 1183 return ERR_PTR(-ENOMEM); 1184 } 1185 } 1186 cqr->magic = magic; 1187 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1188 dasd_get_device(device); 1189 return cqr; 1190 } 1191 EXPORT_SYMBOL(dasd_kmalloc_request); 1192 1193 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, 1194 int datasize, 1195 struct dasd_device *device) 1196 { 1197 unsigned long flags; 1198 struct dasd_ccw_req *cqr; 1199 char *data; 1200 int size; 1201 1202 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 1203 if (cplength > 0) 1204 size += cplength * sizeof(struct ccw1); 1205 if (datasize > 0) 1206 size += datasize; 1207 spin_lock_irqsave(&device->mem_lock, flags); 1208 cqr = (struct dasd_ccw_req *) 1209 dasd_alloc_chunk(&device->ccw_chunks, size); 1210 spin_unlock_irqrestore(&device->mem_lock, flags); 1211 if (cqr == NULL) 1212 return ERR_PTR(-ENOMEM); 1213 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 1214 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 1215 cqr->cpaddr = NULL; 1216 if (cplength > 0) { 1217 cqr->cpaddr = (struct ccw1 *) data; 1218 data += cplength*sizeof(struct 
ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kfree_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
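 * dasd_term_IO retries ccw_device_clear up to five times while the
 * request is still in DASD_CQR_IN_IO; -ENODEV, -EIO and -EBUSY are
 * treated as transient and retried, whereas -EINVAL means no I/O can be
 * active on the device, so the request is accounted as terminated
 * right away.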
1293 */ 1294 int dasd_term_IO(struct dasd_ccw_req *cqr) 1295 { 1296 struct dasd_device *device; 1297 int retries, rc; 1298 char errorstring[ERRORLENGTH]; 1299 1300 /* Check the cqr */ 1301 rc = dasd_check_cqr(cqr); 1302 if (rc) 1303 return rc; 1304 retries = 0; 1305 device = (struct dasd_device *) cqr->startdev; 1306 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1307 rc = ccw_device_clear(device->cdev, (long) cqr); 1308 switch (rc) { 1309 case 0: /* termination successful */ 1310 cqr->status = DASD_CQR_CLEAR_PENDING; 1311 cqr->stopclk = get_tod_clock(); 1312 cqr->starttime = 0; 1313 DBF_DEV_EVENT(DBF_DEBUG, device, 1314 "terminate cqr %p successful", 1315 cqr); 1316 break; 1317 case -ENODEV: 1318 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1319 "device gone, retry"); 1320 break; 1321 case -EIO: 1322 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1323 "I/O error, retry"); 1324 break; 1325 case -EINVAL: 1326 /* 1327 * device not valid so no I/O could be running 1328 * handle CQR as termination successful 1329 */ 1330 cqr->status = DASD_CQR_CLEARED; 1331 cqr->stopclk = get_tod_clock(); 1332 cqr->starttime = 0; 1333 /* no retries for invalid devices */ 1334 cqr->retries = -1; 1335 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1336 "EINVAL, handle as terminated"); 1337 /* fake rc to success */ 1338 rc = 0; 1339 break; 1340 case -EBUSY: 1341 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1342 "device busy, retry later"); 1343 break; 1344 default: 1345 /* internal error 10 - unknown rc*/ 1346 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1347 dev_err(&device->cdev->dev, "An error occurred in the " 1348 "DASD device driver, reason=%s\n", errorstring); 1349 BUG(); 1350 break; 1351 } 1352 retries++; 1353 } 1354 dasd_schedule_device_bh(device); 1355 return rc; 1356 } 1357 EXPORT_SYMBOL(dasd_term_IO); 1358 1359 /* 1360 * Start the i/o. This start_IO can fail if the channel is really busy. 1361 * In that case set up a timer to start the request later. 
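 * Unless DASD_CQR_VERIFY_PATH is set, the request's logical path mask
 * (lpm) is first intersected with the currently operational paths (opm)
 * and falls back to the full opm if nothing is left.  Requests built in
 * transport mode (cpmode == 1) are started via ccw_device_tm_start, all
 * others via ccw_device_start.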
1362 */ 1363 int dasd_start_IO(struct dasd_ccw_req *cqr) 1364 { 1365 struct dasd_device *device; 1366 int rc; 1367 char errorstring[ERRORLENGTH]; 1368 1369 /* Check the cqr */ 1370 rc = dasd_check_cqr(cqr); 1371 if (rc) { 1372 cqr->intrc = rc; 1373 return rc; 1374 } 1375 device = (struct dasd_device *) cqr->startdev; 1376 if (((cqr->block && 1377 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1378 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1379 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1380 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1381 "because of stolen lock", cqr); 1382 cqr->status = DASD_CQR_ERROR; 1383 cqr->intrc = -EPERM; 1384 return -EPERM; 1385 } 1386 if (cqr->retries < 0) { 1387 /* internal error 14 - start_IO run out of retries */ 1388 sprintf(errorstring, "14 %p", cqr); 1389 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1390 "device driver, reason=%s\n", errorstring); 1391 cqr->status = DASD_CQR_ERROR; 1392 return -EIO; 1393 } 1394 cqr->startclk = get_tod_clock(); 1395 cqr->starttime = jiffies; 1396 cqr->retries--; 1397 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1398 cqr->lpm &= device->path_data.opm; 1399 if (!cqr->lpm) 1400 cqr->lpm = device->path_data.opm; 1401 } 1402 if (cqr->cpmode == 1) { 1403 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1404 (long) cqr, cqr->lpm); 1405 } else { 1406 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1407 (long) cqr, cqr->lpm, 0); 1408 } 1409 switch (rc) { 1410 case 0: 1411 cqr->status = DASD_CQR_IN_IO; 1412 break; 1413 case -EBUSY: 1414 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1415 "start_IO: device busy, retry later"); 1416 break; 1417 case -ETIMEDOUT: 1418 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1419 "start_IO: request timeout, retry later"); 1420 break; 1421 case -EACCES: 1422 /* -EACCES indicates that the request used only a subset of the 1423 * available paths and all these paths are gone. If the lpm of 1424 * this request was only a subset of the opm (e.g. the ppm) then 1425 * we just do a retry with all available paths. 1426 * If we already use the full opm, something is amiss, and we 1427 * need a full path verification. 
1428 */ 1429 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1430 DBF_DEV_EVENT(DBF_WARNING, device, 1431 "start_IO: selected paths gone (%x)", 1432 cqr->lpm); 1433 } else if (cqr->lpm != device->path_data.opm) { 1434 cqr->lpm = device->path_data.opm; 1435 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1436 "start_IO: selected paths gone," 1437 " retry on all paths"); 1438 } else { 1439 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1440 "start_IO: all paths in opm gone," 1441 " do path verification"); 1442 dasd_generic_last_path_gone(device); 1443 device->path_data.opm = 0; 1444 device->path_data.ppm = 0; 1445 device->path_data.npm = 0; 1446 device->path_data.tbvpm = 1447 ccw_device_get_path_mask(device->cdev); 1448 } 1449 break; 1450 case -ENODEV: 1451 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1452 "start_IO: -ENODEV device gone, retry"); 1453 break; 1454 case -EIO: 1455 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1456 "start_IO: -EIO device gone, retry"); 1457 break; 1458 case -EINVAL: 1459 /* most likely caused in power management context */ 1460 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1461 "start_IO: -EINVAL device currently " 1462 "not accessible"); 1463 break; 1464 default: 1465 /* internal error 11 - unknown rc */ 1466 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1467 dev_err(&device->cdev->dev, 1468 "An error occurred in the DASD device driver, " 1469 "reason=%s\n", errorstring); 1470 BUG(); 1471 break; 1472 } 1473 cqr->intrc = rc; 1474 return rc; 1475 } 1476 EXPORT_SYMBOL(dasd_start_IO); 1477 1478 /* 1479 * Timeout function for dasd devices. This is used for different purposes 1480 * 1) missing interrupt handler for normal operation 1481 * 2) delayed start of request where start_IO failed with -EBUSY 1482 * 3) timeout for missing state change interrupts 1483 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1484 * DASD_CQR_QUEUED for 2) and 3). 1485 */ 1486 static void dasd_device_timeout(unsigned long ptr) 1487 { 1488 unsigned long flags; 1489 struct dasd_device *device; 1490 1491 device = (struct dasd_device *) ptr; 1492 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1493 /* re-activate request queue */ 1494 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1495 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1496 dasd_schedule_device_bh(device); 1497 } 1498 1499 /* 1500 * Setup timeout for a device in jiffies. 1501 */ 1502 void dasd_device_set_timer(struct dasd_device *device, int expires) 1503 { 1504 if (expires == 0) 1505 del_timer(&device->timer); 1506 else 1507 mod_timer(&device->timer, jiffies + expires); 1508 } 1509 EXPORT_SYMBOL(dasd_device_set_timer); 1510 1511 /* 1512 * Clear timeout for a device. 
1513 */ 1514 void dasd_device_clear_timer(struct dasd_device *device) 1515 { 1516 del_timer(&device->timer); 1517 } 1518 EXPORT_SYMBOL(dasd_device_clear_timer); 1519 1520 static void dasd_handle_killed_request(struct ccw_device *cdev, 1521 unsigned long intparm) 1522 { 1523 struct dasd_ccw_req *cqr; 1524 struct dasd_device *device; 1525 1526 if (!intparm) 1527 return; 1528 cqr = (struct dasd_ccw_req *) intparm; 1529 if (cqr->status != DASD_CQR_IN_IO) { 1530 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1531 "invalid status in handle_killed_request: " 1532 "%02x", cqr->status); 1533 return; 1534 } 1535 1536 device = dasd_device_from_cdev_locked(cdev); 1537 if (IS_ERR(device)) { 1538 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1539 "unable to get device from cdev"); 1540 return; 1541 } 1542 1543 if (!cqr->startdev || 1544 device != cqr->startdev || 1545 strncmp(cqr->startdev->discipline->ebcname, 1546 (char *) &cqr->magic, 4)) { 1547 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1548 "invalid device in request"); 1549 dasd_put_device(device); 1550 return; 1551 } 1552 1553 /* Schedule request to be retried. */ 1554 cqr->status = DASD_CQR_QUEUED; 1555 1556 dasd_device_clear_timer(device); 1557 dasd_schedule_device_bh(device); 1558 dasd_put_device(device); 1559 } 1560 1561 void dasd_generic_handle_state_change(struct dasd_device *device) 1562 { 1563 /* First of all start sense subsystem status request. */ 1564 dasd_eer_snss(device); 1565 1566 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1567 dasd_schedule_device_bh(device); 1568 if (device->block) 1569 dasd_schedule_block_bh(device->block); 1570 } 1571 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1572 1573 /* 1574 * Interrupt handler for "normal" ssch-io based dasd devices. 1575 */ 1576 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1577 struct irb *irb) 1578 { 1579 struct dasd_ccw_req *cqr, *next; 1580 struct dasd_device *device; 1581 unsigned long long now; 1582 int expires; 1583 1584 if (IS_ERR(irb)) { 1585 switch (PTR_ERR(irb)) { 1586 case -EIO: 1587 break; 1588 case -ETIMEDOUT: 1589 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1590 "request timed out\n", __func__); 1591 break; 1592 default: 1593 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1594 "unknown error %ld\n", __func__, 1595 PTR_ERR(irb)); 1596 } 1597 dasd_handle_killed_request(cdev, intparm); 1598 return; 1599 } 1600 1601 now = get_tod_clock(); 1602 cqr = (struct dasd_ccw_req *) intparm; 1603 /* check for conditions that should be handled immediately */ 1604 if (!cqr || 1605 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1606 scsw_cstat(&irb->scsw) == 0)) { 1607 if (cqr) 1608 memcpy(&cqr->irb, irb, sizeof(*irb)); 1609 device = dasd_device_from_cdev_locked(cdev); 1610 if (IS_ERR(device)) 1611 return; 1612 /* ignore unsolicited interrupts for DIAG discipline */ 1613 if (device->discipline == dasd_diag_discipline_pointer) { 1614 dasd_put_device(device); 1615 return; 1616 } 1617 device->discipline->dump_sense_dbf(device, irb, "int"); 1618 if (device->features & DASD_FEATURE_ERPLOG) 1619 device->discipline->dump_sense(device, cqr, irb); 1620 device->discipline->check_for_device_change(device, cqr, irb); 1621 dasd_put_device(device); 1622 } 1623 1624 /* check for for attention message */ 1625 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { 1626 device = dasd_device_from_cdev_locked(cdev); 1627 device->discipline->check_attention(device, irb->esw.esw1.lpum); 1628 dasd_put_device(device); 1629 } 1630 1631 if (!cqr) 1632 return; 1633 1634 device = 
(struct dasd_device *) cqr->startdev; 1635 if (!device || 1636 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1637 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1638 "invalid device in request"); 1639 return; 1640 } 1641 1642 /* Check for clear pending */ 1643 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1644 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1645 cqr->status = DASD_CQR_CLEARED; 1646 dasd_device_clear_timer(device); 1647 wake_up(&dasd_flush_wq); 1648 dasd_schedule_device_bh(device); 1649 return; 1650 } 1651 1652 /* check status - the request might have been killed by dyn detach */ 1653 if (cqr->status != DASD_CQR_IN_IO) { 1654 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1655 "status %02x", dev_name(&cdev->dev), cqr->status); 1656 return; 1657 } 1658 1659 next = NULL; 1660 expires = 0; 1661 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1662 scsw_cstat(&irb->scsw) == 0) { 1663 /* request was completed successfully */ 1664 cqr->status = DASD_CQR_SUCCESS; 1665 cqr->stopclk = now; 1666 /* Start first request on queue if possible -> fast_io. */ 1667 if (cqr->devlist.next != &device->ccw_queue) { 1668 next = list_entry(cqr->devlist.next, 1669 struct dasd_ccw_req, devlist); 1670 } 1671 } else { /* error */ 1672 /* 1673 * If we don't want complex ERP for this request, then just 1674 * reset this and retry it in the fastpath 1675 */ 1676 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1677 cqr->retries > 0) { 1678 if (cqr->lpm == device->path_data.opm) 1679 DBF_DEV_EVENT(DBF_DEBUG, device, 1680 "default ERP in fastpath " 1681 "(%i retries left)", 1682 cqr->retries); 1683 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1684 cqr->lpm = device->path_data.opm; 1685 cqr->status = DASD_CQR_QUEUED; 1686 next = cqr; 1687 } else 1688 cqr->status = DASD_CQR_ERROR; 1689 } 1690 if (next && (next->status == DASD_CQR_QUEUED) && 1691 (!device->stopped)) { 1692 if (device->discipline->start_IO(next) == 0) 1693 expires = next->expires; 1694 } 1695 if (expires != 0) 1696 dasd_device_set_timer(device, expires); 1697 else 1698 dasd_device_clear_timer(device); 1699 dasd_schedule_device_bh(device); 1700 } 1701 EXPORT_SYMBOL(dasd_int_handler); 1702 1703 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1704 { 1705 struct dasd_device *device; 1706 1707 device = dasd_device_from_cdev_locked(cdev); 1708 1709 if (IS_ERR(device)) 1710 goto out; 1711 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1712 device->state != device->target || 1713 !device->discipline->check_for_device_change){ 1714 dasd_put_device(device); 1715 goto out; 1716 } 1717 if (device->discipline->dump_sense_dbf) 1718 device->discipline->dump_sense_dbf(device, irb, "uc"); 1719 device->discipline->check_for_device_change(device, NULL, irb); 1720 dasd_put_device(device); 1721 out: 1722 return UC_TODO_RETRY; 1723 } 1724 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1725 1726 /* 1727 * If we have an error on a dasd_block layer request then we cancel 1728 * and return all further requests from the same dasd_block as well. 
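 * This is done by marking every request that is still queued for the
 * same dasd_block as DASD_CQR_CLEARED, so it is returned to the block
 * layer together with the failing request.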
1729 */ 1730 static void __dasd_device_recovery(struct dasd_device *device, 1731 struct dasd_ccw_req *ref_cqr) 1732 { 1733 struct list_head *l, *n; 1734 struct dasd_ccw_req *cqr; 1735 1736 /* 1737 * only requeue request that came from the dasd_block layer 1738 */ 1739 if (!ref_cqr->block) 1740 return; 1741 1742 list_for_each_safe(l, n, &device->ccw_queue) { 1743 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1744 if (cqr->status == DASD_CQR_QUEUED && 1745 ref_cqr->block == cqr->block) { 1746 cqr->status = DASD_CQR_CLEARED; 1747 } 1748 } 1749 }; 1750 1751 /* 1752 * Remove those ccw requests from the queue that need to be returned 1753 * to the upper layer. 1754 */ 1755 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1756 struct list_head *final_queue) 1757 { 1758 struct list_head *l, *n; 1759 struct dasd_ccw_req *cqr; 1760 1761 /* Process request with final status. */ 1762 list_for_each_safe(l, n, &device->ccw_queue) { 1763 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1764 1765 /* Skip any non-final request. */ 1766 if (cqr->status == DASD_CQR_QUEUED || 1767 cqr->status == DASD_CQR_IN_IO || 1768 cqr->status == DASD_CQR_CLEAR_PENDING) 1769 continue; 1770 if (cqr->status == DASD_CQR_ERROR) { 1771 __dasd_device_recovery(device, cqr); 1772 } 1773 /* Rechain finished requests to final queue */ 1774 list_move_tail(&cqr->devlist, final_queue); 1775 } 1776 } 1777 1778 /* 1779 * the cqrs from the final queue are returned to the upper layer 1780 * by setting a dasd_block state and calling the callback function 1781 */ 1782 static void __dasd_device_process_final_queue(struct dasd_device *device, 1783 struct list_head *final_queue) 1784 { 1785 struct list_head *l, *n; 1786 struct dasd_ccw_req *cqr; 1787 struct dasd_block *block; 1788 void (*callback)(struct dasd_ccw_req *, void *data); 1789 void *callback_data; 1790 char errorstring[ERRORLENGTH]; 1791 1792 list_for_each_safe(l, n, final_queue) { 1793 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1794 list_del_init(&cqr->devlist); 1795 block = cqr->block; 1796 callback = cqr->callback; 1797 callback_data = cqr->callback_data; 1798 if (block) 1799 spin_lock_bh(&block->queue_lock); 1800 switch (cqr->status) { 1801 case DASD_CQR_SUCCESS: 1802 cqr->status = DASD_CQR_DONE; 1803 break; 1804 case DASD_CQR_ERROR: 1805 cqr->status = DASD_CQR_NEED_ERP; 1806 break; 1807 case DASD_CQR_CLEARED: 1808 cqr->status = DASD_CQR_TERMINATED; 1809 break; 1810 default: 1811 /* internal error 12 - wrong cqr status*/ 1812 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1813 dev_err(&device->cdev->dev, 1814 "An error occurred in the DASD device driver, " 1815 "reason=%s\n", errorstring); 1816 BUG(); 1817 } 1818 if (cqr->callback != NULL) 1819 (callback)(cqr, callback_data); 1820 if (block) 1821 spin_unlock_bh(&block->queue_lock); 1822 } 1823 } 1824 1825 /* 1826 * Take a look at the first request on the ccw queue and check 1827 * if it reached its expire time. If so, terminate the IO. 
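 * If the expired request cannot be terminated, it is given five more
 * seconds and the device timer is rearmed; requests that belong to a
 * safe offline operation get an extra retry so they do not run out of
 * retries because of the timeout.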
1828 */ 1829 static void __dasd_device_check_expire(struct dasd_device *device) 1830 { 1831 struct dasd_ccw_req *cqr; 1832 1833 if (list_empty(&device->ccw_queue)) 1834 return; 1835 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1836 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1837 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1838 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1839 /* 1840 * IO in safe offline processing should not 1841 * run out of retries 1842 */ 1843 cqr->retries++; 1844 } 1845 if (device->discipline->term_IO(cqr) != 0) { 1846 /* Hmpf, try again in 5 sec */ 1847 dev_err(&device->cdev->dev, 1848 "cqr %p timed out (%lus) but cannot be " 1849 "ended, retrying in 5 s\n", 1850 cqr, (cqr->expires/HZ)); 1851 cqr->expires += 5*HZ; 1852 dasd_device_set_timer(device, 5*HZ); 1853 } else { 1854 dev_err(&device->cdev->dev, 1855 "cqr %p timed out (%lus), %i retries " 1856 "remaining\n", cqr, (cqr->expires/HZ), 1857 cqr->retries); 1858 } 1859 } 1860 } 1861 1862 /* 1863 * Take a look at the first request on the ccw queue and check 1864 * if it needs to be started. 1865 */ 1866 static void __dasd_device_start_head(struct dasd_device *device) 1867 { 1868 struct dasd_ccw_req *cqr; 1869 int rc; 1870 1871 if (list_empty(&device->ccw_queue)) 1872 return; 1873 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1874 if (cqr->status != DASD_CQR_QUEUED) 1875 return; 1876 /* when device is stopped, return request to previous layer 1877 * exception: only the disconnect or unresumed bits are set and the 1878 * cqr is a path verification request 1879 */ 1880 if (device->stopped && 1881 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 1882 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { 1883 cqr->intrc = -EAGAIN; 1884 cqr->status = DASD_CQR_CLEARED; 1885 dasd_schedule_device_bh(device); 1886 return; 1887 } 1888 1889 rc = device->discipline->start_IO(cqr); 1890 if (rc == 0) 1891 dasd_device_set_timer(device, cqr->expires); 1892 else if (rc == -EACCES) { 1893 dasd_schedule_device_bh(device); 1894 } else 1895 /* Hmpf, try again in 1/2 sec */ 1896 dasd_device_set_timer(device, 50); 1897 } 1898 1899 static void __dasd_device_check_path_events(struct dasd_device *device) 1900 { 1901 int rc; 1902 1903 if (device->path_data.tbvpm) { 1904 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 1905 DASD_UNRESUMED_PM)) 1906 return; 1907 rc = device->discipline->verify_path( 1908 device, device->path_data.tbvpm); 1909 if (rc) 1910 dasd_device_set_timer(device, 50); 1911 else 1912 device->path_data.tbvpm = 0; 1913 } 1914 }; 1915 1916 /* 1917 * Go through all request on the dasd_device request queue, 1918 * terminate them on the cdev if necessary, and return them to the 1919 * submitting layer via callback. 1920 * Note: 1921 * Make sure that all 'submitting layers' still exist when 1922 * this function is called!. In other words, when 'device' is a base 1923 * device then all block layer requests must have been removed before 1924 * via dasd_flush_block_queue. 
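 * The flush works roughly as follows:
 *
 *	DASD_CQR_IN_IO  --term_IO-->  CLEAR_PENDING  --interrupt-->  CLEARED
 *	DASD_CQR_QUEUED ------------------------------------------>  CLEARED
 *	final states (SUCCESS, ERROR, ...) are left untouched
 *
 * The function then waits on dasd_flush_wq until no request is in
 * CLEAR_PENDING any more and finally invokes the callbacks through
 * __dasd_device_process_final_queue.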
1925 */ 1926 int dasd_flush_device_queue(struct dasd_device *device) 1927 { 1928 struct dasd_ccw_req *cqr, *n; 1929 int rc; 1930 struct list_head flush_queue; 1931 1932 INIT_LIST_HEAD(&flush_queue); 1933 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1934 rc = 0; 1935 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1936 /* Check status and move request to flush_queue */ 1937 switch (cqr->status) { 1938 case DASD_CQR_IN_IO: 1939 rc = device->discipline->term_IO(cqr); 1940 if (rc) { 1941 /* unable to terminate requeust */ 1942 dev_err(&device->cdev->dev, 1943 "Flushing the DASD request queue " 1944 "failed for request %p\n", cqr); 1945 /* stop flush processing */ 1946 goto finished; 1947 } 1948 break; 1949 case DASD_CQR_QUEUED: 1950 cqr->stopclk = get_tod_clock(); 1951 cqr->status = DASD_CQR_CLEARED; 1952 break; 1953 default: /* no need to modify the others */ 1954 break; 1955 } 1956 list_move_tail(&cqr->devlist, &flush_queue); 1957 } 1958 finished: 1959 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1960 /* 1961 * After this point all requests must be in state CLEAR_PENDING, 1962 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1963 * one of the others. 1964 */ 1965 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1966 wait_event(dasd_flush_wq, 1967 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1968 /* 1969 * Now set each request back to TERMINATED, DONE or NEED_ERP 1970 * and call the callback function of flushed requests 1971 */ 1972 __dasd_device_process_final_queue(device, &flush_queue); 1973 return rc; 1974 } 1975 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 1976 1977 /* 1978 * Acquire the device lock and process queues for the device. 1979 */ 1980 static void dasd_device_tasklet(struct dasd_device *device) 1981 { 1982 struct list_head final_queue; 1983 1984 atomic_set (&device->tasklet_scheduled, 0); 1985 INIT_LIST_HEAD(&final_queue); 1986 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1987 /* Check expire time of first request on the ccw queue. */ 1988 __dasd_device_check_expire(device); 1989 /* find final requests on ccw queue */ 1990 __dasd_device_process_ccw_queue(device, &final_queue); 1991 __dasd_device_check_path_events(device); 1992 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1993 /* Now call the callback function of requests with final status */ 1994 __dasd_device_process_final_queue(device, &final_queue); 1995 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1996 /* Now check if the head of the ccw queue needs to be started. */ 1997 __dasd_device_start_head(device); 1998 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1999 if (waitqueue_active(&shutdown_waitq)) 2000 wake_up(&shutdown_waitq); 2001 dasd_put_device(device); 2002 } 2003 2004 /* 2005 * Schedules a call to dasd_tasklet over the device tasklet. 2006 */ 2007 void dasd_schedule_device_bh(struct dasd_device *device) 2008 { 2009 /* Protect against rescheduling. 
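 *
 * atomic_cmpxchg() returns the previous value, so only the caller that
 * flips tasklet_scheduled from 0 to 1 takes a device reference and
 * schedules the tasklet; dasd_device_tasklet() clears the flag and drops
 * that reference again once it has run.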
*/ 2010 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2011 return; 2012 dasd_get_device(device); 2013 tasklet_hi_schedule(&device->tasklet); 2014 } 2015 EXPORT_SYMBOL(dasd_schedule_device_bh); 2016 2017 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2018 { 2019 device->stopped |= bits; 2020 } 2021 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2022 2023 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2024 { 2025 device->stopped &= ~bits; 2026 if (!device->stopped) 2027 wake_up(&generic_waitq); 2028 } 2029 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2030 2031 /* 2032 * Queue a request to the head of the device ccw_queue. 2033 * Start the I/O if possible. 2034 */ 2035 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2036 { 2037 struct dasd_device *device; 2038 unsigned long flags; 2039 2040 device = cqr->startdev; 2041 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2042 cqr->status = DASD_CQR_QUEUED; 2043 list_add(&cqr->devlist, &device->ccw_queue); 2044 /* let the bh start the request to keep them in order */ 2045 dasd_schedule_device_bh(device); 2046 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2047 } 2048 EXPORT_SYMBOL(dasd_add_request_head); 2049 2050 /* 2051 * Queue a request to the tail of the device ccw_queue. 2052 * Start the I/O if possible. 2053 */ 2054 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2055 { 2056 struct dasd_device *device; 2057 unsigned long flags; 2058 2059 device = cqr->startdev; 2060 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2061 cqr->status = DASD_CQR_QUEUED; 2062 list_add_tail(&cqr->devlist, &device->ccw_queue); 2063 /* let the bh start the request to keep them in order */ 2064 dasd_schedule_device_bh(device); 2065 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2066 } 2067 EXPORT_SYMBOL(dasd_add_request_tail); 2068 2069 /* 2070 * Wakeup helper for the 'sleep_on' functions. 2071 */ 2072 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2073 { 2074 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2075 cqr->callback_data = DASD_SLEEPON_END_TAG; 2076 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2077 wake_up(&generic_waitq); 2078 } 2079 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2080 2081 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2082 { 2083 struct dasd_device *device; 2084 int rc; 2085 2086 device = cqr->startdev; 2087 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2088 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2089 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2090 return rc; 2091 } 2092 2093 /* 2094 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
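 *
 * Depending on the request status this hands a terminated request back to
 * the discipline, starts the matching ERP action for DASD_CQR_NEED_ERP,
 * logs the sense data of a failed request, or finishes a completed ERP
 * request via __dasd_process_erp(). A return value of 1 tells the
 * sleep_on loop to walk its request list again.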
2095 */ 2096 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2097 { 2098 struct dasd_device *device; 2099 dasd_erp_fn_t erp_fn; 2100 2101 if (cqr->status == DASD_CQR_FILLED) 2102 return 0; 2103 device = cqr->startdev; 2104 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2105 if (cqr->status == DASD_CQR_TERMINATED) { 2106 device->discipline->handle_terminated_request(cqr); 2107 return 1; 2108 } 2109 if (cqr->status == DASD_CQR_NEED_ERP) { 2110 erp_fn = device->discipline->erp_action(cqr); 2111 erp_fn(cqr); 2112 return 1; 2113 } 2114 if (cqr->status == DASD_CQR_FAILED) 2115 dasd_log_sense(cqr, &cqr->irb); 2116 if (cqr->refers) { 2117 __dasd_process_erp(device, cqr); 2118 return 1; 2119 } 2120 } 2121 return 0; 2122 } 2123 2124 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2125 { 2126 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2127 if (cqr->refers) /* erp is not done yet */ 2128 return 1; 2129 return ((cqr->status != DASD_CQR_DONE) && 2130 (cqr->status != DASD_CQR_FAILED)); 2131 } else 2132 return (cqr->status == DASD_CQR_FILLED); 2133 } 2134 2135 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2136 { 2137 struct dasd_device *device; 2138 int rc; 2139 struct list_head ccw_queue; 2140 struct dasd_ccw_req *cqr; 2141 2142 INIT_LIST_HEAD(&ccw_queue); 2143 maincqr->status = DASD_CQR_FILLED; 2144 device = maincqr->startdev; 2145 list_add(&maincqr->blocklist, &ccw_queue); 2146 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2147 cqr = list_first_entry(&ccw_queue, 2148 struct dasd_ccw_req, blocklist)) { 2149 2150 if (__dasd_sleep_on_erp(cqr)) 2151 continue; 2152 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2153 continue; 2154 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2155 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2156 cqr->status = DASD_CQR_FAILED; 2157 cqr->intrc = -EPERM; 2158 continue; 2159 } 2160 /* Non-temporary stop condition will trigger fail fast */ 2161 if (device->stopped & ~DASD_STOPPED_PENDING && 2162 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2163 (!dasd_eer_enabled(device))) { 2164 cqr->status = DASD_CQR_FAILED; 2165 cqr->intrc = -ENOLINK; 2166 continue; 2167 } 2168 /* Don't try to start requests if device is stopped */ 2169 if (interruptible) { 2170 rc = wait_event_interruptible( 2171 generic_waitq, !(device->stopped)); 2172 if (rc == -ERESTARTSYS) { 2173 cqr->status = DASD_CQR_FAILED; 2174 maincqr->intrc = rc; 2175 continue; 2176 } 2177 } else 2178 wait_event(generic_waitq, !(device->stopped)); 2179 2180 if (!cqr->callback) 2181 cqr->callback = dasd_wakeup_cb; 2182 2183 cqr->callback_data = DASD_SLEEPON_START_TAG; 2184 dasd_add_request_tail(cqr); 2185 if (interruptible) { 2186 rc = wait_event_interruptible( 2187 generic_waitq, _wait_for_wakeup(cqr)); 2188 if (rc == -ERESTARTSYS) { 2189 dasd_cancel_req(cqr); 2190 /* wait (non-interruptible) for final status */ 2191 wait_event(generic_waitq, 2192 _wait_for_wakeup(cqr)); 2193 cqr->status = DASD_CQR_FAILED; 2194 maincqr->intrc = rc; 2195 continue; 2196 } 2197 } else 2198 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2199 } 2200 2201 maincqr->endclk = get_tod_clock(); 2202 if ((maincqr->status != DASD_CQR_DONE) && 2203 (maincqr->intrc != -ERESTARTSYS)) 2204 dasd_log_sense(maincqr, &maincqr->irb); 2205 if (maincqr->status == DASD_CQR_DONE) 2206 rc = 0; 2207 else if (maincqr->intrc) 2208 rc = maincqr->intrc; 2209 else 2210 rc = -EIO; 2211 return rc; 2212 } 2213 2214 static inline int _wait_for_wakeup_queue(struct 
list_head *ccw_queue) 2215 { 2216 struct dasd_ccw_req *cqr; 2217 2218 list_for_each_entry(cqr, ccw_queue, blocklist) { 2219 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2220 return 0; 2221 } 2222 2223 return 1; 2224 } 2225 2226 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2227 { 2228 struct dasd_device *device; 2229 struct dasd_ccw_req *cqr, *n; 2230 int rc; 2231 2232 retry: 2233 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2234 device = cqr->startdev; 2235 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2236 continue; 2237 2238 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2239 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2240 cqr->status = DASD_CQR_FAILED; 2241 cqr->intrc = -EPERM; 2242 continue; 2243 } 2244 /*Non-temporary stop condition will trigger fail fast*/ 2245 if (device->stopped & ~DASD_STOPPED_PENDING && 2246 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2247 !dasd_eer_enabled(device)) { 2248 cqr->status = DASD_CQR_FAILED; 2249 cqr->intrc = -EAGAIN; 2250 continue; 2251 } 2252 2253 /*Don't try to start requests if device is stopped*/ 2254 if (interruptible) { 2255 rc = wait_event_interruptible( 2256 generic_waitq, !device->stopped); 2257 if (rc == -ERESTARTSYS) { 2258 cqr->status = DASD_CQR_FAILED; 2259 cqr->intrc = rc; 2260 continue; 2261 } 2262 } else 2263 wait_event(generic_waitq, !(device->stopped)); 2264 2265 if (!cqr->callback) 2266 cqr->callback = dasd_wakeup_cb; 2267 cqr->callback_data = DASD_SLEEPON_START_TAG; 2268 dasd_add_request_tail(cqr); 2269 } 2270 2271 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2272 2273 rc = 0; 2274 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2275 /* 2276 * for alias devices simplify error recovery and 2277 * return to upper layer 2278 * do not skip ERP requests 2279 */ 2280 if (cqr->startdev != cqr->basedev && !cqr->refers && 2281 (cqr->status == DASD_CQR_TERMINATED || 2282 cqr->status == DASD_CQR_NEED_ERP)) 2283 return -EAGAIN; 2284 2285 /* normal recovery for basedev IO */ 2286 if (__dasd_sleep_on_erp(cqr)) 2287 /* handle erp first */ 2288 goto retry; 2289 } 2290 2291 return 0; 2292 } 2293 2294 /* 2295 * Queue a request to the tail of the device ccw_queue and wait for 2296 * it's completion. 2297 */ 2298 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2299 { 2300 return _dasd_sleep_on(cqr, 0); 2301 } 2302 EXPORT_SYMBOL(dasd_sleep_on); 2303 2304 /* 2305 * Start requests from a ccw_queue and wait for their completion. 2306 */ 2307 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2308 { 2309 return _dasd_sleep_on_queue(ccw_queue, 0); 2310 } 2311 EXPORT_SYMBOL(dasd_sleep_on_queue); 2312 2313 /* 2314 * Queue a request to the tail of the device ccw_queue and wait 2315 * interruptible for it's completion. 2316 */ 2317 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2318 { 2319 return _dasd_sleep_on(cqr, 1); 2320 } 2321 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2322 2323 /* 2324 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2325 * for eckd devices) the currently running request has to be terminated 2326 * and be put back to status queued, before the special request is added 2327 * to the head of the queue. Then the special request is waited on normally. 
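 *
 * Illustrative caller sketch (the real steal lock handling lives in the
 * ECKD discipline): the special request is flagged so that it may still
 * be started while the device lock is marked as stolen, then run through
 * dasd_sleep_on_immediatly():
 *
 *	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);
 *	rc = dasd_sleep_on_immediatly(cqr);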
2328 */ 2329 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2330 { 2331 struct dasd_ccw_req *cqr; 2332 int rc; 2333 2334 if (list_empty(&device->ccw_queue)) 2335 return 0; 2336 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2337 rc = device->discipline->term_IO(cqr); 2338 if (!rc) 2339 /* 2340 * CQR terminated because a more important request is pending. 2341 * Undo decreasing of retry counter because this is 2342 * not an error case. 2343 */ 2344 cqr->retries++; 2345 return rc; 2346 } 2347 2348 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2349 { 2350 struct dasd_device *device; 2351 int rc; 2352 2353 device = cqr->startdev; 2354 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2355 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2356 cqr->status = DASD_CQR_FAILED; 2357 cqr->intrc = -EPERM; 2358 return -EIO; 2359 } 2360 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2361 rc = _dasd_term_running_cqr(device); 2362 if (rc) { 2363 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2364 return rc; 2365 } 2366 cqr->callback = dasd_wakeup_cb; 2367 cqr->callback_data = DASD_SLEEPON_START_TAG; 2368 cqr->status = DASD_CQR_QUEUED; 2369 /* 2370 * add new request as second 2371 * first the terminated cqr needs to be finished 2372 */ 2373 list_add(&cqr->devlist, device->ccw_queue.next); 2374 2375 /* let the bh start the request to keep them in order */ 2376 dasd_schedule_device_bh(device); 2377 2378 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2379 2380 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2381 2382 if (cqr->status == DASD_CQR_DONE) 2383 rc = 0; 2384 else if (cqr->intrc) 2385 rc = cqr->intrc; 2386 else 2387 rc = -EIO; 2388 2389 /* kick tasklets */ 2390 dasd_schedule_device_bh(device); 2391 if (device->block) 2392 dasd_schedule_block_bh(device->block); 2393 2394 return rc; 2395 } 2396 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2397 2398 /* 2399 * Cancels a request that was started with dasd_sleep_on_req. 2400 * This is useful to timeout requests. The request will be 2401 * terminated if it is currently in i/o. 2402 * Returns 0 if request termination was successful 2403 * negative error code if termination failed 2404 * Cancellation of a request is an asynchronous operation! The calling 2405 * function has to wait until the request is properly returned via callback. 2406 */ 2407 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2408 { 2409 struct dasd_device *device = cqr->startdev; 2410 unsigned long flags; 2411 int rc; 2412 2413 rc = 0; 2414 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2415 switch (cqr->status) { 2416 case DASD_CQR_QUEUED: 2417 /* request was not started - just set to cleared */ 2418 cqr->status = DASD_CQR_CLEARED; 2419 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2420 cqr->callback_data = DASD_SLEEPON_END_TAG; 2421 break; 2422 case DASD_CQR_IN_IO: 2423 /* request in IO - terminate IO and release again */ 2424 rc = device->discipline->term_IO(cqr); 2425 if (rc) { 2426 dev_err(&device->cdev->dev, 2427 "Cancelling request %p failed with rc=%d\n", 2428 cqr, rc); 2429 } else { 2430 cqr->stopclk = get_tod_clock(); 2431 } 2432 break; 2433 default: /* already finished or clear pending - do nothing */ 2434 break; 2435 } 2436 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2437 dasd_schedule_device_bh(device); 2438 return rc; 2439 } 2440 EXPORT_SYMBOL(dasd_cancel_req); 2441 2442 /* 2443 * SECTION: Operations of the dasd_block layer. 2444 */ 2445 2446 /* 2447 * Timeout function for dasd_block. 
This is used when the block layer 2448 * is waiting for something that may not come reliably, (e.g. a state 2449 * change interrupt) 2450 */ 2451 static void dasd_block_timeout(unsigned long ptr) 2452 { 2453 unsigned long flags; 2454 struct dasd_block *block; 2455 2456 block = (struct dasd_block *) ptr; 2457 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2458 /* re-activate request queue */ 2459 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2460 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2461 dasd_schedule_block_bh(block); 2462 } 2463 2464 /* 2465 * Setup timeout for a dasd_block in jiffies. 2466 */ 2467 void dasd_block_set_timer(struct dasd_block *block, int expires) 2468 { 2469 if (expires == 0) 2470 del_timer(&block->timer); 2471 else 2472 mod_timer(&block->timer, jiffies + expires); 2473 } 2474 EXPORT_SYMBOL(dasd_block_set_timer); 2475 2476 /* 2477 * Clear timeout for a dasd_block. 2478 */ 2479 void dasd_block_clear_timer(struct dasd_block *block) 2480 { 2481 del_timer(&block->timer); 2482 } 2483 EXPORT_SYMBOL(dasd_block_clear_timer); 2484 2485 /* 2486 * Process finished error recovery ccw. 2487 */ 2488 static void __dasd_process_erp(struct dasd_device *device, 2489 struct dasd_ccw_req *cqr) 2490 { 2491 dasd_erp_fn_t erp_fn; 2492 2493 if (cqr->status == DASD_CQR_DONE) 2494 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2495 else 2496 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2497 erp_fn = device->discipline->erp_postaction(cqr); 2498 erp_fn(cqr); 2499 } 2500 2501 /* 2502 * Fetch requests from the block device queue. 2503 */ 2504 static void __dasd_process_request_queue(struct dasd_block *block) 2505 { 2506 struct request_queue *queue; 2507 struct request *req; 2508 struct dasd_ccw_req *cqr; 2509 struct dasd_device *basedev; 2510 unsigned long flags; 2511 queue = block->request_queue; 2512 basedev = block->base; 2513 /* No queue ? Then there is nothing to do. */ 2514 if (queue == NULL) 2515 return; 2516 2517 /* 2518 * We requeue request from the block device queue to the ccw 2519 * queue only in two states. In state DASD_STATE_READY the 2520 * partition detection is done and we need to requeue requests 2521 * for that. State DASD_STATE_ONLINE is normal block device 2522 * operation. 2523 */ 2524 if (basedev->state < DASD_STATE_READY) { 2525 while ((req = blk_fetch_request(block->request_queue))) 2526 __blk_end_request_all(req, -EIO); 2527 return; 2528 } 2529 /* Now we try to fetch requests from the request queue */ 2530 while ((req = blk_peek_request(queue))) { 2531 if (basedev->features & DASD_FEATURE_READONLY && 2532 rq_data_dir(req) == WRITE) { 2533 DBF_DEV_EVENT(DBF_ERR, basedev, 2534 "Rejecting write request %p", 2535 req); 2536 blk_start_request(req); 2537 __blk_end_request_all(req, -EIO); 2538 continue; 2539 } 2540 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2541 (basedev->features & DASD_FEATURE_FAILFAST || 2542 blk_noretry_request(req))) { 2543 DBF_DEV_EVENT(DBF_ERR, basedev, 2544 "Rejecting failfast request %p", 2545 req); 2546 blk_start_request(req); 2547 __blk_end_request_all(req, -ETIMEDOUT); 2548 continue; 2549 } 2550 cqr = basedev->discipline->build_cp(basedev, block, req); 2551 if (IS_ERR(cqr)) { 2552 if (PTR_ERR(cqr) == -EBUSY) 2553 break; /* normal end condition */ 2554 if (PTR_ERR(cqr) == -ENOMEM) 2555 break; /* terminate request queue loop */ 2556 if (PTR_ERR(cqr) == -EAGAIN) { 2557 /* 2558 * The current request cannot be build right 2559 * now, we have to try later. 
If this request 2560 * is the head-of-queue we stop the device 2561 * for 1/2 second. 2562 */ 2563 if (!list_empty(&block->ccw_queue)) 2564 break; 2565 spin_lock_irqsave( 2566 get_ccwdev_lock(basedev->cdev), flags); 2567 dasd_device_set_stop_bits(basedev, 2568 DASD_STOPPED_PENDING); 2569 spin_unlock_irqrestore( 2570 get_ccwdev_lock(basedev->cdev), flags); 2571 dasd_block_set_timer(block, HZ/2); 2572 break; 2573 } 2574 DBF_DEV_EVENT(DBF_ERR, basedev, 2575 "CCW creation failed (rc=%ld) " 2576 "on request %p", 2577 PTR_ERR(cqr), req); 2578 blk_start_request(req); 2579 __blk_end_request_all(req, -EIO); 2580 continue; 2581 } 2582 /* 2583 * Note: callback is set to dasd_return_cqr_cb in 2584 * __dasd_block_start_head to cover erp requests as well 2585 */ 2586 cqr->callback_data = (void *) req; 2587 cqr->status = DASD_CQR_FILLED; 2588 req->completion_data = cqr; 2589 blk_start_request(req); 2590 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2591 INIT_LIST_HEAD(&cqr->devlist); 2592 dasd_profile_start(block, cqr, req); 2593 } 2594 } 2595 2596 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2597 { 2598 struct request *req; 2599 int status; 2600 int error = 0; 2601 2602 req = (struct request *) cqr->callback_data; 2603 dasd_profile_end(cqr->block, cqr, req); 2604 status = cqr->block->base->discipline->free_cp(cqr, req); 2605 if (status < 0) 2606 error = status; 2607 else if (status == 0) { 2608 if (cqr->intrc == -EPERM) 2609 error = -EBADE; 2610 else if (cqr->intrc == -ENOLINK || 2611 cqr->intrc == -ETIMEDOUT) 2612 error = cqr->intrc; 2613 else 2614 error = -EIO; 2615 } 2616 __blk_end_request_all(req, error); 2617 } 2618 2619 /* 2620 * Process ccw request queue. 2621 */ 2622 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2623 struct list_head *final_queue) 2624 { 2625 struct list_head *l, *n; 2626 struct dasd_ccw_req *cqr; 2627 dasd_erp_fn_t erp_fn; 2628 unsigned long flags; 2629 struct dasd_device *base = block->base; 2630 2631 restart: 2632 /* Process request with final status. */ 2633 list_for_each_safe(l, n, &block->ccw_queue) { 2634 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2635 if (cqr->status != DASD_CQR_DONE && 2636 cqr->status != DASD_CQR_FAILED && 2637 cqr->status != DASD_CQR_NEED_ERP && 2638 cqr->status != DASD_CQR_TERMINATED) 2639 continue; 2640 2641 if (cqr->status == DASD_CQR_TERMINATED) { 2642 base->discipline->handle_terminated_request(cqr); 2643 goto restart; 2644 } 2645 2646 /* Process requests that may be recovered */ 2647 if (cqr->status == DASD_CQR_NEED_ERP) { 2648 erp_fn = base->discipline->erp_action(cqr); 2649 if (IS_ERR(erp_fn(cqr))) 2650 continue; 2651 goto restart; 2652 } 2653 2654 /* log sense for fatal error */ 2655 if (cqr->status == DASD_CQR_FAILED) { 2656 dasd_log_sense(cqr, &cqr->irb); 2657 } 2658 2659 /* First of all call extended error reporting. */ 2660 if (dasd_eer_enabled(base) && 2661 cqr->status == DASD_CQR_FAILED) { 2662 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2663 2664 /* restart request */ 2665 cqr->status = DASD_CQR_FILLED; 2666 cqr->retries = 255; 2667 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2668 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2669 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2670 flags); 2671 goto restart; 2672 } 2673 2674 /* Process finished ERP request. 
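 *
 * cqr->refers links an ERP request to the request it was created for;
 * __dasd_process_erp() runs the discipline's erp_postaction on it and the
 * queue scan is restarted from the beginning.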
*/ 2675 if (cqr->refers) { 2676 __dasd_process_erp(base, cqr); 2677 goto restart; 2678 } 2679 2680 /* Rechain finished requests to final queue */ 2681 cqr->endclk = get_tod_clock(); 2682 list_move_tail(&cqr->blocklist, final_queue); 2683 } 2684 } 2685 2686 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2687 { 2688 dasd_schedule_block_bh(cqr->block); 2689 } 2690 2691 static void __dasd_block_start_head(struct dasd_block *block) 2692 { 2693 struct dasd_ccw_req *cqr; 2694 2695 if (list_empty(&block->ccw_queue)) 2696 return; 2697 /* We allways begin with the first requests on the queue, as some 2698 * of previously started requests have to be enqueued on a 2699 * dasd_device again for error recovery. 2700 */ 2701 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2702 if (cqr->status != DASD_CQR_FILLED) 2703 continue; 2704 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2705 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2706 cqr->status = DASD_CQR_FAILED; 2707 cqr->intrc = -EPERM; 2708 dasd_schedule_block_bh(block); 2709 continue; 2710 } 2711 /* Non-temporary stop condition will trigger fail fast */ 2712 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2713 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2714 (!dasd_eer_enabled(block->base))) { 2715 cqr->status = DASD_CQR_FAILED; 2716 cqr->intrc = -ENOLINK; 2717 dasd_schedule_block_bh(block); 2718 continue; 2719 } 2720 /* Don't try to start requests if device is stopped */ 2721 if (block->base->stopped) 2722 return; 2723 2724 /* just a fail safe check, should not happen */ 2725 if (!cqr->startdev) 2726 cqr->startdev = block->base; 2727 2728 /* make sure that the requests we submit find their way back */ 2729 cqr->callback = dasd_return_cqr_cb; 2730 2731 dasd_add_request_tail(cqr); 2732 } 2733 } 2734 2735 /* 2736 * Central dasd_block layer routine. Takes requests from the generic 2737 * block layer request queue, creates ccw requests, enqueues them on 2738 * a dasd_device and processes ccw requests that have been returned. 2739 */ 2740 static void dasd_block_tasklet(struct dasd_block *block) 2741 { 2742 struct list_head final_queue; 2743 struct list_head *l, *n; 2744 struct dasd_ccw_req *cqr; 2745 2746 atomic_set(&block->tasklet_scheduled, 0); 2747 INIT_LIST_HEAD(&final_queue); 2748 spin_lock(&block->queue_lock); 2749 /* Finish off requests on ccw queue */ 2750 __dasd_process_block_ccw_queue(block, &final_queue); 2751 spin_unlock(&block->queue_lock); 2752 /* Now call the callback function of requests with final status */ 2753 spin_lock_irq(&block->request_queue_lock); 2754 list_for_each_safe(l, n, &final_queue) { 2755 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2756 list_del_init(&cqr->blocklist); 2757 __dasd_cleanup_cqr(cqr); 2758 } 2759 spin_lock(&block->queue_lock); 2760 /* Get new request from the block device request queue */ 2761 __dasd_process_request_queue(block); 2762 /* Now check if the head of the ccw queue needs to be started. 
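 *
 * Lock nesting note: request_queue_lock is the outer lock here and
 * queue_lock is nested inside it, the same order used by
 * do_dasd_request(), which the block layer invokes with
 * request_queue_lock already held.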
*/ 2763 __dasd_block_start_head(block); 2764 spin_unlock(&block->queue_lock); 2765 spin_unlock_irq(&block->request_queue_lock); 2766 if (waitqueue_active(&shutdown_waitq)) 2767 wake_up(&shutdown_waitq); 2768 dasd_put_device(block->base); 2769 } 2770 2771 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2772 { 2773 wake_up(&dasd_flush_wq); 2774 } 2775 2776 /* 2777 * Requeue a request back to the block request queue 2778 * only works for block requests 2779 */ 2780 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2781 { 2782 struct dasd_block *block = cqr->block; 2783 struct request *req; 2784 unsigned long flags; 2785 2786 if (!block) 2787 return -EINVAL; 2788 spin_lock_irqsave(&block->queue_lock, flags); 2789 req = (struct request *) cqr->callback_data; 2790 blk_requeue_request(block->request_queue, req); 2791 spin_unlock_irqrestore(&block->queue_lock, flags); 2792 2793 return 0; 2794 } 2795 2796 /* 2797 * Go through all request on the dasd_block request queue, cancel them 2798 * on the respective dasd_device, and return them to the generic 2799 * block layer. 2800 */ 2801 static int dasd_flush_block_queue(struct dasd_block *block) 2802 { 2803 struct dasd_ccw_req *cqr, *n; 2804 int rc, i; 2805 struct list_head flush_queue; 2806 2807 INIT_LIST_HEAD(&flush_queue); 2808 spin_lock_bh(&block->queue_lock); 2809 rc = 0; 2810 restart: 2811 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2812 /* if this request currently owned by a dasd_device cancel it */ 2813 if (cqr->status >= DASD_CQR_QUEUED) 2814 rc = dasd_cancel_req(cqr); 2815 if (rc < 0) 2816 break; 2817 /* Rechain request (including erp chain) so it won't be 2818 * touched by the dasd_block_tasklet anymore. 2819 * Replace the callback so we notice when the request 2820 * is returned from the dasd_device layer. 2821 */ 2822 cqr->callback = _dasd_wake_block_flush_cb; 2823 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2824 list_move_tail(&cqr->blocklist, &flush_queue); 2825 if (i > 1) 2826 /* moved more than one request - need to restart */ 2827 goto restart; 2828 } 2829 spin_unlock_bh(&block->queue_lock); 2830 /* Now call the callback function of flushed requests */ 2831 restart_cb: 2832 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2833 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2834 /* Process finished ERP request. */ 2835 if (cqr->refers) { 2836 spin_lock_bh(&block->queue_lock); 2837 __dasd_process_erp(block->base, cqr); 2838 spin_unlock_bh(&block->queue_lock); 2839 /* restart list_for_xx loop since dasd_process_erp 2840 * might remove multiple elements */ 2841 goto restart_cb; 2842 } 2843 /* call the callback function */ 2844 spin_lock_irq(&block->request_queue_lock); 2845 cqr->endclk = get_tod_clock(); 2846 list_del_init(&cqr->blocklist); 2847 __dasd_cleanup_cqr(cqr); 2848 spin_unlock_irq(&block->request_queue_lock); 2849 } 2850 return rc; 2851 } 2852 2853 /* 2854 * Schedules a call to dasd_tasklet over the device tasklet. 2855 */ 2856 void dasd_schedule_block_bh(struct dasd_block *block) 2857 { 2858 /* Protect against rescheduling. */ 2859 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2860 return; 2861 /* life cycle of block is bound to it's base device */ 2862 dasd_get_device(block->base); 2863 tasklet_hi_schedule(&block->tasklet); 2864 } 2865 EXPORT_SYMBOL(dasd_schedule_block_bh); 2866 2867 2868 /* 2869 * SECTION: external block device operations 2870 * (request queue handling, open, release, etc.) 
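 *
 * This covers the request function do_dasd_request(), the block layer
 * timeout callback dasd_times_out(), allocation, setup, flushing and
 * freeing of the request queue, and the open/release/getgeo entry points
 * collected in dasd_device_operations.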
2871 */ 2872 2873 /* 2874 * Dasd request queue function. Called from ll_rw_blk.c 2875 */ 2876 static void do_dasd_request(struct request_queue *queue) 2877 { 2878 struct dasd_block *block; 2879 2880 block = queue->queuedata; 2881 spin_lock(&block->queue_lock); 2882 /* Get new request from the block device request queue */ 2883 __dasd_process_request_queue(block); 2884 /* Now check if the head of the ccw queue needs to be started. */ 2885 __dasd_block_start_head(block); 2886 spin_unlock(&block->queue_lock); 2887 } 2888 2889 /* 2890 * Block timeout callback, called from the block layer 2891 * 2892 * request_queue lock is held on entry. 2893 * 2894 * Return values: 2895 * BLK_EH_RESET_TIMER if the request should be left running 2896 * BLK_EH_NOT_HANDLED if the request is handled or terminated 2897 * by the driver. 2898 */ 2899 enum blk_eh_timer_return dasd_times_out(struct request *req) 2900 { 2901 struct dasd_ccw_req *cqr = req->completion_data; 2902 struct dasd_block *block = req->q->queuedata; 2903 struct dasd_device *device; 2904 int rc = 0; 2905 2906 if (!cqr) 2907 return BLK_EH_NOT_HANDLED; 2908 2909 device = cqr->startdev ? cqr->startdev : block->base; 2910 if (!device->blk_timeout) 2911 return BLK_EH_RESET_TIMER; 2912 DBF_DEV_EVENT(DBF_WARNING, device, 2913 " dasd_times_out cqr %p status %x", 2914 cqr, cqr->status); 2915 2916 spin_lock(&block->queue_lock); 2917 spin_lock(get_ccwdev_lock(device->cdev)); 2918 cqr->retries = -1; 2919 cqr->intrc = -ETIMEDOUT; 2920 if (cqr->status >= DASD_CQR_QUEUED) { 2921 spin_unlock(get_ccwdev_lock(device->cdev)); 2922 rc = dasd_cancel_req(cqr); 2923 } else if (cqr->status == DASD_CQR_FILLED || 2924 cqr->status == DASD_CQR_NEED_ERP) { 2925 cqr->status = DASD_CQR_TERMINATED; 2926 spin_unlock(get_ccwdev_lock(device->cdev)); 2927 } else if (cqr->status == DASD_CQR_IN_ERP) { 2928 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 2929 2930 list_for_each_entry_safe(searchcqr, nextcqr, 2931 &block->ccw_queue, blocklist) { 2932 tmpcqr = searchcqr; 2933 while (tmpcqr->refers) 2934 tmpcqr = tmpcqr->refers; 2935 if (tmpcqr != cqr) 2936 continue; 2937 /* searchcqr is an ERP request for cqr */ 2938 searchcqr->retries = -1; 2939 searchcqr->intrc = -ETIMEDOUT; 2940 if (searchcqr->status >= DASD_CQR_QUEUED) { 2941 spin_unlock(get_ccwdev_lock(device->cdev)); 2942 rc = dasd_cancel_req(searchcqr); 2943 spin_lock(get_ccwdev_lock(device->cdev)); 2944 } else if ((searchcqr->status == DASD_CQR_FILLED) || 2945 (searchcqr->status == DASD_CQR_NEED_ERP)) { 2946 searchcqr->status = DASD_CQR_TERMINATED; 2947 rc = 0; 2948 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 2949 /* 2950 * Shouldn't happen; most recent ERP 2951 * request is at the front of queue 2952 */ 2953 continue; 2954 } 2955 break; 2956 } 2957 spin_unlock(get_ccwdev_lock(device->cdev)); 2958 } 2959 dasd_schedule_block_bh(block); 2960 spin_unlock(&block->queue_lock); 2961 2962 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 2963 } 2964 2965 /* 2966 * Allocate and initialize request queue and default I/O scheduler. 
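 *
 * The queue is created with blk_init_queue() and the default elevator is
 * then replaced by the "deadline" scheduler; sysfs_lock is held across
 * elevator_init() so the switch cannot race with an elevator change made
 * through sysfs.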
2967 */ 2968 static int dasd_alloc_queue(struct dasd_block *block) 2969 { 2970 int rc; 2971 2972 block->request_queue = blk_init_queue(do_dasd_request, 2973 &block->request_queue_lock); 2974 if (block->request_queue == NULL) 2975 return -ENOMEM; 2976 2977 block->request_queue->queuedata = block; 2978 2979 elevator_exit(block->request_queue->elevator); 2980 block->request_queue->elevator = NULL; 2981 mutex_lock(&block->request_queue->sysfs_lock); 2982 rc = elevator_init(block->request_queue, "deadline"); 2983 if (rc) 2984 blk_cleanup_queue(block->request_queue); 2985 mutex_unlock(&block->request_queue->sysfs_lock); 2986 return rc; 2987 } 2988 2989 /* 2990 * Allocate and initialize request queue. 2991 */ 2992 static void dasd_setup_queue(struct dasd_block *block) 2993 { 2994 int max; 2995 2996 if (block->base->features & DASD_FEATURE_USERAW) { 2997 /* 2998 * the max_blocks value for raw_track access is 256 2999 * it is higher than the native ECKD value because we 3000 * only need one ccw per track 3001 * so the max_hw_sectors are 3002 * 2048 x 512B = 1024kB = 16 tracks 3003 */ 3004 max = 2048; 3005 } else { 3006 max = block->base->discipline->max_blocks << block->s2b_shift; 3007 } 3008 blk_queue_logical_block_size(block->request_queue, 3009 block->bp_block); 3010 blk_queue_max_hw_sectors(block->request_queue, max); 3011 blk_queue_max_segments(block->request_queue, -1L); 3012 /* with page sized segments we can translate each segement into 3013 * one idaw/tidaw 3014 */ 3015 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3016 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3017 } 3018 3019 /* 3020 * Deactivate and free request queue. 3021 */ 3022 static void dasd_free_queue(struct dasd_block *block) 3023 { 3024 if (block->request_queue) { 3025 blk_cleanup_queue(block->request_queue); 3026 block->request_queue = NULL; 3027 } 3028 } 3029 3030 /* 3031 * Flush request on the request queue. 
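 *
 * Every request still sitting on the generic block layer queue is fetched
 * and ended with -EIO while request_queue_lock is held.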
3032 */ 3033 static void dasd_flush_request_queue(struct dasd_block *block) 3034 { 3035 struct request *req; 3036 3037 if (!block->request_queue) 3038 return; 3039 3040 spin_lock_irq(&block->request_queue_lock); 3041 while ((req = blk_fetch_request(block->request_queue))) 3042 __blk_end_request_all(req, -EIO); 3043 spin_unlock_irq(&block->request_queue_lock); 3044 } 3045 3046 static int dasd_open(struct block_device *bdev, fmode_t mode) 3047 { 3048 struct dasd_device *base; 3049 int rc; 3050 3051 base = dasd_device_from_gendisk(bdev->bd_disk); 3052 if (!base) 3053 return -ENODEV; 3054 3055 atomic_inc(&base->block->open_count); 3056 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3057 rc = -ENODEV; 3058 goto unlock; 3059 } 3060 3061 if (!try_module_get(base->discipline->owner)) { 3062 rc = -EINVAL; 3063 goto unlock; 3064 } 3065 3066 if (dasd_probeonly) { 3067 dev_info(&base->cdev->dev, 3068 "Accessing the DASD failed because it is in " 3069 "probeonly mode\n"); 3070 rc = -EPERM; 3071 goto out; 3072 } 3073 3074 if (base->state <= DASD_STATE_BASIC) { 3075 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3076 " Cannot open unrecognized device"); 3077 rc = -ENODEV; 3078 goto out; 3079 } 3080 3081 if ((mode & FMODE_WRITE) && 3082 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3083 (base->features & DASD_FEATURE_READONLY))) { 3084 rc = -EROFS; 3085 goto out; 3086 } 3087 3088 dasd_put_device(base); 3089 return 0; 3090 3091 out: 3092 module_put(base->discipline->owner); 3093 unlock: 3094 atomic_dec(&base->block->open_count); 3095 dasd_put_device(base); 3096 return rc; 3097 } 3098 3099 static void dasd_release(struct gendisk *disk, fmode_t mode) 3100 { 3101 struct dasd_device *base = dasd_device_from_gendisk(disk); 3102 if (base) { 3103 atomic_dec(&base->block->open_count); 3104 module_put(base->discipline->owner); 3105 dasd_put_device(base); 3106 } 3107 } 3108 3109 /* 3110 * Return disk geometry. 3111 */ 3112 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3113 { 3114 struct dasd_device *base; 3115 3116 base = dasd_device_from_gendisk(bdev->bd_disk); 3117 if (!base) 3118 return -ENODEV; 3119 3120 if (!base->discipline || 3121 !base->discipline->fill_geometry) { 3122 dasd_put_device(base); 3123 return -EINVAL; 3124 } 3125 base->discipline->fill_geometry(base->block, geo); 3126 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3127 dasd_put_device(base); 3128 return 0; 3129 } 3130 3131 const struct block_device_operations 3132 dasd_device_operations = { 3133 .owner = THIS_MODULE, 3134 .open = dasd_open, 3135 .release = dasd_release, 3136 .ioctl = dasd_ioctl, 3137 .compat_ioctl = dasd_ioctl, 3138 .getgeo = dasd_getgeo, 3139 }; 3140 3141 /******************************************************************************* 3142 * end of block device operations 3143 */ 3144 3145 static void 3146 dasd_exit(void) 3147 { 3148 #ifdef CONFIG_PROC_FS 3149 dasd_proc_exit(); 3150 #endif 3151 dasd_eer_exit(); 3152 if (dasd_page_cache != NULL) { 3153 kmem_cache_destroy(dasd_page_cache); 3154 dasd_page_cache = NULL; 3155 } 3156 dasd_gendisk_exit(); 3157 dasd_devmap_exit(); 3158 if (dasd_debug_area != NULL) { 3159 debug_unregister(dasd_debug_area); 3160 dasd_debug_area = NULL; 3161 } 3162 dasd_statistics_removeroot(); 3163 } 3164 3165 /* 3166 * SECTION: common functions for ccw_driver use 3167 */ 3168 3169 /* 3170 * Is the device read-only? 3171 * Note that this function does not report the setting of the 3172 * readonly device attribute, but how it is configured in z/VM. 
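 *
 * Under z/VM the virtual device characteristics are read with diag210();
 * the 0x80 bit of the returned virtual device flags marks the device as
 * read-only. A discipline would typically use it like this during device
 * setup (illustrative):
 *
 *	if (dasd_device_is_ro(device))
 *		set_bit(DASD_FLAG_DEVICE_RO, &device->flags);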
3173 */ 3174 int dasd_device_is_ro(struct dasd_device *device) 3175 { 3176 struct ccw_dev_id dev_id; 3177 struct diag210 diag_data; 3178 int rc; 3179 3180 if (!MACHINE_IS_VM) 3181 return 0; 3182 ccw_device_get_id(device->cdev, &dev_id); 3183 memset(&diag_data, 0, sizeof(diag_data)); 3184 diag_data.vrdcdvno = dev_id.devno; 3185 diag_data.vrdclen = sizeof(diag_data); 3186 rc = diag210(&diag_data); 3187 if (rc == 0 || rc == 2) { 3188 return diag_data.vrdcvfla & 0x80; 3189 } else { 3190 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3191 dev_id.devno, rc); 3192 return 0; 3193 } 3194 } 3195 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3196 3197 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3198 { 3199 struct ccw_device *cdev = data; 3200 int ret; 3201 3202 ret = ccw_device_set_online(cdev); 3203 if (ret) 3204 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3205 dev_name(&cdev->dev), ret); 3206 } 3207 3208 /* 3209 * Initial attempt at a probe function. this can be simplified once 3210 * the other detection code is gone. 3211 */ 3212 int dasd_generic_probe(struct ccw_device *cdev, 3213 struct dasd_discipline *discipline) 3214 { 3215 int ret; 3216 3217 ret = dasd_add_sysfs_files(cdev); 3218 if (ret) { 3219 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3220 "dasd_generic_probe: could not add " 3221 "sysfs entries"); 3222 return ret; 3223 } 3224 cdev->handler = &dasd_int_handler; 3225 3226 /* 3227 * Automatically online either all dasd devices (dasd_autodetect) 3228 * or all devices specified with dasd= parameters during 3229 * initial probe. 3230 */ 3231 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3232 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3233 async_schedule(dasd_generic_auto_online, cdev); 3234 return 0; 3235 } 3236 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3237 3238 /* 3239 * This will one day be called from a global not_oper handler. 3240 * It is also used by driver_unregister during module unload. 3241 */ 3242 void dasd_generic_remove(struct ccw_device *cdev) 3243 { 3244 struct dasd_device *device; 3245 struct dasd_block *block; 3246 3247 cdev->handler = NULL; 3248 3249 device = dasd_device_from_cdev(cdev); 3250 if (IS_ERR(device)) { 3251 dasd_remove_sysfs_files(cdev); 3252 return; 3253 } 3254 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3255 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3256 /* Already doing offline processing */ 3257 dasd_put_device(device); 3258 dasd_remove_sysfs_files(cdev); 3259 return; 3260 } 3261 /* 3262 * This device is removed unconditionally. Set offline 3263 * flag to prevent dasd_open from opening it while it is 3264 * no quite down yet. 3265 */ 3266 dasd_set_target_state(device, DASD_STATE_NEW); 3267 /* dasd_delete_device destroys the device reference. */ 3268 block = device->block; 3269 dasd_delete_device(device); 3270 /* 3271 * life cycle of block is bound to device, so delete it after 3272 * device was safely removed 3273 */ 3274 if (block) 3275 dasd_free_block(block); 3276 3277 dasd_remove_sysfs_files(cdev); 3278 } 3279 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3280 3281 /* 3282 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3283 * the device is detected for the first time and is supposed to be used 3284 * or the user has started activation through sysfs. 
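 *
 * Together with the other dasd_generic_* helpers this is what a
 * discipline plugs into its ccw_driver; an illustrative sketch (the
 * dasd_xxx_* names are placeholders for discipline specific wrappers):
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		.probe       = dasd_xxx_probe,
 *		.remove      = dasd_generic_remove,
 *		.set_online  = dasd_xxx_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *		.path_event  = dasd_generic_path_event,
 *		.shutdown    = dasd_generic_shutdown,
 *	};
 *
 * where dasd_xxx_probe() calls dasd_generic_probe() and
 * dasd_xxx_set_online() calls dasd_generic_set_online() with the
 * discipline's dasd_discipline structure.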
3285 */ 3286 int dasd_generic_set_online(struct ccw_device *cdev, 3287 struct dasd_discipline *base_discipline) 3288 { 3289 struct dasd_discipline *discipline; 3290 struct dasd_device *device; 3291 int rc; 3292 3293 /* first online clears initial online feature flag */ 3294 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3295 device = dasd_create_device(cdev); 3296 if (IS_ERR(device)) 3297 return PTR_ERR(device); 3298 3299 discipline = base_discipline; 3300 if (device->features & DASD_FEATURE_USEDIAG) { 3301 if (!dasd_diag_discipline_pointer) { 3302 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3303 dev_name(&cdev->dev)); 3304 dasd_delete_device(device); 3305 return -ENODEV; 3306 } 3307 discipline = dasd_diag_discipline_pointer; 3308 } 3309 if (!try_module_get(base_discipline->owner)) { 3310 dasd_delete_device(device); 3311 return -EINVAL; 3312 } 3313 if (!try_module_get(discipline->owner)) { 3314 module_put(base_discipline->owner); 3315 dasd_delete_device(device); 3316 return -EINVAL; 3317 } 3318 device->base_discipline = base_discipline; 3319 device->discipline = discipline; 3320 3321 /* check_device will allocate block device if necessary */ 3322 rc = discipline->check_device(device); 3323 if (rc) { 3324 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3325 dev_name(&cdev->dev), discipline->name, rc); 3326 module_put(discipline->owner); 3327 module_put(base_discipline->owner); 3328 dasd_delete_device(device); 3329 return rc; 3330 } 3331 3332 dasd_set_target_state(device, DASD_STATE_ONLINE); 3333 if (device->state <= DASD_STATE_KNOWN) { 3334 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3335 dev_name(&cdev->dev)); 3336 rc = -ENODEV; 3337 dasd_set_target_state(device, DASD_STATE_NEW); 3338 if (device->block) 3339 dasd_free_block(device->block); 3340 dasd_delete_device(device); 3341 } else 3342 pr_debug("dasd_generic device %s found\n", 3343 dev_name(&cdev->dev)); 3344 3345 wait_event(dasd_init_waitq, _wait_for_device(device)); 3346 3347 dasd_put_device(device); 3348 return rc; 3349 } 3350 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3351 3352 int dasd_generic_set_offline(struct ccw_device *cdev) 3353 { 3354 struct dasd_device *device; 3355 struct dasd_block *block; 3356 int max_count, open_count, rc; 3357 3358 rc = 0; 3359 device = dasd_device_from_cdev(cdev); 3360 if (IS_ERR(device)) 3361 return PTR_ERR(device); 3362 3363 /* 3364 * We must make sure that this device is currently not in use. 3365 * The open_count is increased for every opener, that includes 3366 * the blkdev_get in dasd_scan_partitions. We are only interested 3367 * in the other openers. 3368 */ 3369 if (device->block) { 3370 max_count = device->block->bdev ? 
0 : -1; 3371 open_count = atomic_read(&device->block->open_count); 3372 if (open_count > max_count) { 3373 if (open_count > 0) 3374 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3375 dev_name(&cdev->dev), open_count); 3376 else 3377 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3378 dev_name(&cdev->dev)); 3379 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3380 dasd_put_device(device); 3381 return -EBUSY; 3382 } 3383 } 3384 3385 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3386 /* 3387 * safe offline already running 3388 * could only be called by normal offline so safe_offline flag 3389 * needs to be removed to run normal offline and kill all I/O 3390 */ 3391 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3392 /* Already doing normal offline processing */ 3393 dasd_put_device(device); 3394 return -EBUSY; 3395 } else 3396 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3397 3398 } else 3399 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3400 /* Already doing offline processing */ 3401 dasd_put_device(device); 3402 return -EBUSY; 3403 } 3404 3405 /* 3406 * if safe_offline called set safe_offline_running flag and 3407 * clear safe_offline so that a call to normal offline 3408 * can overrun safe_offline processing 3409 */ 3410 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3411 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3412 /* 3413 * If we want to set the device safe offline all IO operations 3414 * should be finished before continuing the offline process 3415 * so sync bdev first and then wait for our queues to become 3416 * empty 3417 */ 3418 /* sync blockdev and partitions */ 3419 rc = fsync_bdev(device->block->bdev); 3420 if (rc != 0) 3421 goto interrupted; 3422 3423 /* schedule device tasklet and wait for completion */ 3424 dasd_schedule_device_bh(device); 3425 rc = wait_event_interruptible(shutdown_waitq, 3426 _wait_for_empty_queues(device)); 3427 if (rc != 0) 3428 goto interrupted; 3429 } 3430 3431 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3432 dasd_set_target_state(device, DASD_STATE_NEW); 3433 /* dasd_delete_device destroys the device reference. */ 3434 block = device->block; 3435 dasd_delete_device(device); 3436 /* 3437 * life cycle of block is bound to device, so delete it after 3438 * device was safely removed 3439 */ 3440 if (block) 3441 dasd_free_block(block); 3442 return 0; 3443 3444 interrupted: 3445 /* interrupted by signal */ 3446 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3447 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3448 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3449 dasd_put_device(device); 3450 return rc; 3451 } 3452 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3453 3454 int dasd_generic_last_path_gone(struct dasd_device *device) 3455 { 3456 struct dasd_ccw_req *cqr; 3457 3458 dev_warn(&device->cdev->dev, "No operational channel path is left " 3459 "for the device\n"); 3460 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3461 /* First of all call extended error reporting. */ 3462 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3463 3464 if (device->state < DASD_STATE_BASIC) 3465 return 0; 3466 /* Device is active. We want to keep it. 
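 *
 * Requests that are currently in I/O or waiting for a clear interrupt are
 * put back to DASD_CQR_QUEUED with an extra retry, and the
 * DASD_STOPPED_DC_WAIT stop bit blocks any further I/O until
 * dasd_generic_path_operational() clears it when a path returns.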
*/ 3467 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3468 if ((cqr->status == DASD_CQR_IN_IO) || 3469 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3470 cqr->status = DASD_CQR_QUEUED; 3471 cqr->retries++; 3472 } 3473 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3474 dasd_device_clear_timer(device); 3475 dasd_schedule_device_bh(device); 3476 return 1; 3477 } 3478 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3479 3480 int dasd_generic_path_operational(struct dasd_device *device) 3481 { 3482 dev_info(&device->cdev->dev, "A channel path to the device has become " 3483 "operational\n"); 3484 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3485 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3486 if (device->stopped & DASD_UNRESUMED_PM) { 3487 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3488 dasd_restore_device(device); 3489 return 1; 3490 } 3491 dasd_schedule_device_bh(device); 3492 if (device->block) 3493 dasd_schedule_block_bh(device->block); 3494 3495 if (!device->stopped) 3496 wake_up(&generic_waitq); 3497 3498 return 1; 3499 } 3500 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3501 3502 int dasd_generic_notify(struct ccw_device *cdev, int event) 3503 { 3504 struct dasd_device *device; 3505 int ret; 3506 3507 device = dasd_device_from_cdev_locked(cdev); 3508 if (IS_ERR(device)) 3509 return 0; 3510 ret = 0; 3511 switch (event) { 3512 case CIO_GONE: 3513 case CIO_BOXED: 3514 case CIO_NO_PATH: 3515 device->path_data.opm = 0; 3516 device->path_data.ppm = 0; 3517 device->path_data.npm = 0; 3518 ret = dasd_generic_last_path_gone(device); 3519 break; 3520 case CIO_OPER: 3521 ret = 1; 3522 if (device->path_data.opm) 3523 ret = dasd_generic_path_operational(device); 3524 break; 3525 } 3526 dasd_put_device(device); 3527 return ret; 3528 } 3529 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3530 3531 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3532 { 3533 int chp; 3534 __u8 oldopm, eventlpm; 3535 struct dasd_device *device; 3536 3537 device = dasd_device_from_cdev_locked(cdev); 3538 if (IS_ERR(device)) 3539 return; 3540 for (chp = 0; chp < 8; chp++) { 3541 eventlpm = 0x80 >> chp; 3542 if (path_event[chp] & PE_PATH_GONE) { 3543 oldopm = device->path_data.opm; 3544 device->path_data.opm &= ~eventlpm; 3545 device->path_data.ppm &= ~eventlpm; 3546 device->path_data.npm &= ~eventlpm; 3547 if (oldopm && !device->path_data.opm) { 3548 dev_warn(&device->cdev->dev, 3549 "No verified channel paths remain " 3550 "for the device\n"); 3551 DBF_DEV_EVENT(DBF_WARNING, device, 3552 "%s", "last verified path gone"); 3553 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3554 dasd_device_set_stop_bits(device, 3555 DASD_STOPPED_DC_WAIT); 3556 } 3557 } 3558 if (path_event[chp] & PE_PATH_AVAILABLE) { 3559 device->path_data.opm &= ~eventlpm; 3560 device->path_data.ppm &= ~eventlpm; 3561 device->path_data.npm &= ~eventlpm; 3562 device->path_data.tbvpm |= eventlpm; 3563 dasd_schedule_device_bh(device); 3564 } 3565 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3566 if (!(device->path_data.opm & eventlpm) && 3567 !(device->path_data.tbvpm & eventlpm)) { 3568 /* 3569 * we can not establish a pathgroup on an 3570 * unavailable path, so trigger a path 3571 * verification first 3572 */ 3573 device->path_data.tbvpm |= eventlpm; 3574 dasd_schedule_device_bh(device); 3575 } 3576 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3577 "Pathgroup re-established\n"); 3578 if (device->discipline->kick_validate) 3579 device->discipline->kick_validate(device); 3580 
} 3581 } 3582 dasd_put_device(device); 3583 } 3584 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3585 3586 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3587 { 3588 if (!device->path_data.opm && lpm) { 3589 device->path_data.opm = lpm; 3590 dasd_generic_path_operational(device); 3591 } else 3592 device->path_data.opm |= lpm; 3593 return 0; 3594 } 3595 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3596 3597 3598 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3599 { 3600 struct dasd_device *device = dasd_device_from_cdev(cdev); 3601 struct list_head freeze_queue; 3602 struct dasd_ccw_req *cqr, *n; 3603 struct dasd_ccw_req *refers; 3604 int rc; 3605 3606 if (IS_ERR(device)) 3607 return PTR_ERR(device); 3608 3609 /* mark device as suspended */ 3610 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3611 3612 if (device->discipline->freeze) 3613 rc = device->discipline->freeze(device); 3614 3615 /* disallow new I/O */ 3616 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3617 3618 /* clear active requests and requeue them to block layer if possible */ 3619 INIT_LIST_HEAD(&freeze_queue); 3620 spin_lock_irq(get_ccwdev_lock(cdev)); 3621 rc = 0; 3622 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3623 /* Check status and move request to flush_queue */ 3624 if (cqr->status == DASD_CQR_IN_IO) { 3625 rc = device->discipline->term_IO(cqr); 3626 if (rc) { 3627 /* unable to terminate requeust */ 3628 dev_err(&device->cdev->dev, 3629 "Unable to terminate request %p " 3630 "on suspend\n", cqr); 3631 spin_unlock_irq(get_ccwdev_lock(cdev)); 3632 dasd_put_device(device); 3633 return rc; 3634 } 3635 } 3636 list_move_tail(&cqr->devlist, &freeze_queue); 3637 } 3638 spin_unlock_irq(get_ccwdev_lock(cdev)); 3639 3640 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3641 wait_event(dasd_flush_wq, 3642 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3643 if (cqr->status == DASD_CQR_CLEARED) 3644 cqr->status = DASD_CQR_QUEUED; 3645 3646 /* requeue requests to blocklayer will only work for 3647 block device requests */ 3648 if (_dasd_requeue_request(cqr)) 3649 continue; 3650 3651 /* remove requests from device and block queue */ 3652 list_del_init(&cqr->devlist); 3653 while (cqr->refers != NULL) { 3654 refers = cqr->refers; 3655 /* remove the request from the block queue */ 3656 list_del(&cqr->blocklist); 3657 /* free the finished erp request */ 3658 dasd_free_erp_request(cqr, cqr->memdev); 3659 cqr = refers; 3660 } 3661 if (cqr->block) 3662 list_del_init(&cqr->blocklist); 3663 cqr->block->base->discipline->free_cp( 3664 cqr, (struct request *) cqr->callback_data); 3665 } 3666 3667 /* 3668 * if requests remain then they are internal request 3669 * and go back to the device queue 3670 */ 3671 if (!list_empty(&freeze_queue)) { 3672 /* move freeze_queue to start of the ccw_queue */ 3673 spin_lock_irq(get_ccwdev_lock(cdev)); 3674 list_splice_tail(&freeze_queue, &device->ccw_queue); 3675 spin_unlock_irq(get_ccwdev_lock(cdev)); 3676 } 3677 dasd_put_device(device); 3678 return rc; 3679 } 3680 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3681 3682 int dasd_generic_restore_device(struct ccw_device *cdev) 3683 { 3684 struct dasd_device *device = dasd_device_from_cdev(cdev); 3685 int rc = 0; 3686 3687 if (IS_ERR(device)) 3688 return PTR_ERR(device); 3689 3690 /* allow new IO again */ 3691 dasd_device_remove_stop_bits(device, 3692 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3693 3694 dasd_schedule_device_bh(device); 3695 3696 /* 3697 * call discipline restore function 3698 * if device is stopped 
do nothing e.g. for disconnected devices 3699 */ 3700 if (device->discipline->restore && !(device->stopped)) 3701 rc = device->discipline->restore(device); 3702 if (rc || device->stopped) 3703 /* 3704 * if the resume failed for the DASD we put it in 3705 * an UNRESUMED stop state 3706 */ 3707 device->stopped |= DASD_UNRESUMED_PM; 3708 3709 if (device->block) 3710 dasd_schedule_block_bh(device->block); 3711 3712 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); 3713 dasd_put_device(device); 3714 return 0; 3715 } 3716 EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 3717 3718 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 3719 void *rdc_buffer, 3720 int rdc_buffer_size, 3721 int magic) 3722 { 3723 struct dasd_ccw_req *cqr; 3724 struct ccw1 *ccw; 3725 unsigned long *idaw; 3726 3727 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 3728 3729 if (IS_ERR(cqr)) { 3730 /* internal error 13 - Allocating the RDC request failed*/ 3731 dev_err(&device->cdev->dev, 3732 "An error occurred in the DASD device driver, " 3733 "reason=%s\n", "13"); 3734 return cqr; 3735 } 3736 3737 ccw = cqr->cpaddr; 3738 ccw->cmd_code = CCW_CMD_RDC; 3739 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { 3740 idaw = (unsigned long *) (cqr->data); 3741 ccw->cda = (__u32)(addr_t) idaw; 3742 ccw->flags = CCW_FLAG_IDA; 3743 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); 3744 } else { 3745 ccw->cda = (__u32)(addr_t) rdc_buffer; 3746 ccw->flags = 0; 3747 } 3748 3749 ccw->count = rdc_buffer_size; 3750 cqr->startdev = device; 3751 cqr->memdev = device; 3752 cqr->expires = 10*HZ; 3753 cqr->retries = 256; 3754 cqr->buildclk = get_tod_clock(); 3755 cqr->status = DASD_CQR_FILLED; 3756 return cqr; 3757 } 3758 3759 3760 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 3761 void *rdc_buffer, int rdc_buffer_size) 3762 { 3763 int ret; 3764 struct dasd_ccw_req *cqr; 3765 3766 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 3767 magic); 3768 if (IS_ERR(cqr)) 3769 return PTR_ERR(cqr); 3770 3771 ret = dasd_sleep_on(cqr); 3772 dasd_sfree_request(cqr, cqr->memdev); 3773 return ret; 3774 } 3775 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 3776 3777 /* 3778 * In command mode and transport mode we need to look for sense 3779 * data in different places. The sense data itself is allways 3780 * an array of 32 bytes, so we can unify the sense data access 3781 * for both modes. 
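 *
 * Illustrative use from an interrupt or ERP handler:
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense)
 *		... inspect the 32 sense bytes ...
 *	else
 *		... no sense data was delivered ...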
3782 */ 3783 char *dasd_get_sense(struct irb *irb) 3784 { 3785 struct tsb *tsb = NULL; 3786 char *sense = NULL; 3787 3788 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 3789 if (irb->scsw.tm.tcw) 3790 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 3791 irb->scsw.tm.tcw); 3792 if (tsb && tsb->length == 64 && tsb->flags) 3793 switch (tsb->flags & 0x07) { 3794 case 1: /* tsa_iostat */ 3795 sense = tsb->tsa.iostat.sense; 3796 break; 3797 case 2: /* tsa_ddpc */ 3798 sense = tsb->tsa.ddpc.sense; 3799 break; 3800 default: 3801 /* currently we don't use interrogate data */ 3802 break; 3803 } 3804 } else if (irb->esw.esw0.erw.cons) { 3805 sense = irb->ecw; 3806 } 3807 return sense; 3808 } 3809 EXPORT_SYMBOL_GPL(dasd_get_sense); 3810 3811 void dasd_generic_shutdown(struct ccw_device *cdev) 3812 { 3813 struct dasd_device *device; 3814 3815 device = dasd_device_from_cdev(cdev); 3816 if (IS_ERR(device)) 3817 return; 3818 3819 if (device->block) 3820 dasd_schedule_block_bh(device->block); 3821 3822 dasd_schedule_device_bh(device); 3823 3824 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); 3825 } 3826 EXPORT_SYMBOL_GPL(dasd_generic_shutdown); 3827 3828 static int __init dasd_init(void) 3829 { 3830 int rc; 3831 3832 init_waitqueue_head(&dasd_init_waitq); 3833 init_waitqueue_head(&dasd_flush_wq); 3834 init_waitqueue_head(&generic_waitq); 3835 init_waitqueue_head(&shutdown_waitq); 3836 3837 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 3838 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 3839 if (dasd_debug_area == NULL) { 3840 rc = -ENOMEM; 3841 goto failed; 3842 } 3843 debug_register_view(dasd_debug_area, &debug_sprintf_view); 3844 debug_set_level(dasd_debug_area, DBF_WARNING); 3845 3846 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 3847 3848 dasd_diag_discipline_pointer = NULL; 3849 3850 dasd_statistics_createroot(); 3851 3852 rc = dasd_devmap_init(); 3853 if (rc) 3854 goto failed; 3855 rc = dasd_gendisk_init(); 3856 if (rc) 3857 goto failed; 3858 rc = dasd_parse(); 3859 if (rc) 3860 goto failed; 3861 rc = dasd_eer_init(); 3862 if (rc) 3863 goto failed; 3864 #ifdef CONFIG_PROC_FS 3865 rc = dasd_proc_init(); 3866 if (rc) 3867 goto failed; 3868 #endif 3869 3870 return 0; 3871 failed: 3872 pr_info("The DASD device driver could not be initialized\n"); 3873 dasd_exit(); 3874 return rc; 3875 } 3876 3877 module_init(dasd_init); 3878 module_exit(dasd_exit); 3879
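
/*
 * Note on module setup: dasd_init() registers the debug area first because
 * every DBF_* macro relies on it, and it uses dasd_exit() as its common
 * error path, which is why dasd_exit() checks dasd_page_cache and
 * dasd_debug_area for NULL before tearing them down.
 */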