/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				goto out;
			}
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
441 */ 442 static int dasd_state_online_to_ready(struct dasd_device *device) 443 { 444 int rc; 445 struct gendisk *disk; 446 struct disk_part_iter piter; 447 struct hd_struct *part; 448 449 if (device->discipline->online_to_ready) { 450 rc = device->discipline->online_to_ready(device); 451 if (rc) 452 return rc; 453 } 454 455 device->state = DASD_STATE_READY; 456 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 457 disk = device->block->bdev->bd_disk; 458 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 459 while ((part = disk_part_iter_next(&piter))) 460 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 461 disk_part_iter_exit(&piter); 462 } 463 return 0; 464 } 465 466 /* 467 * Device startup state changes. 468 */ 469 static int dasd_increase_state(struct dasd_device *device) 470 { 471 int rc; 472 473 rc = 0; 474 if (device->state == DASD_STATE_NEW && 475 device->target >= DASD_STATE_KNOWN) 476 rc = dasd_state_new_to_known(device); 477 478 if (!rc && 479 device->state == DASD_STATE_KNOWN && 480 device->target >= DASD_STATE_BASIC) 481 rc = dasd_state_known_to_basic(device); 482 483 if (!rc && 484 device->state == DASD_STATE_BASIC && 485 device->target >= DASD_STATE_READY) 486 rc = dasd_state_basic_to_ready(device); 487 488 if (!rc && 489 device->state == DASD_STATE_UNFMT && 490 device->target > DASD_STATE_UNFMT) 491 rc = -EPERM; 492 493 if (!rc && 494 device->state == DASD_STATE_READY && 495 device->target >= DASD_STATE_ONLINE) 496 rc = dasd_state_ready_to_online(device); 497 498 return rc; 499 } 500 501 /* 502 * Device shutdown state changes. 503 */ 504 static int dasd_decrease_state(struct dasd_device *device) 505 { 506 int rc; 507 508 rc = 0; 509 if (device->state == DASD_STATE_ONLINE && 510 device->target <= DASD_STATE_READY) 511 rc = dasd_state_online_to_ready(device); 512 513 if (!rc && 514 device->state == DASD_STATE_READY && 515 device->target <= DASD_STATE_BASIC) 516 rc = dasd_state_ready_to_basic(device); 517 518 if (!rc && 519 device->state == DASD_STATE_UNFMT && 520 device->target <= DASD_STATE_BASIC) 521 rc = dasd_state_unfmt_to_basic(device); 522 523 if (!rc && 524 device->state == DASD_STATE_BASIC && 525 device->target <= DASD_STATE_KNOWN) 526 rc = dasd_state_basic_to_known(device); 527 528 if (!rc && 529 device->state == DASD_STATE_KNOWN && 530 device->target <= DASD_STATE_NEW) 531 rc = dasd_state_known_to_new(device); 532 533 return rc; 534 } 535 536 /* 537 * This is the main startup/shutdown routine. 538 */ 539 static void dasd_change_state(struct dasd_device *device) 540 { 541 int rc; 542 543 if (device->state == device->target) 544 /* Already where we want to go today... */ 545 return; 546 if (device->state < device->target) 547 rc = dasd_increase_state(device); 548 else 549 rc = dasd_decrease_state(device); 550 if (rc == -EAGAIN) 551 return; 552 if (rc) 553 device->target = device->state; 554 555 /* let user-space know that the device status changed */ 556 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 557 558 if (device->state == device->target) 559 wake_up(&dasd_init_waitq); 560 } 561 562 /* 563 * Kick starter for devices that did not complete the startup/shutdown 564 * procedure or were sleeping because of a pending state. 565 * dasd_kick_device will schedule a call do do_kick_device to the kernel 566 * event daemon. 
567 */ 568 static void do_kick_device(struct work_struct *work) 569 { 570 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 571 mutex_lock(&device->state_mutex); 572 dasd_change_state(device); 573 mutex_unlock(&device->state_mutex); 574 dasd_schedule_device_bh(device); 575 dasd_put_device(device); 576 } 577 578 void dasd_kick_device(struct dasd_device *device) 579 { 580 dasd_get_device(device); 581 /* queue call to dasd_kick_device to the kernel event daemon. */ 582 schedule_work(&device->kick_work); 583 } 584 EXPORT_SYMBOL(dasd_kick_device); 585 586 /* 587 * dasd_reload_device will schedule a call do do_reload_device to the kernel 588 * event daemon. 589 */ 590 static void do_reload_device(struct work_struct *work) 591 { 592 struct dasd_device *device = container_of(work, struct dasd_device, 593 reload_device); 594 device->discipline->reload(device); 595 dasd_put_device(device); 596 } 597 598 void dasd_reload_device(struct dasd_device *device) 599 { 600 dasd_get_device(device); 601 /* queue call to dasd_reload_device to the kernel event daemon. */ 602 schedule_work(&device->reload_device); 603 } 604 EXPORT_SYMBOL(dasd_reload_device); 605 606 /* 607 * dasd_restore_device will schedule a call do do_restore_device to the kernel 608 * event daemon. 609 */ 610 static void do_restore_device(struct work_struct *work) 611 { 612 struct dasd_device *device = container_of(work, struct dasd_device, 613 restore_device); 614 device->cdev->drv->restore(device->cdev); 615 dasd_put_device(device); 616 } 617 618 void dasd_restore_device(struct dasd_device *device) 619 { 620 dasd_get_device(device); 621 /* queue call to dasd_restore_device to the kernel event daemon. */ 622 schedule_work(&device->restore_device); 623 } 624 625 /* 626 * Set the target state for a device and starts the state change. 627 */ 628 void dasd_set_target_state(struct dasd_device *device, int target) 629 { 630 dasd_get_device(device); 631 mutex_lock(&device->state_mutex); 632 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 633 if (dasd_probeonly && target > DASD_STATE_READY) 634 target = DASD_STATE_READY; 635 if (device->target != target) { 636 if (device->state == target) 637 wake_up(&dasd_init_waitq); 638 device->target = target; 639 } 640 if (device->state != device->target) 641 dasd_change_state(device); 642 mutex_unlock(&device->state_mutex); 643 dasd_put_device(device); 644 } 645 EXPORT_SYMBOL(dasd_set_target_state); 646 647 /* 648 * Enable devices with device numbers in [from..to]. 649 */ 650 static inline int _wait_for_device(struct dasd_device *device) 651 { 652 return (device->state == device->target); 653 } 654 655 void dasd_enable_device(struct dasd_device *device) 656 { 657 dasd_set_target_state(device, DASD_STATE_ONLINE); 658 if (device->state <= DASD_STATE_KNOWN) 659 /* No discipline for device found. */ 660 dasd_set_target_state(device, DASD_STATE_NEW); 661 /* Now wait for the devices to come up. */ 662 wait_event(dasd_init_waitq, _wait_for_device(device)); 663 664 dasd_reload_device(device); 665 if (device->discipline->kick_validate) 666 device->discipline->kick_validate(device); 667 } 668 EXPORT_SYMBOL(dasd_enable_device); 669 670 /* 671 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 
672 */ 673 674 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 675 676 #ifdef CONFIG_DASD_PROFILE 677 struct dasd_profile_info dasd_global_profile_data; 678 static struct dentry *dasd_global_profile_dentry; 679 static struct dentry *dasd_debugfs_global_entry; 680 681 /* 682 * Add profiling information for cqr before execution. 683 */ 684 static void dasd_profile_start(struct dasd_block *block, 685 struct dasd_ccw_req *cqr, 686 struct request *req) 687 { 688 struct list_head *l; 689 unsigned int counter; 690 struct dasd_device *device; 691 692 /* count the length of the chanq for statistics */ 693 counter = 0; 694 if (dasd_global_profile_level || block->profile.data) 695 list_for_each(l, &block->ccw_queue) 696 if (++counter >= 31) 697 break; 698 699 if (dasd_global_profile_level) { 700 dasd_global_profile_data.dasd_io_nr_req[counter]++; 701 if (rq_data_dir(req) == READ) 702 dasd_global_profile_data.dasd_read_nr_req[counter]++; 703 } 704 705 spin_lock(&block->profile.lock); 706 if (block->profile.data) { 707 block->profile.data->dasd_io_nr_req[counter]++; 708 if (rq_data_dir(req) == READ) 709 block->profile.data->dasd_read_nr_req[counter]++; 710 } 711 spin_unlock(&block->profile.lock); 712 713 /* 714 * We count the request for the start device, even though it may run on 715 * some other device due to error recovery. This way we make sure that 716 * we count each request only once. 717 */ 718 device = cqr->startdev; 719 if (device->profile.data) { 720 counter = 1; /* request is not yet queued on the start device */ 721 list_for_each(l, &device->ccw_queue) 722 if (++counter >= 31) 723 break; 724 } 725 spin_lock(&device->profile.lock); 726 if (device->profile.data) { 727 device->profile.data->dasd_io_nr_req[counter]++; 728 if (rq_data_dir(req) == READ) 729 device->profile.data->dasd_read_nr_req[counter]++; 730 } 731 spin_unlock(&device->profile.lock); 732 } 733 734 /* 735 * Add profiling information for cqr after execution. 
736 */ 737 738 #define dasd_profile_counter(value, index) \ 739 { \ 740 for (index = 0; index < 31 && value >> (2+index); index++) \ 741 ; \ 742 } 743 744 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 745 int is_alias, 746 int is_tpm, 747 int is_read, 748 long sectors, 749 int sectors_ind, 750 int tottime_ind, 751 int tottimeps_ind, 752 int strtime_ind, 753 int irqtime_ind, 754 int irqtimeps_ind, 755 int endtime_ind) 756 { 757 /* in case of an overflow, reset the whole profile */ 758 if (data->dasd_io_reqs == UINT_MAX) { 759 memset(data, 0, sizeof(*data)); 760 getnstimeofday(&data->starttod); 761 } 762 data->dasd_io_reqs++; 763 data->dasd_io_sects += sectors; 764 if (is_alias) 765 data->dasd_io_alias++; 766 if (is_tpm) 767 data->dasd_io_tpm++; 768 769 data->dasd_io_secs[sectors_ind]++; 770 data->dasd_io_times[tottime_ind]++; 771 data->dasd_io_timps[tottimeps_ind]++; 772 data->dasd_io_time1[strtime_ind]++; 773 data->dasd_io_time2[irqtime_ind]++; 774 data->dasd_io_time2ps[irqtimeps_ind]++; 775 data->dasd_io_time3[endtime_ind]++; 776 777 if (is_read) { 778 data->dasd_read_reqs++; 779 data->dasd_read_sects += sectors; 780 if (is_alias) 781 data->dasd_read_alias++; 782 if (is_tpm) 783 data->dasd_read_tpm++; 784 data->dasd_read_secs[sectors_ind]++; 785 data->dasd_read_times[tottime_ind]++; 786 data->dasd_read_time1[strtime_ind]++; 787 data->dasd_read_time2[irqtime_ind]++; 788 data->dasd_read_time3[endtime_ind]++; 789 } 790 } 791 792 static void dasd_profile_end(struct dasd_block *block, 793 struct dasd_ccw_req *cqr, 794 struct request *req) 795 { 796 long strtime, irqtime, endtime, tottime; /* in microseconds */ 797 long tottimeps, sectors; 798 struct dasd_device *device; 799 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 800 int irqtime_ind, irqtimeps_ind, endtime_ind; 801 802 device = cqr->startdev; 803 if (!(dasd_global_profile_level || 804 block->profile.data || 805 device->profile.data)) 806 return; 807 808 sectors = blk_rq_sectors(req); 809 if (!cqr->buildclk || !cqr->startclk || 810 !cqr->stopclk || !cqr->endclk || 811 !sectors) 812 return; 813 814 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 815 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 816 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 817 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 818 tottimeps = tottime / sectors; 819 820 dasd_profile_counter(sectors, sectors_ind); 821 dasd_profile_counter(tottime, tottime_ind); 822 dasd_profile_counter(tottimeps, tottimeps_ind); 823 dasd_profile_counter(strtime, strtime_ind); 824 dasd_profile_counter(irqtime, irqtime_ind); 825 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 826 dasd_profile_counter(endtime, endtime_ind); 827 828 if (dasd_global_profile_level) { 829 dasd_profile_end_add_data(&dasd_global_profile_data, 830 cqr->startdev != block->base, 831 cqr->cpmode == 1, 832 rq_data_dir(req) == READ, 833 sectors, sectors_ind, tottime_ind, 834 tottimeps_ind, strtime_ind, 835 irqtime_ind, irqtimeps_ind, 836 endtime_ind); 837 } 838 839 spin_lock(&block->profile.lock); 840 if (block->profile.data) 841 dasd_profile_end_add_data(block->profile.data, 842 cqr->startdev != block->base, 843 cqr->cpmode == 1, 844 rq_data_dir(req) == READ, 845 sectors, sectors_ind, tottime_ind, 846 tottimeps_ind, strtime_ind, 847 irqtime_ind, irqtimeps_ind, 848 endtime_ind); 849 spin_unlock(&block->profile.lock); 850 851 spin_lock(&device->profile.lock); 852 if (device->profile.data) 853 dasd_profile_end_add_data(device->profile.data, 854 cqr->startdev != 

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

void dasd_global_profile_reset(void)
{
	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
	getnstimeofday(&dasd_global_profile_data.starttod);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (!rc)
			rc = user_len;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static ssize_t dasd_stats_global_write(struct file *file,
				       const char __user *user_buf,
				       size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	ssize_t rc;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_global_profile_reset();
	} else if (strncmp(str, "on", 2) == 0) {
		dasd_global_profile_reset();
		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_global_profile_level = DASD_PROFILE_OFF;
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static int dasd_stats_global_show(struct seq_file *m, void *v)
{
	if (!dasd_global_profile_level) {
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, &dasd_global_profile_data);
	return 0;
}

static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
	return single_open(file, dasd_stats_global_show, NULL);
}

static const struct file_operations dasd_stats_global_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_global_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_global_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	debugfs_remove(dasd_global_profile_dentry);
	dasd_global_profile_dentry = NULL;
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	umode_t mode;
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	dasd_debugfs_global_entry = NULL;
	dasd_global_profile_dentry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;

	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
				  NULL, &dasd_stats_global_fops);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_global_profile_dentry = pde;
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */
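
/*
 * Usage note for the statistics interface created above (only present with
 * CONFIG_DASD_PROFILE=y): every device, block and the global "statistics"
 * debugfs file accepts the keywords "reset", "on" and "off", e.g.
 *
 *	echo on > /sys/kernel/debug/dasd/global/statistics
 *
 * (the path assumes debugfs is mounted at /sys/kernel/debug).
 */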
1208 */ 1209 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, 1210 int datasize, 1211 struct dasd_device *device) 1212 { 1213 struct dasd_ccw_req *cqr; 1214 1215 /* Sanity checks */ 1216 BUG_ON(datasize > PAGE_SIZE || 1217 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 1218 1219 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 1220 if (cqr == NULL) 1221 return ERR_PTR(-ENOMEM); 1222 cqr->cpaddr = NULL; 1223 if (cplength > 0) { 1224 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 1225 GFP_ATOMIC | GFP_DMA); 1226 if (cqr->cpaddr == NULL) { 1227 kfree(cqr); 1228 return ERR_PTR(-ENOMEM); 1229 } 1230 } 1231 cqr->data = NULL; 1232 if (datasize > 0) { 1233 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 1234 if (cqr->data == NULL) { 1235 kfree(cqr->cpaddr); 1236 kfree(cqr); 1237 return ERR_PTR(-ENOMEM); 1238 } 1239 } 1240 cqr->magic = magic; 1241 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1242 dasd_get_device(device); 1243 return cqr; 1244 } 1245 EXPORT_SYMBOL(dasd_kmalloc_request); 1246 1247 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, 1248 int datasize, 1249 struct dasd_device *device) 1250 { 1251 unsigned long flags; 1252 struct dasd_ccw_req *cqr; 1253 char *data; 1254 int size; 1255 1256 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 1257 if (cplength > 0) 1258 size += cplength * sizeof(struct ccw1); 1259 if (datasize > 0) 1260 size += datasize; 1261 spin_lock_irqsave(&device->mem_lock, flags); 1262 cqr = (struct dasd_ccw_req *) 1263 dasd_alloc_chunk(&device->ccw_chunks, size); 1264 spin_unlock_irqrestore(&device->mem_lock, flags); 1265 if (cqr == NULL) 1266 return ERR_PTR(-ENOMEM); 1267 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 1268 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 1269 cqr->cpaddr = NULL; 1270 if (cplength > 0) { 1271 cqr->cpaddr = (struct ccw1 *) data; 1272 data += cplength*sizeof(struct ccw1); 1273 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 1274 } 1275 cqr->data = NULL; 1276 if (datasize > 0) { 1277 cqr->data = data; 1278 memset(cqr->data, 0, datasize); 1279 } 1280 cqr->magic = magic; 1281 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1282 dasd_get_device(device); 1283 return cqr; 1284 } 1285 EXPORT_SYMBOL(dasd_smalloc_request); 1286 1287 /* 1288 * Free memory of a channel program. This function needs to free all the 1289 * idal lists that might have been created by dasd_set_cda and the 1290 * struct dasd_ccw_req itself. 1291 */ 1292 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1293 { 1294 #ifdef CONFIG_64BIT 1295 struct ccw1 *ccw; 1296 1297 /* Clear any idals used for the request. */ 1298 ccw = cqr->cpaddr; 1299 do { 1300 clear_normalized_cda(ccw); 1301 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 1302 #endif 1303 kfree(cqr->cpaddr); 1304 kfree(cqr->data); 1305 kfree(cqr); 1306 dasd_put_device(device); 1307 } 1308 EXPORT_SYMBOL(dasd_kfree_request); 1309 1310 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1311 { 1312 unsigned long flags; 1313 1314 spin_lock_irqsave(&device->mem_lock, flags); 1315 dasd_free_chunk(&device->ccw_chunks, cqr); 1316 spin_unlock_irqrestore(&device->mem_lock, flags); 1317 dasd_put_device(device); 1318 } 1319 EXPORT_SYMBOL(dasd_sfree_request); 1320 1321 /* 1322 * Check discipline magic in cqr. 
1323 */ 1324 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) 1325 { 1326 struct dasd_device *device; 1327 1328 if (cqr == NULL) 1329 return -EINVAL; 1330 device = cqr->startdev; 1331 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 1332 DBF_DEV_EVENT(DBF_WARNING, device, 1333 " dasd_ccw_req 0x%08x magic doesn't match" 1334 " discipline 0x%08x", 1335 cqr->magic, 1336 *(unsigned int *) device->discipline->name); 1337 return -EINVAL; 1338 } 1339 return 0; 1340 } 1341 1342 /* 1343 * Terminate the current i/o and set the request to clear_pending. 1344 * Timer keeps device runnig. 1345 * ccw_device_clear can fail if the i/o subsystem 1346 * is in a bad mood. 1347 */ 1348 int dasd_term_IO(struct dasd_ccw_req *cqr) 1349 { 1350 struct dasd_device *device; 1351 int retries, rc; 1352 char errorstring[ERRORLENGTH]; 1353 1354 /* Check the cqr */ 1355 rc = dasd_check_cqr(cqr); 1356 if (rc) 1357 return rc; 1358 retries = 0; 1359 device = (struct dasd_device *) cqr->startdev; 1360 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1361 rc = ccw_device_clear(device->cdev, (long) cqr); 1362 switch (rc) { 1363 case 0: /* termination successful */ 1364 cqr->status = DASD_CQR_CLEAR_PENDING; 1365 cqr->stopclk = get_tod_clock(); 1366 cqr->starttime = 0; 1367 DBF_DEV_EVENT(DBF_DEBUG, device, 1368 "terminate cqr %p successful", 1369 cqr); 1370 break; 1371 case -ENODEV: 1372 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1373 "device gone, retry"); 1374 break; 1375 case -EIO: 1376 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1377 "I/O error, retry"); 1378 break; 1379 case -EINVAL: 1380 case -EBUSY: 1381 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1382 "device busy, retry later"); 1383 break; 1384 default: 1385 /* internal error 10 - unknown rc*/ 1386 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1387 dev_err(&device->cdev->dev, "An error occurred in the " 1388 "DASD device driver, reason=%s\n", errorstring); 1389 BUG(); 1390 break; 1391 } 1392 retries++; 1393 } 1394 dasd_schedule_device_bh(device); 1395 return rc; 1396 } 1397 EXPORT_SYMBOL(dasd_term_IO); 1398 1399 /* 1400 * Start the i/o. This start_IO can fail if the channel is really busy. 1401 * In that case set up a timer to start the request later. 
1402 */ 1403 int dasd_start_IO(struct dasd_ccw_req *cqr) 1404 { 1405 struct dasd_device *device; 1406 int rc; 1407 char errorstring[ERRORLENGTH]; 1408 1409 /* Check the cqr */ 1410 rc = dasd_check_cqr(cqr); 1411 if (rc) { 1412 cqr->intrc = rc; 1413 return rc; 1414 } 1415 device = (struct dasd_device *) cqr->startdev; 1416 if (((cqr->block && 1417 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1418 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1419 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1420 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1421 "because of stolen lock", cqr); 1422 cqr->status = DASD_CQR_ERROR; 1423 cqr->intrc = -EPERM; 1424 return -EPERM; 1425 } 1426 if (cqr->retries < 0) { 1427 /* internal error 14 - start_IO run out of retries */ 1428 sprintf(errorstring, "14 %p", cqr); 1429 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1430 "device driver, reason=%s\n", errorstring); 1431 cqr->status = DASD_CQR_ERROR; 1432 return -EIO; 1433 } 1434 cqr->startclk = get_tod_clock(); 1435 cqr->starttime = jiffies; 1436 cqr->retries--; 1437 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1438 cqr->lpm &= device->path_data.opm; 1439 if (!cqr->lpm) 1440 cqr->lpm = device->path_data.opm; 1441 } 1442 if (cqr->cpmode == 1) { 1443 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1444 (long) cqr, cqr->lpm); 1445 } else { 1446 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1447 (long) cqr, cqr->lpm, 0); 1448 } 1449 switch (rc) { 1450 case 0: 1451 cqr->status = DASD_CQR_IN_IO; 1452 break; 1453 case -EBUSY: 1454 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1455 "start_IO: device busy, retry later"); 1456 break; 1457 case -ETIMEDOUT: 1458 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1459 "start_IO: request timeout, retry later"); 1460 break; 1461 case -EACCES: 1462 /* -EACCES indicates that the request used only a subset of the 1463 * available paths and all these paths are gone. If the lpm of 1464 * this request was only a subset of the opm (e.g. the ppm) then 1465 * we just do a retry with all available paths. 1466 * If we already use the full opm, something is amiss, and we 1467 * need a full path verification. 
1468 */ 1469 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1470 DBF_DEV_EVENT(DBF_WARNING, device, 1471 "start_IO: selected paths gone (%x)", 1472 cqr->lpm); 1473 } else if (cqr->lpm != device->path_data.opm) { 1474 cqr->lpm = device->path_data.opm; 1475 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1476 "start_IO: selected paths gone," 1477 " retry on all paths"); 1478 } else { 1479 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1480 "start_IO: all paths in opm gone," 1481 " do path verification"); 1482 dasd_generic_last_path_gone(device); 1483 device->path_data.opm = 0; 1484 device->path_data.ppm = 0; 1485 device->path_data.npm = 0; 1486 device->path_data.tbvpm = 1487 ccw_device_get_path_mask(device->cdev); 1488 } 1489 break; 1490 case -ENODEV: 1491 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1492 "start_IO: -ENODEV device gone, retry"); 1493 break; 1494 case -EIO: 1495 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1496 "start_IO: -EIO device gone, retry"); 1497 break; 1498 case -EINVAL: 1499 /* most likely caused in power management context */ 1500 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1501 "start_IO: -EINVAL device currently " 1502 "not accessible"); 1503 break; 1504 default: 1505 /* internal error 11 - unknown rc */ 1506 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1507 dev_err(&device->cdev->dev, 1508 "An error occurred in the DASD device driver, " 1509 "reason=%s\n", errorstring); 1510 BUG(); 1511 break; 1512 } 1513 cqr->intrc = rc; 1514 return rc; 1515 } 1516 EXPORT_SYMBOL(dasd_start_IO); 1517 1518 /* 1519 * Timeout function for dasd devices. This is used for different purposes 1520 * 1) missing interrupt handler for normal operation 1521 * 2) delayed start of request where start_IO failed with -EBUSY 1522 * 3) timeout for missing state change interrupts 1523 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1524 * DASD_CQR_QUEUED for 2) and 3). 1525 */ 1526 static void dasd_device_timeout(unsigned long ptr) 1527 { 1528 unsigned long flags; 1529 struct dasd_device *device; 1530 1531 device = (struct dasd_device *) ptr; 1532 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1533 /* re-activate request queue */ 1534 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1535 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1536 dasd_schedule_device_bh(device); 1537 } 1538 1539 /* 1540 * Setup timeout for a device in jiffies. 1541 */ 1542 void dasd_device_set_timer(struct dasd_device *device, int expires) 1543 { 1544 if (expires == 0) 1545 del_timer(&device->timer); 1546 else 1547 mod_timer(&device->timer, jiffies + expires); 1548 } 1549 EXPORT_SYMBOL(dasd_device_set_timer); 1550 1551 /* 1552 * Clear timeout for a device. 
1553 */ 1554 void dasd_device_clear_timer(struct dasd_device *device) 1555 { 1556 del_timer(&device->timer); 1557 } 1558 EXPORT_SYMBOL(dasd_device_clear_timer); 1559 1560 static void dasd_handle_killed_request(struct ccw_device *cdev, 1561 unsigned long intparm) 1562 { 1563 struct dasd_ccw_req *cqr; 1564 struct dasd_device *device; 1565 1566 if (!intparm) 1567 return; 1568 cqr = (struct dasd_ccw_req *) intparm; 1569 if (cqr->status != DASD_CQR_IN_IO) { 1570 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1571 "invalid status in handle_killed_request: " 1572 "%02x", cqr->status); 1573 return; 1574 } 1575 1576 device = dasd_device_from_cdev_locked(cdev); 1577 if (IS_ERR(device)) { 1578 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1579 "unable to get device from cdev"); 1580 return; 1581 } 1582 1583 if (!cqr->startdev || 1584 device != cqr->startdev || 1585 strncmp(cqr->startdev->discipline->ebcname, 1586 (char *) &cqr->magic, 4)) { 1587 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1588 "invalid device in request"); 1589 dasd_put_device(device); 1590 return; 1591 } 1592 1593 /* Schedule request to be retried. */ 1594 cqr->status = DASD_CQR_QUEUED; 1595 1596 dasd_device_clear_timer(device); 1597 dasd_schedule_device_bh(device); 1598 dasd_put_device(device); 1599 } 1600 1601 void dasd_generic_handle_state_change(struct dasd_device *device) 1602 { 1603 /* First of all start sense subsystem status request. */ 1604 dasd_eer_snss(device); 1605 1606 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1607 dasd_schedule_device_bh(device); 1608 if (device->block) 1609 dasd_schedule_block_bh(device->block); 1610 } 1611 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1612 1613 /* 1614 * Interrupt handler for "normal" ssch-io based dasd devices. 1615 */ 1616 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1617 struct irb *irb) 1618 { 1619 struct dasd_ccw_req *cqr, *next; 1620 struct dasd_device *device; 1621 unsigned long long now; 1622 int expires; 1623 1624 if (IS_ERR(irb)) { 1625 switch (PTR_ERR(irb)) { 1626 case -EIO: 1627 break; 1628 case -ETIMEDOUT: 1629 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1630 "request timed out\n", __func__); 1631 break; 1632 default: 1633 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1634 "unknown error %ld\n", __func__, 1635 PTR_ERR(irb)); 1636 } 1637 dasd_handle_killed_request(cdev, intparm); 1638 return; 1639 } 1640 1641 now = get_tod_clock(); 1642 cqr = (struct dasd_ccw_req *) intparm; 1643 /* check for conditions that should be handled immediately */ 1644 if (!cqr || 1645 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1646 scsw_cstat(&irb->scsw) == 0)) { 1647 if (cqr) 1648 memcpy(&cqr->irb, irb, sizeof(*irb)); 1649 device = dasd_device_from_cdev_locked(cdev); 1650 if (IS_ERR(device)) 1651 return; 1652 /* ignore unsolicited interrupts for DIAG discipline */ 1653 if (device->discipline == dasd_diag_discipline_pointer) { 1654 dasd_put_device(device); 1655 return; 1656 } 1657 device->discipline->dump_sense_dbf(device, irb, "int"); 1658 if (device->features & DASD_FEATURE_ERPLOG) 1659 device->discipline->dump_sense(device, cqr, irb); 1660 device->discipline->check_for_device_change(device, cqr, irb); 1661 dasd_put_device(device); 1662 } 1663 1664 /* check for for attention message */ 1665 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { 1666 device = dasd_device_from_cdev_locked(cdev); 1667 device->discipline->check_attention(device, irb->esw.esw1.lpum); 1668 dasd_put_device(device); 1669 } 1670 1671 if (!cqr) 1672 return; 1673 1674 device = 
	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		if (cqr->callback_data == DASD_SLEEPON_START_TAG)
			cqr->callback_data = DASD_SLEEPON_END_TAG;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		wake_up(&generic_waitq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
1772 */ 1773 static void __dasd_device_recovery(struct dasd_device *device, 1774 struct dasd_ccw_req *ref_cqr) 1775 { 1776 struct list_head *l, *n; 1777 struct dasd_ccw_req *cqr; 1778 1779 /* 1780 * only requeue request that came from the dasd_block layer 1781 */ 1782 if (!ref_cqr->block) 1783 return; 1784 1785 list_for_each_safe(l, n, &device->ccw_queue) { 1786 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1787 if (cqr->status == DASD_CQR_QUEUED && 1788 ref_cqr->block == cqr->block) { 1789 cqr->status = DASD_CQR_CLEARED; 1790 } 1791 } 1792 }; 1793 1794 /* 1795 * Remove those ccw requests from the queue that need to be returned 1796 * to the upper layer. 1797 */ 1798 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1799 struct list_head *final_queue) 1800 { 1801 struct list_head *l, *n; 1802 struct dasd_ccw_req *cqr; 1803 1804 /* Process request with final status. */ 1805 list_for_each_safe(l, n, &device->ccw_queue) { 1806 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1807 1808 /* Skip any non-final request. */ 1809 if (cqr->status == DASD_CQR_QUEUED || 1810 cqr->status == DASD_CQR_IN_IO || 1811 cqr->status == DASD_CQR_CLEAR_PENDING) 1812 continue; 1813 if (cqr->status == DASD_CQR_ERROR) { 1814 __dasd_device_recovery(device, cqr); 1815 } 1816 /* Rechain finished requests to final queue */ 1817 list_move_tail(&cqr->devlist, final_queue); 1818 } 1819 } 1820 1821 /* 1822 * the cqrs from the final queue are returned to the upper layer 1823 * by setting a dasd_block state and calling the callback function 1824 */ 1825 static void __dasd_device_process_final_queue(struct dasd_device *device, 1826 struct list_head *final_queue) 1827 { 1828 struct list_head *l, *n; 1829 struct dasd_ccw_req *cqr; 1830 struct dasd_block *block; 1831 void (*callback)(struct dasd_ccw_req *, void *data); 1832 void *callback_data; 1833 char errorstring[ERRORLENGTH]; 1834 1835 list_for_each_safe(l, n, final_queue) { 1836 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1837 list_del_init(&cqr->devlist); 1838 block = cqr->block; 1839 callback = cqr->callback; 1840 callback_data = cqr->callback_data; 1841 if (block) 1842 spin_lock_bh(&block->queue_lock); 1843 switch (cqr->status) { 1844 case DASD_CQR_SUCCESS: 1845 cqr->status = DASD_CQR_DONE; 1846 break; 1847 case DASD_CQR_ERROR: 1848 cqr->status = DASD_CQR_NEED_ERP; 1849 break; 1850 case DASD_CQR_CLEARED: 1851 cqr->status = DASD_CQR_TERMINATED; 1852 break; 1853 default: 1854 /* internal error 12 - wrong cqr status*/ 1855 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1856 dev_err(&device->cdev->dev, 1857 "An error occurred in the DASD device driver, " 1858 "reason=%s\n", errorstring); 1859 BUG(); 1860 } 1861 if (cqr->callback != NULL) 1862 (callback)(cqr, callback_data); 1863 if (block) 1864 spin_unlock_bh(&block->queue_lock); 1865 } 1866 } 1867 1868 /* 1869 * Take a look at the first request on the ccw queue and check 1870 * if it reached its expire time. If so, terminate the IO. 
1871 */ 1872 static void __dasd_device_check_expire(struct dasd_device *device) 1873 { 1874 struct dasd_ccw_req *cqr; 1875 1876 if (list_empty(&device->ccw_queue)) 1877 return; 1878 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1879 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1880 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1881 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1882 /* 1883 * IO in safe offline processing should not 1884 * run out of retries 1885 */ 1886 cqr->retries++; 1887 } 1888 if (device->discipline->term_IO(cqr) != 0) { 1889 /* Hmpf, try again in 5 sec */ 1890 dev_err(&device->cdev->dev, 1891 "cqr %p timed out (%lus) but cannot be " 1892 "ended, retrying in 5 s\n", 1893 cqr, (cqr->expires/HZ)); 1894 cqr->expires += 5*HZ; 1895 dasd_device_set_timer(device, 5*HZ); 1896 } else { 1897 dev_err(&device->cdev->dev, 1898 "cqr %p timed out (%lus), %i retries " 1899 "remaining\n", cqr, (cqr->expires/HZ), 1900 cqr->retries); 1901 } 1902 } 1903 } 1904 1905 /* 1906 * Take a look at the first request on the ccw queue and check 1907 * if it needs to be started. 1908 */ 1909 static void __dasd_device_start_head(struct dasd_device *device) 1910 { 1911 struct dasd_ccw_req *cqr; 1912 int rc; 1913 1914 if (list_empty(&device->ccw_queue)) 1915 return; 1916 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1917 if (cqr->status != DASD_CQR_QUEUED) 1918 return; 1919 /* when device is stopped, return request to previous layer 1920 * exception: only the disconnect or unresumed bits are set and the 1921 * cqr is a path verification request 1922 */ 1923 if (device->stopped && 1924 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 1925 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { 1926 cqr->intrc = -EAGAIN; 1927 cqr->status = DASD_CQR_CLEARED; 1928 dasd_schedule_device_bh(device); 1929 return; 1930 } 1931 1932 rc = device->discipline->start_IO(cqr); 1933 if (rc == 0) 1934 dasd_device_set_timer(device, cqr->expires); 1935 else if (rc == -EACCES) { 1936 dasd_schedule_device_bh(device); 1937 } else 1938 /* Hmpf, try again in 1/2 sec */ 1939 dasd_device_set_timer(device, 50); 1940 } 1941 1942 static void __dasd_device_check_path_events(struct dasd_device *device) 1943 { 1944 int rc; 1945 1946 if (device->path_data.tbvpm) { 1947 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 1948 DASD_UNRESUMED_PM)) 1949 return; 1950 rc = device->discipline->verify_path( 1951 device, device->path_data.tbvpm); 1952 if (rc) 1953 dasd_device_set_timer(device, 50); 1954 else 1955 device->path_data.tbvpm = 0; 1956 } 1957 }; 1958 1959 /* 1960 * Go through all request on the dasd_device request queue, 1961 * terminate them on the cdev if necessary, and return them to the 1962 * submitting layer via callback. 1963 * Note: 1964 * Make sure that all 'submitting layers' still exist when 1965 * this function is called!. In other words, when 'device' is a base 1966 * device then all block layer requests must have been removed before 1967 * via dasd_flush_block_queue. 
1968 */ 1969 int dasd_flush_device_queue(struct dasd_device *device) 1970 { 1971 struct dasd_ccw_req *cqr, *n; 1972 int rc; 1973 struct list_head flush_queue; 1974 1975 INIT_LIST_HEAD(&flush_queue); 1976 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1977 rc = 0; 1978 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1979 /* Check status and move request to flush_queue */ 1980 switch (cqr->status) { 1981 case DASD_CQR_IN_IO: 1982 rc = device->discipline->term_IO(cqr); 1983 if (rc) { 1984 /* unable to terminate requeust */ 1985 dev_err(&device->cdev->dev, 1986 "Flushing the DASD request queue " 1987 "failed for request %p\n", cqr); 1988 /* stop flush processing */ 1989 goto finished; 1990 } 1991 break; 1992 case DASD_CQR_QUEUED: 1993 cqr->stopclk = get_tod_clock(); 1994 cqr->status = DASD_CQR_CLEARED; 1995 break; 1996 default: /* no need to modify the others */ 1997 break; 1998 } 1999 list_move_tail(&cqr->devlist, &flush_queue); 2000 } 2001 finished: 2002 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2003 /* 2004 * After this point all requests must be in state CLEAR_PENDING, 2005 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2006 * one of the others. 2007 */ 2008 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2009 wait_event(dasd_flush_wq, 2010 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2011 /* 2012 * Now set each request back to TERMINATED, DONE or NEED_ERP 2013 * and call the callback function of flushed requests 2014 */ 2015 __dasd_device_process_final_queue(device, &flush_queue); 2016 return rc; 2017 } 2018 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2019 2020 /* 2021 * Acquire the device lock and process queues for the device. 2022 */ 2023 static void dasd_device_tasklet(struct dasd_device *device) 2024 { 2025 struct list_head final_queue; 2026 2027 atomic_set (&device->tasklet_scheduled, 0); 2028 INIT_LIST_HEAD(&final_queue); 2029 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2030 /* Check expire time of first request on the ccw queue. */ 2031 __dasd_device_check_expire(device); 2032 /* find final requests on ccw queue */ 2033 __dasd_device_process_ccw_queue(device, &final_queue); 2034 __dasd_device_check_path_events(device); 2035 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2036 /* Now call the callback function of requests with final status */ 2037 __dasd_device_process_final_queue(device, &final_queue); 2038 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2039 /* Now check if the head of the ccw queue needs to be started. */ 2040 __dasd_device_start_head(device); 2041 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2042 if (waitqueue_active(&shutdown_waitq)) 2043 wake_up(&shutdown_waitq); 2044 dasd_put_device(device); 2045 } 2046 2047 /* 2048 * Schedules a call to dasd_tasklet over the device tasklet. 2049 */ 2050 void dasd_schedule_device_bh(struct dasd_device *device) 2051 { 2052 /* Protect against rescheduling. 
*/ 2053 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2054 return; 2055 dasd_get_device(device); 2056 tasklet_hi_schedule(&device->tasklet); 2057 } 2058 EXPORT_SYMBOL(dasd_schedule_device_bh); 2059 2060 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2061 { 2062 device->stopped |= bits; 2063 } 2064 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2065 2066 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2067 { 2068 device->stopped &= ~bits; 2069 if (!device->stopped) 2070 wake_up(&generic_waitq); 2071 } 2072 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2073 2074 /* 2075 * Queue a request to the head of the device ccw_queue. 2076 * Start the I/O if possible. 2077 */ 2078 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2079 { 2080 struct dasd_device *device; 2081 unsigned long flags; 2082 2083 device = cqr->startdev; 2084 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2085 cqr->status = DASD_CQR_QUEUED; 2086 list_add(&cqr->devlist, &device->ccw_queue); 2087 /* let the bh start the request to keep them in order */ 2088 dasd_schedule_device_bh(device); 2089 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2090 } 2091 EXPORT_SYMBOL(dasd_add_request_head); 2092 2093 /* 2094 * Queue a request to the tail of the device ccw_queue. 2095 * Start the I/O if possible. 2096 */ 2097 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2098 { 2099 struct dasd_device *device; 2100 unsigned long flags; 2101 2102 device = cqr->startdev; 2103 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2104 cqr->status = DASD_CQR_QUEUED; 2105 list_add_tail(&cqr->devlist, &device->ccw_queue); 2106 /* let the bh start the request to keep them in order */ 2107 dasd_schedule_device_bh(device); 2108 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2109 } 2110 EXPORT_SYMBOL(dasd_add_request_tail); 2111 2112 /* 2113 * Wakeup helper for the 'sleep_on' functions. 2114 */ 2115 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2116 { 2117 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2118 cqr->callback_data = DASD_SLEEPON_END_TAG; 2119 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2120 wake_up(&generic_waitq); 2121 } 2122 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2123 2124 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2125 { 2126 struct dasd_device *device; 2127 int rc; 2128 2129 device = cqr->startdev; 2130 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2131 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2132 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2133 return rc; 2134 } 2135 2136 /* 2137 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
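 * A return value of 1 tells the sleep_on loops that recovery is still in
 * progress and the request list has to be examined again; 0 means that no
 * further recovery action is required for this request.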
2138 */ 2139 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2140 { 2141 struct dasd_device *device; 2142 dasd_erp_fn_t erp_fn; 2143 2144 if (cqr->status == DASD_CQR_FILLED) 2145 return 0; 2146 device = cqr->startdev; 2147 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2148 if (cqr->status == DASD_CQR_TERMINATED) { 2149 device->discipline->handle_terminated_request(cqr); 2150 return 1; 2151 } 2152 if (cqr->status == DASD_CQR_NEED_ERP) { 2153 erp_fn = device->discipline->erp_action(cqr); 2154 erp_fn(cqr); 2155 return 1; 2156 } 2157 if (cqr->status == DASD_CQR_FAILED) 2158 dasd_log_sense(cqr, &cqr->irb); 2159 if (cqr->refers) { 2160 __dasd_process_erp(device, cqr); 2161 return 1; 2162 } 2163 } 2164 return 0; 2165 } 2166 2167 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2168 { 2169 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2170 if (cqr->refers) /* erp is not done yet */ 2171 return 1; 2172 return ((cqr->status != DASD_CQR_DONE) && 2173 (cqr->status != DASD_CQR_FAILED)); 2174 } else 2175 return (cqr->status == DASD_CQR_FILLED); 2176 } 2177 2178 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2179 { 2180 struct dasd_device *device; 2181 int rc; 2182 struct list_head ccw_queue; 2183 struct dasd_ccw_req *cqr; 2184 2185 INIT_LIST_HEAD(&ccw_queue); 2186 maincqr->status = DASD_CQR_FILLED; 2187 device = maincqr->startdev; 2188 list_add(&maincqr->blocklist, &ccw_queue); 2189 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2190 cqr = list_first_entry(&ccw_queue, 2191 struct dasd_ccw_req, blocklist)) { 2192 2193 if (__dasd_sleep_on_erp(cqr)) 2194 continue; 2195 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2196 continue; 2197 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2198 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2199 cqr->status = DASD_CQR_FAILED; 2200 cqr->intrc = -EPERM; 2201 continue; 2202 } 2203 /* Non-temporary stop condition will trigger fail fast */ 2204 if (device->stopped & ~DASD_STOPPED_PENDING && 2205 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2206 (!dasd_eer_enabled(device))) { 2207 cqr->status = DASD_CQR_FAILED; 2208 cqr->intrc = -ENOLINK; 2209 continue; 2210 } 2211 /* Don't try to start requests if device is stopped */ 2212 if (interruptible) { 2213 rc = wait_event_interruptible( 2214 generic_waitq, !(device->stopped)); 2215 if (rc == -ERESTARTSYS) { 2216 cqr->status = DASD_CQR_FAILED; 2217 maincqr->intrc = rc; 2218 continue; 2219 } 2220 } else 2221 wait_event(generic_waitq, !(device->stopped)); 2222 2223 if (!cqr->callback) 2224 cqr->callback = dasd_wakeup_cb; 2225 2226 cqr->callback_data = DASD_SLEEPON_START_TAG; 2227 dasd_add_request_tail(cqr); 2228 if (interruptible) { 2229 rc = wait_event_interruptible( 2230 generic_waitq, _wait_for_wakeup(cqr)); 2231 if (rc == -ERESTARTSYS) { 2232 dasd_cancel_req(cqr); 2233 /* wait (non-interruptible) for final status */ 2234 wait_event(generic_waitq, 2235 _wait_for_wakeup(cqr)); 2236 cqr->status = DASD_CQR_FAILED; 2237 maincqr->intrc = rc; 2238 continue; 2239 } 2240 } else 2241 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2242 } 2243 2244 maincqr->endclk = get_tod_clock(); 2245 if ((maincqr->status != DASD_CQR_DONE) && 2246 (maincqr->intrc != -ERESTARTSYS)) 2247 dasd_log_sense(maincqr, &maincqr->irb); 2248 if (maincqr->status == DASD_CQR_DONE) 2249 rc = 0; 2250 else if (maincqr->intrc) 2251 rc = maincqr->intrc; 2252 else 2253 rc = -EIO; 2254 return rc; 2255 } 2256 2257 static inline int _wait_for_wakeup_queue(struct 
list_head *ccw_queue) 2258 { 2259 struct dasd_ccw_req *cqr; 2260 2261 list_for_each_entry(cqr, ccw_queue, blocklist) { 2262 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2263 return 0; 2264 } 2265 2266 return 1; 2267 } 2268 2269 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2270 { 2271 struct dasd_device *device; 2272 struct dasd_ccw_req *cqr, *n; 2273 int rc; 2274 2275 retry: 2276 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2277 device = cqr->startdev; 2278 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2279 continue; 2280 2281 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2282 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2283 cqr->status = DASD_CQR_FAILED; 2284 cqr->intrc = -EPERM; 2285 continue; 2286 } 2287 /*Non-temporary stop condition will trigger fail fast*/ 2288 if (device->stopped & ~DASD_STOPPED_PENDING && 2289 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2290 !dasd_eer_enabled(device)) { 2291 cqr->status = DASD_CQR_FAILED; 2292 cqr->intrc = -EAGAIN; 2293 continue; 2294 } 2295 2296 /*Don't try to start requests if device is stopped*/ 2297 if (interruptible) { 2298 rc = wait_event_interruptible( 2299 generic_waitq, !device->stopped); 2300 if (rc == -ERESTARTSYS) { 2301 cqr->status = DASD_CQR_FAILED; 2302 cqr->intrc = rc; 2303 continue; 2304 } 2305 } else 2306 wait_event(generic_waitq, !(device->stopped)); 2307 2308 if (!cqr->callback) 2309 cqr->callback = dasd_wakeup_cb; 2310 cqr->callback_data = DASD_SLEEPON_START_TAG; 2311 dasd_add_request_tail(cqr); 2312 } 2313 2314 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2315 2316 rc = 0; 2317 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2318 /* 2319 * for alias devices simplify error recovery and 2320 * return to upper layer 2321 * do not skip ERP requests 2322 */ 2323 if (cqr->startdev != cqr->basedev && !cqr->refers && 2324 (cqr->status == DASD_CQR_TERMINATED || 2325 cqr->status == DASD_CQR_NEED_ERP)) 2326 return -EAGAIN; 2327 2328 /* normal recovery for basedev IO */ 2329 if (__dasd_sleep_on_erp(cqr)) { 2330 goto retry; 2331 /* remember that ERP was needed */ 2332 rc = 1; 2333 /* skip processing for active cqr */ 2334 if (cqr->status != DASD_CQR_TERMINATED && 2335 cqr->status != DASD_CQR_NEED_ERP) 2336 break; 2337 } 2338 } 2339 2340 /* start ERP requests in upper loop */ 2341 if (rc) 2342 goto retry; 2343 2344 return 0; 2345 } 2346 2347 /* 2348 * Queue a request to the tail of the device ccw_queue and wait for 2349 * it's completion. 2350 */ 2351 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2352 { 2353 return _dasd_sleep_on(cqr, 0); 2354 } 2355 EXPORT_SYMBOL(dasd_sleep_on); 2356 2357 /* 2358 * Start requests from a ccw_queue and wait for their completion. 2359 */ 2360 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2361 { 2362 return _dasd_sleep_on_queue(ccw_queue, 0); 2363 } 2364 EXPORT_SYMBOL(dasd_sleep_on_queue); 2365 2366 /* 2367 * Queue a request to the tail of the device ccw_queue and wait 2368 * interruptible for it's completion. 2369 */ 2370 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2371 { 2372 return _dasd_sleep_on(cqr, 1); 2373 } 2374 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2375 2376 /* 2377 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2378 * for eckd devices) the currently running request has to be terminated 2379 * and be put back to status queued, before the special request is added 2380 * to the head of the queue. Then the special request is waited on normally. 
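 * A discipline that needs this behaviour just prepares its special ccw
 * request and calls dasd_sleep_on_immediatly(); a minimal, illustrative
 * sketch (the cqr is assumed to have been built by the discipline):
 *
 *	rc = dasd_sleep_on_immediatly(cqr);
 *	if (rc)
 *		... the special request could not be completed ...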
2381 */ 2382 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2383 { 2384 struct dasd_ccw_req *cqr; 2385 int rc; 2386 2387 if (list_empty(&device->ccw_queue)) 2388 return 0; 2389 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2390 rc = device->discipline->term_IO(cqr); 2391 if (!rc) 2392 /* 2393 * CQR terminated because a more important request is pending. 2394 * Undo decreasing of retry counter because this is 2395 * not an error case. 2396 */ 2397 cqr->retries++; 2398 return rc; 2399 } 2400 2401 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2402 { 2403 struct dasd_device *device; 2404 int rc; 2405 2406 device = cqr->startdev; 2407 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2408 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2409 cqr->status = DASD_CQR_FAILED; 2410 cqr->intrc = -EPERM; 2411 return -EIO; 2412 } 2413 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2414 rc = _dasd_term_running_cqr(device); 2415 if (rc) { 2416 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2417 return rc; 2418 } 2419 cqr->callback = dasd_wakeup_cb; 2420 cqr->callback_data = DASD_SLEEPON_START_TAG; 2421 cqr->status = DASD_CQR_QUEUED; 2422 /* 2423 * add new request as second 2424 * first the terminated cqr needs to be finished 2425 */ 2426 list_add(&cqr->devlist, device->ccw_queue.next); 2427 2428 /* let the bh start the request to keep them in order */ 2429 dasd_schedule_device_bh(device); 2430 2431 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2432 2433 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2434 2435 if (cqr->status == DASD_CQR_DONE) 2436 rc = 0; 2437 else if (cqr->intrc) 2438 rc = cqr->intrc; 2439 else 2440 rc = -EIO; 2441 2442 /* kick tasklets */ 2443 dasd_schedule_device_bh(device); 2444 if (device->block) 2445 dasd_schedule_block_bh(device->block); 2446 2447 return rc; 2448 } 2449 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2450 2451 /* 2452 * Cancels a request that was started with dasd_sleep_on_req. 2453 * This is useful to timeout requests. The request will be 2454 * terminated if it is currently in i/o. 2455 * Returns 0 if request termination was successful 2456 * negative error code if termination failed 2457 * Cancellation of a request is an asynchronous operation! The calling 2458 * function has to wait until the request is properly returned via callback. 2459 */ 2460 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2461 { 2462 struct dasd_device *device = cqr->startdev; 2463 unsigned long flags; 2464 int rc; 2465 2466 rc = 0; 2467 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2468 switch (cqr->status) { 2469 case DASD_CQR_QUEUED: 2470 /* request was not started - just set to cleared */ 2471 cqr->status = DASD_CQR_CLEARED; 2472 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2473 cqr->callback_data = DASD_SLEEPON_END_TAG; 2474 break; 2475 case DASD_CQR_IN_IO: 2476 /* request in IO - terminate IO and release again */ 2477 rc = device->discipline->term_IO(cqr); 2478 if (rc) { 2479 dev_err(&device->cdev->dev, 2480 "Cancelling request %p failed with rc=%d\n", 2481 cqr, rc); 2482 } else { 2483 cqr->stopclk = get_tod_clock(); 2484 } 2485 break; 2486 default: /* already finished or clear pending - do nothing */ 2487 break; 2488 } 2489 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2490 dasd_schedule_device_bh(device); 2491 return rc; 2492 } 2493 EXPORT_SYMBOL(dasd_cancel_req); 2494 2495 /* 2496 * SECTION: Operations of the dasd_block layer. 2497 */ 2498 2499 /* 2500 * Timeout function for dasd_block. 
This is used when the block layer 2501 * is waiting for something that may not come reliably, (e.g. a state 2502 * change interrupt) 2503 */ 2504 static void dasd_block_timeout(unsigned long ptr) 2505 { 2506 unsigned long flags; 2507 struct dasd_block *block; 2508 2509 block = (struct dasd_block *) ptr; 2510 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2511 /* re-activate request queue */ 2512 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2513 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2514 dasd_schedule_block_bh(block); 2515 } 2516 2517 /* 2518 * Setup timeout for a dasd_block in jiffies. 2519 */ 2520 void dasd_block_set_timer(struct dasd_block *block, int expires) 2521 { 2522 if (expires == 0) 2523 del_timer(&block->timer); 2524 else 2525 mod_timer(&block->timer, jiffies + expires); 2526 } 2527 EXPORT_SYMBOL(dasd_block_set_timer); 2528 2529 /* 2530 * Clear timeout for a dasd_block. 2531 */ 2532 void dasd_block_clear_timer(struct dasd_block *block) 2533 { 2534 del_timer(&block->timer); 2535 } 2536 EXPORT_SYMBOL(dasd_block_clear_timer); 2537 2538 /* 2539 * Process finished error recovery ccw. 2540 */ 2541 static void __dasd_process_erp(struct dasd_device *device, 2542 struct dasd_ccw_req *cqr) 2543 { 2544 dasd_erp_fn_t erp_fn; 2545 2546 if (cqr->status == DASD_CQR_DONE) 2547 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2548 else 2549 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2550 erp_fn = device->discipline->erp_postaction(cqr); 2551 erp_fn(cqr); 2552 } 2553 2554 /* 2555 * Fetch requests from the block device queue. 2556 */ 2557 static void __dasd_process_request_queue(struct dasd_block *block) 2558 { 2559 struct request_queue *queue; 2560 struct request *req; 2561 struct dasd_ccw_req *cqr; 2562 struct dasd_device *basedev; 2563 unsigned long flags; 2564 queue = block->request_queue; 2565 basedev = block->base; 2566 /* No queue ? Then there is nothing to do. */ 2567 if (queue == NULL) 2568 return; 2569 2570 /* 2571 * We requeue request from the block device queue to the ccw 2572 * queue only in two states. In state DASD_STATE_READY the 2573 * partition detection is done and we need to requeue requests 2574 * for that. State DASD_STATE_ONLINE is normal block device 2575 * operation. 2576 */ 2577 if (basedev->state < DASD_STATE_READY) { 2578 while ((req = blk_fetch_request(block->request_queue))) 2579 __blk_end_request_all(req, -EIO); 2580 return; 2581 } 2582 /* Now we try to fetch requests from the request queue */ 2583 while ((req = blk_peek_request(queue))) { 2584 if (basedev->features & DASD_FEATURE_READONLY && 2585 rq_data_dir(req) == WRITE) { 2586 DBF_DEV_EVENT(DBF_ERR, basedev, 2587 "Rejecting write request %p", 2588 req); 2589 blk_start_request(req); 2590 __blk_end_request_all(req, -EIO); 2591 continue; 2592 } 2593 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2594 (basedev->features & DASD_FEATURE_FAILFAST || 2595 blk_noretry_request(req))) { 2596 DBF_DEV_EVENT(DBF_ERR, basedev, 2597 "Rejecting failfast request %p", 2598 req); 2599 blk_start_request(req); 2600 __blk_end_request_all(req, -ETIMEDOUT); 2601 continue; 2602 } 2603 cqr = basedev->discipline->build_cp(basedev, block, req); 2604 if (IS_ERR(cqr)) { 2605 if (PTR_ERR(cqr) == -EBUSY) 2606 break; /* normal end condition */ 2607 if (PTR_ERR(cqr) == -ENOMEM) 2608 break; /* terminate request queue loop */ 2609 if (PTR_ERR(cqr) == -EAGAIN) { 2610 /* 2611 * The current request cannot be build right 2612 * now, we have to try later. 
If this request 2613 * is the head-of-queue we stop the device 2614 * for 1/2 second. 2615 */ 2616 if (!list_empty(&block->ccw_queue)) 2617 break; 2618 spin_lock_irqsave( 2619 get_ccwdev_lock(basedev->cdev), flags); 2620 dasd_device_set_stop_bits(basedev, 2621 DASD_STOPPED_PENDING); 2622 spin_unlock_irqrestore( 2623 get_ccwdev_lock(basedev->cdev), flags); 2624 dasd_block_set_timer(block, HZ/2); 2625 break; 2626 } 2627 DBF_DEV_EVENT(DBF_ERR, basedev, 2628 "CCW creation failed (rc=%ld) " 2629 "on request %p", 2630 PTR_ERR(cqr), req); 2631 blk_start_request(req); 2632 __blk_end_request_all(req, -EIO); 2633 continue; 2634 } 2635 /* 2636 * Note: callback is set to dasd_return_cqr_cb in 2637 * __dasd_block_start_head to cover erp requests as well 2638 */ 2639 cqr->callback_data = (void *) req; 2640 cqr->status = DASD_CQR_FILLED; 2641 req->completion_data = cqr; 2642 blk_start_request(req); 2643 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2644 INIT_LIST_HEAD(&cqr->devlist); 2645 dasd_profile_start(block, cqr, req); 2646 } 2647 } 2648 2649 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2650 { 2651 struct request *req; 2652 int status; 2653 int error = 0; 2654 2655 req = (struct request *) cqr->callback_data; 2656 dasd_profile_end(cqr->block, cqr, req); 2657 status = cqr->block->base->discipline->free_cp(cqr, req); 2658 if (status < 0) 2659 error = status; 2660 else if (status == 0) { 2661 if (cqr->intrc == -EPERM) 2662 error = -EBADE; 2663 else if (cqr->intrc == -ENOLINK || 2664 cqr->intrc == -ETIMEDOUT) 2665 error = cqr->intrc; 2666 else 2667 error = -EIO; 2668 } 2669 __blk_end_request_all(req, error); 2670 } 2671 2672 /* 2673 * Process ccw request queue. 2674 */ 2675 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2676 struct list_head *final_queue) 2677 { 2678 struct list_head *l, *n; 2679 struct dasd_ccw_req *cqr; 2680 dasd_erp_fn_t erp_fn; 2681 unsigned long flags; 2682 struct dasd_device *base = block->base; 2683 2684 restart: 2685 /* Process request with final status. */ 2686 list_for_each_safe(l, n, &block->ccw_queue) { 2687 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2688 if (cqr->status != DASD_CQR_DONE && 2689 cqr->status != DASD_CQR_FAILED && 2690 cqr->status != DASD_CQR_NEED_ERP && 2691 cqr->status != DASD_CQR_TERMINATED) 2692 continue; 2693 2694 if (cqr->status == DASD_CQR_TERMINATED) { 2695 base->discipline->handle_terminated_request(cqr); 2696 goto restart; 2697 } 2698 2699 /* Process requests that may be recovered */ 2700 if (cqr->status == DASD_CQR_NEED_ERP) { 2701 erp_fn = base->discipline->erp_action(cqr); 2702 if (IS_ERR(erp_fn(cqr))) 2703 continue; 2704 goto restart; 2705 } 2706 2707 /* log sense for fatal error */ 2708 if (cqr->status == DASD_CQR_FAILED) { 2709 dasd_log_sense(cqr, &cqr->irb); 2710 } 2711 2712 /* First of all call extended error reporting. */ 2713 if (dasd_eer_enabled(base) && 2714 cqr->status == DASD_CQR_FAILED) { 2715 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2716 2717 /* restart request */ 2718 cqr->status = DASD_CQR_FILLED; 2719 cqr->retries = 255; 2720 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2721 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2722 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2723 flags); 2724 goto restart; 2725 } 2726 2727 /* Process finished ERP request. 
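 * cqr->refers links an ERP request back to the request it was created for;
 * __dasd_process_erp runs the discipline's erp_postaction callback and the
 * queue is scanned again from the start afterwards.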
*/ 2728 if (cqr->refers) { 2729 __dasd_process_erp(base, cqr); 2730 goto restart; 2731 } 2732 2733 /* Rechain finished requests to final queue */ 2734 cqr->endclk = get_tod_clock(); 2735 list_move_tail(&cqr->blocklist, final_queue); 2736 } 2737 } 2738 2739 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2740 { 2741 dasd_schedule_block_bh(cqr->block); 2742 } 2743 2744 static void __dasd_block_start_head(struct dasd_block *block) 2745 { 2746 struct dasd_ccw_req *cqr; 2747 2748 if (list_empty(&block->ccw_queue)) 2749 return; 2750 /* We allways begin with the first requests on the queue, as some 2751 * of previously started requests have to be enqueued on a 2752 * dasd_device again for error recovery. 2753 */ 2754 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2755 if (cqr->status != DASD_CQR_FILLED) 2756 continue; 2757 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2758 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2759 cqr->status = DASD_CQR_FAILED; 2760 cqr->intrc = -EPERM; 2761 dasd_schedule_block_bh(block); 2762 continue; 2763 } 2764 /* Non-temporary stop condition will trigger fail fast */ 2765 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2766 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2767 (!dasd_eer_enabled(block->base))) { 2768 cqr->status = DASD_CQR_FAILED; 2769 cqr->intrc = -ENOLINK; 2770 dasd_schedule_block_bh(block); 2771 continue; 2772 } 2773 /* Don't try to start requests if device is stopped */ 2774 if (block->base->stopped) 2775 return; 2776 2777 /* just a fail safe check, should not happen */ 2778 if (!cqr->startdev) 2779 cqr->startdev = block->base; 2780 2781 /* make sure that the requests we submit find their way back */ 2782 cqr->callback = dasd_return_cqr_cb; 2783 2784 dasd_add_request_tail(cqr); 2785 } 2786 } 2787 2788 /* 2789 * Central dasd_block layer routine. Takes requests from the generic 2790 * block layer request queue, creates ccw requests, enqueues them on 2791 * a dasd_device and processes ccw requests that have been returned. 2792 */ 2793 static void dasd_block_tasklet(struct dasd_block *block) 2794 { 2795 struct list_head final_queue; 2796 struct list_head *l, *n; 2797 struct dasd_ccw_req *cqr; 2798 2799 atomic_set(&block->tasklet_scheduled, 0); 2800 INIT_LIST_HEAD(&final_queue); 2801 spin_lock(&block->queue_lock); 2802 /* Finish off requests on ccw queue */ 2803 __dasd_process_block_ccw_queue(block, &final_queue); 2804 spin_unlock(&block->queue_lock); 2805 /* Now call the callback function of requests with final status */ 2806 spin_lock_irq(&block->request_queue_lock); 2807 list_for_each_safe(l, n, &final_queue) { 2808 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2809 list_del_init(&cqr->blocklist); 2810 __dasd_cleanup_cqr(cqr); 2811 } 2812 spin_lock(&block->queue_lock); 2813 /* Get new request from the block device request queue */ 2814 __dasd_process_request_queue(block); 2815 /* Now check if the head of the ccw queue needs to be started. 
*/ 2816 __dasd_block_start_head(block); 2817 spin_unlock(&block->queue_lock); 2818 spin_unlock_irq(&block->request_queue_lock); 2819 if (waitqueue_active(&shutdown_waitq)) 2820 wake_up(&shutdown_waitq); 2821 dasd_put_device(block->base); 2822 } 2823 2824 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2825 { 2826 wake_up(&dasd_flush_wq); 2827 } 2828 2829 /* 2830 * Requeue a request back to the block request queue 2831 * only works for block requests 2832 */ 2833 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2834 { 2835 struct dasd_block *block = cqr->block; 2836 struct request *req; 2837 unsigned long flags; 2838 2839 if (!block) 2840 return -EINVAL; 2841 spin_lock_irqsave(&block->queue_lock, flags); 2842 req = (struct request *) cqr->callback_data; 2843 blk_requeue_request(block->request_queue, req); 2844 spin_unlock_irqrestore(&block->queue_lock, flags); 2845 2846 return 0; 2847 } 2848 2849 /* 2850 * Go through all request on the dasd_block request queue, cancel them 2851 * on the respective dasd_device, and return them to the generic 2852 * block layer. 2853 */ 2854 static int dasd_flush_block_queue(struct dasd_block *block) 2855 { 2856 struct dasd_ccw_req *cqr, *n; 2857 int rc, i; 2858 struct list_head flush_queue; 2859 2860 INIT_LIST_HEAD(&flush_queue); 2861 spin_lock_bh(&block->queue_lock); 2862 rc = 0; 2863 restart: 2864 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2865 /* if this request currently owned by a dasd_device cancel it */ 2866 if (cqr->status >= DASD_CQR_QUEUED) 2867 rc = dasd_cancel_req(cqr); 2868 if (rc < 0) 2869 break; 2870 /* Rechain request (including erp chain) so it won't be 2871 * touched by the dasd_block_tasklet anymore. 2872 * Replace the callback so we notice when the request 2873 * is returned from the dasd_device layer. 2874 */ 2875 cqr->callback = _dasd_wake_block_flush_cb; 2876 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2877 list_move_tail(&cqr->blocklist, &flush_queue); 2878 if (i > 1) 2879 /* moved more than one request - need to restart */ 2880 goto restart; 2881 } 2882 spin_unlock_bh(&block->queue_lock); 2883 /* Now call the callback function of flushed requests */ 2884 restart_cb: 2885 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2886 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2887 /* Process finished ERP request. */ 2888 if (cqr->refers) { 2889 spin_lock_bh(&block->queue_lock); 2890 __dasd_process_erp(block->base, cqr); 2891 spin_unlock_bh(&block->queue_lock); 2892 /* restart list_for_xx loop since dasd_process_erp 2893 * might remove multiple elements */ 2894 goto restart_cb; 2895 } 2896 /* call the callback function */ 2897 spin_lock_irq(&block->request_queue_lock); 2898 cqr->endclk = get_tod_clock(); 2899 list_del_init(&cqr->blocklist); 2900 __dasd_cleanup_cqr(cqr); 2901 spin_unlock_irq(&block->request_queue_lock); 2902 } 2903 return rc; 2904 } 2905 2906 /* 2907 * Schedules a call to dasd_tasklet over the device tasklet. 2908 */ 2909 void dasd_schedule_block_bh(struct dasd_block *block) 2910 { 2911 /* Protect against rescheduling. */ 2912 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2913 return; 2914 /* life cycle of block is bound to it's base device */ 2915 dasd_get_device(block->base); 2916 tasklet_hi_schedule(&block->tasklet); 2917 } 2918 EXPORT_SYMBOL(dasd_schedule_block_bh); 2919 2920 2921 /* 2922 * SECTION: external block device operations 2923 * (request queue handling, open, release, etc.) 
2924 */ 2925 2926 /* 2927 * Dasd request queue function. Called from ll_rw_blk.c 2928 */ 2929 static void do_dasd_request(struct request_queue *queue) 2930 { 2931 struct dasd_block *block; 2932 2933 block = queue->queuedata; 2934 spin_lock(&block->queue_lock); 2935 /* Get new request from the block device request queue */ 2936 __dasd_process_request_queue(block); 2937 /* Now check if the head of the ccw queue needs to be started. */ 2938 __dasd_block_start_head(block); 2939 spin_unlock(&block->queue_lock); 2940 } 2941 2942 /* 2943 * Block timeout callback, called from the block layer 2944 * 2945 * request_queue lock is held on entry. 2946 * 2947 * Return values: 2948 * BLK_EH_RESET_TIMER if the request should be left running 2949 * BLK_EH_NOT_HANDLED if the request is handled or terminated 2950 * by the driver. 2951 */ 2952 enum blk_eh_timer_return dasd_times_out(struct request *req) 2953 { 2954 struct dasd_ccw_req *cqr = req->completion_data; 2955 struct dasd_block *block = req->q->queuedata; 2956 struct dasd_device *device; 2957 int rc = 0; 2958 2959 if (!cqr) 2960 return BLK_EH_NOT_HANDLED; 2961 2962 device = cqr->startdev ? cqr->startdev : block->base; 2963 if (!device->blk_timeout) 2964 return BLK_EH_RESET_TIMER; 2965 DBF_DEV_EVENT(DBF_WARNING, device, 2966 " dasd_times_out cqr %p status %x", 2967 cqr, cqr->status); 2968 2969 spin_lock(&block->queue_lock); 2970 spin_lock(get_ccwdev_lock(device->cdev)); 2971 cqr->retries = -1; 2972 cqr->intrc = -ETIMEDOUT; 2973 if (cqr->status >= DASD_CQR_QUEUED) { 2974 spin_unlock(get_ccwdev_lock(device->cdev)); 2975 rc = dasd_cancel_req(cqr); 2976 } else if (cqr->status == DASD_CQR_FILLED || 2977 cqr->status == DASD_CQR_NEED_ERP) { 2978 cqr->status = DASD_CQR_TERMINATED; 2979 spin_unlock(get_ccwdev_lock(device->cdev)); 2980 } else if (cqr->status == DASD_CQR_IN_ERP) { 2981 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 2982 2983 list_for_each_entry_safe(searchcqr, nextcqr, 2984 &block->ccw_queue, blocklist) { 2985 tmpcqr = searchcqr; 2986 while (tmpcqr->refers) 2987 tmpcqr = tmpcqr->refers; 2988 if (tmpcqr != cqr) 2989 continue; 2990 /* searchcqr is an ERP request for cqr */ 2991 searchcqr->retries = -1; 2992 searchcqr->intrc = -ETIMEDOUT; 2993 if (searchcqr->status >= DASD_CQR_QUEUED) { 2994 spin_unlock(get_ccwdev_lock(device->cdev)); 2995 rc = dasd_cancel_req(searchcqr); 2996 spin_lock(get_ccwdev_lock(device->cdev)); 2997 } else if ((searchcqr->status == DASD_CQR_FILLED) || 2998 (searchcqr->status == DASD_CQR_NEED_ERP)) { 2999 searchcqr->status = DASD_CQR_TERMINATED; 3000 rc = 0; 3001 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3002 /* 3003 * Shouldn't happen; most recent ERP 3004 * request is at the front of queue 3005 */ 3006 continue; 3007 } 3008 break; 3009 } 3010 spin_unlock(get_ccwdev_lock(device->cdev)); 3011 } 3012 dasd_schedule_block_bh(block); 3013 spin_unlock(&block->queue_lock); 3014 3015 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 3016 } 3017 3018 /* 3019 * Allocate and initialize request queue and default I/O scheduler. 
3020 */ 3021 static int dasd_alloc_queue(struct dasd_block *block) 3022 { 3023 int rc; 3024 3025 block->request_queue = blk_init_queue(do_dasd_request, 3026 &block->request_queue_lock); 3027 if (block->request_queue == NULL) 3028 return -ENOMEM; 3029 3030 block->request_queue->queuedata = block; 3031 3032 elevator_exit(block->request_queue->elevator); 3033 block->request_queue->elevator = NULL; 3034 mutex_lock(&block->request_queue->sysfs_lock); 3035 rc = elevator_init(block->request_queue, "deadline"); 3036 if (rc) 3037 blk_cleanup_queue(block->request_queue); 3038 mutex_unlock(&block->request_queue->sysfs_lock); 3039 return rc; 3040 } 3041 3042 /* 3043 * Set up the request queue limits and parameters. 3044 */ 3045 static void dasd_setup_queue(struct dasd_block *block) 3046 { 3047 int max; 3048 3049 if (block->base->features & DASD_FEATURE_USERAW) { 3050 /* 3051 * the max_blocks value for raw_track access is 256 3052 * it is higher than the native ECKD value because we 3053 * only need one ccw per track 3054 * so the max_hw_sectors are 3055 * 2048 x 512B = 1024kB = 16 tracks 3056 */ 3057 max = 2048; 3058 } else { 3059 max = block->base->discipline->max_blocks << block->s2b_shift; 3060 } 3061 blk_queue_logical_block_size(block->request_queue, 3062 block->bp_block); 3063 blk_queue_max_hw_sectors(block->request_queue, max); 3064 blk_queue_max_segments(block->request_queue, -1L); 3065 /* with page sized segments we can translate each segment into 3066 * one idaw/tidaw 3067 */ 3068 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3069 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3070 } 3071 3072 /* 3073 * Deactivate and free request queue. 3074 */ 3075 static void dasd_free_queue(struct dasd_block *block) 3076 { 3077 if (block->request_queue) { 3078 blk_cleanup_queue(block->request_queue); 3079 block->request_queue = NULL; 3080 } 3081 } 3082 3083 /* 3084 * Flush requests on the request queue.
3085 */ 3086 static void dasd_flush_request_queue(struct dasd_block *block) 3087 { 3088 struct request *req; 3089 3090 if (!block->request_queue) 3091 return; 3092 3093 spin_lock_irq(&block->request_queue_lock); 3094 while ((req = blk_fetch_request(block->request_queue))) 3095 __blk_end_request_all(req, -EIO); 3096 spin_unlock_irq(&block->request_queue_lock); 3097 } 3098 3099 static int dasd_open(struct block_device *bdev, fmode_t mode) 3100 { 3101 struct dasd_device *base; 3102 int rc; 3103 3104 base = dasd_device_from_gendisk(bdev->bd_disk); 3105 if (!base) 3106 return -ENODEV; 3107 3108 atomic_inc(&base->block->open_count); 3109 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3110 rc = -ENODEV; 3111 goto unlock; 3112 } 3113 3114 if (!try_module_get(base->discipline->owner)) { 3115 rc = -EINVAL; 3116 goto unlock; 3117 } 3118 3119 if (dasd_probeonly) { 3120 dev_info(&base->cdev->dev, 3121 "Accessing the DASD failed because it is in " 3122 "probeonly mode\n"); 3123 rc = -EPERM; 3124 goto out; 3125 } 3126 3127 if (base->state <= DASD_STATE_BASIC) { 3128 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3129 " Cannot open unrecognized device"); 3130 rc = -ENODEV; 3131 goto out; 3132 } 3133 3134 if ((mode & FMODE_WRITE) && 3135 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3136 (base->features & DASD_FEATURE_READONLY))) { 3137 rc = -EROFS; 3138 goto out; 3139 } 3140 3141 dasd_put_device(base); 3142 return 0; 3143 3144 out: 3145 module_put(base->discipline->owner); 3146 unlock: 3147 atomic_dec(&base->block->open_count); 3148 dasd_put_device(base); 3149 return rc; 3150 } 3151 3152 static void dasd_release(struct gendisk *disk, fmode_t mode) 3153 { 3154 struct dasd_device *base = dasd_device_from_gendisk(disk); 3155 if (base) { 3156 atomic_dec(&base->block->open_count); 3157 module_put(base->discipline->owner); 3158 dasd_put_device(base); 3159 } 3160 } 3161 3162 /* 3163 * Return disk geometry. 3164 */ 3165 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3166 { 3167 struct dasd_device *base; 3168 3169 base = dasd_device_from_gendisk(bdev->bd_disk); 3170 if (!base) 3171 return -ENODEV; 3172 3173 if (!base->discipline || 3174 !base->discipline->fill_geometry) { 3175 dasd_put_device(base); 3176 return -EINVAL; 3177 } 3178 base->discipline->fill_geometry(base->block, geo); 3179 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3180 dasd_put_device(base); 3181 return 0; 3182 } 3183 3184 const struct block_device_operations 3185 dasd_device_operations = { 3186 .owner = THIS_MODULE, 3187 .open = dasd_open, 3188 .release = dasd_release, 3189 .ioctl = dasd_ioctl, 3190 .compat_ioctl = dasd_ioctl, 3191 .getgeo = dasd_getgeo, 3192 }; 3193 3194 /******************************************************************************* 3195 * end of block device operations 3196 */ 3197 3198 static void 3199 dasd_exit(void) 3200 { 3201 #ifdef CONFIG_PROC_FS 3202 dasd_proc_exit(); 3203 #endif 3204 dasd_eer_exit(); 3205 if (dasd_page_cache != NULL) { 3206 kmem_cache_destroy(dasd_page_cache); 3207 dasd_page_cache = NULL; 3208 } 3209 dasd_gendisk_exit(); 3210 dasd_devmap_exit(); 3211 if (dasd_debug_area != NULL) { 3212 debug_unregister(dasd_debug_area); 3213 dasd_debug_area = NULL; 3214 } 3215 dasd_statistics_removeroot(); 3216 } 3217 3218 /* 3219 * SECTION: common functions for ccw_driver use 3220 */ 3221 3222 /* 3223 * Is the device read-only? 3224 * Note that this function does not report the setting of the 3225 * readonly device attribute, but how it is configured in z/VM. 
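 * Under z/VM the device is queried with a diag 0x210 call and bit 0x80 of
 * the returned vrdcvfla field reflects the read-only setting; outside of
 * z/VM (e.g. in LPAR mode) the function always returns 0.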
3226 */ 3227 int dasd_device_is_ro(struct dasd_device *device) 3228 { 3229 struct ccw_dev_id dev_id; 3230 struct diag210 diag_data; 3231 int rc; 3232 3233 if (!MACHINE_IS_VM) 3234 return 0; 3235 ccw_device_get_id(device->cdev, &dev_id); 3236 memset(&diag_data, 0, sizeof(diag_data)); 3237 diag_data.vrdcdvno = dev_id.devno; 3238 diag_data.vrdclen = sizeof(diag_data); 3239 rc = diag210(&diag_data); 3240 if (rc == 0 || rc == 2) { 3241 return diag_data.vrdcvfla & 0x80; 3242 } else { 3243 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3244 dev_id.devno, rc); 3245 return 0; 3246 } 3247 } 3248 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3249 3250 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3251 { 3252 struct ccw_device *cdev = data; 3253 int ret; 3254 3255 ret = ccw_device_set_online(cdev); 3256 if (ret) 3257 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3258 dev_name(&cdev->dev), ret); 3259 } 3260 3261 /* 3262 * Initial attempt at a probe function. this can be simplified once 3263 * the other detection code is gone. 3264 */ 3265 int dasd_generic_probe(struct ccw_device *cdev, 3266 struct dasd_discipline *discipline) 3267 { 3268 int ret; 3269 3270 ret = dasd_add_sysfs_files(cdev); 3271 if (ret) { 3272 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3273 "dasd_generic_probe: could not add " 3274 "sysfs entries"); 3275 return ret; 3276 } 3277 cdev->handler = &dasd_int_handler; 3278 3279 /* 3280 * Automatically online either all dasd devices (dasd_autodetect) 3281 * or all devices specified with dasd= parameters during 3282 * initial probe. 3283 */ 3284 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3285 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3286 async_schedule(dasd_generic_auto_online, cdev); 3287 return 0; 3288 } 3289 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3290 3291 /* 3292 * This will one day be called from a global not_oper handler. 3293 * It is also used by driver_unregister during module unload. 3294 */ 3295 void dasd_generic_remove(struct ccw_device *cdev) 3296 { 3297 struct dasd_device *device; 3298 struct dasd_block *block; 3299 3300 cdev->handler = NULL; 3301 3302 device = dasd_device_from_cdev(cdev); 3303 if (IS_ERR(device)) { 3304 dasd_remove_sysfs_files(cdev); 3305 return; 3306 } 3307 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3308 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3309 /* Already doing offline processing */ 3310 dasd_put_device(device); 3311 dasd_remove_sysfs_files(cdev); 3312 return; 3313 } 3314 /* 3315 * This device is removed unconditionally. Set offline 3316 * flag to prevent dasd_open from opening it while it is 3317 * no quite down yet. 3318 */ 3319 dasd_set_target_state(device, DASD_STATE_NEW); 3320 /* dasd_delete_device destroys the device reference. */ 3321 block = device->block; 3322 dasd_delete_device(device); 3323 /* 3324 * life cycle of block is bound to device, so delete it after 3325 * device was safely removed 3326 */ 3327 if (block) 3328 dasd_free_block(block); 3329 3330 dasd_remove_sysfs_files(cdev); 3331 } 3332 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3333 3334 /* 3335 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3336 * the device is detected for the first time and is supposed to be used 3337 * or the user has started activation through sysfs. 
3338 */ 3339 int dasd_generic_set_online(struct ccw_device *cdev, 3340 struct dasd_discipline *base_discipline) 3341 { 3342 struct dasd_discipline *discipline; 3343 struct dasd_device *device; 3344 int rc; 3345 3346 /* first online clears initial online feature flag */ 3347 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3348 device = dasd_create_device(cdev); 3349 if (IS_ERR(device)) 3350 return PTR_ERR(device); 3351 3352 discipline = base_discipline; 3353 if (device->features & DASD_FEATURE_USEDIAG) { 3354 if (!dasd_diag_discipline_pointer) { 3355 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3356 dev_name(&cdev->dev)); 3357 dasd_delete_device(device); 3358 return -ENODEV; 3359 } 3360 discipline = dasd_diag_discipline_pointer; 3361 } 3362 if (!try_module_get(base_discipline->owner)) { 3363 dasd_delete_device(device); 3364 return -EINVAL; 3365 } 3366 if (!try_module_get(discipline->owner)) { 3367 module_put(base_discipline->owner); 3368 dasd_delete_device(device); 3369 return -EINVAL; 3370 } 3371 device->base_discipline = base_discipline; 3372 device->discipline = discipline; 3373 3374 /* check_device will allocate block device if necessary */ 3375 rc = discipline->check_device(device); 3376 if (rc) { 3377 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3378 dev_name(&cdev->dev), discipline->name, rc); 3379 module_put(discipline->owner); 3380 module_put(base_discipline->owner); 3381 dasd_delete_device(device); 3382 return rc; 3383 } 3384 3385 dasd_set_target_state(device, DASD_STATE_ONLINE); 3386 if (device->state <= DASD_STATE_KNOWN) { 3387 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3388 dev_name(&cdev->dev)); 3389 rc = -ENODEV; 3390 dasd_set_target_state(device, DASD_STATE_NEW); 3391 if (device->block) 3392 dasd_free_block(device->block); 3393 dasd_delete_device(device); 3394 } else 3395 pr_debug("dasd_generic device %s found\n", 3396 dev_name(&cdev->dev)); 3397 3398 wait_event(dasd_init_waitq, _wait_for_device(device)); 3399 3400 dasd_put_device(device); 3401 return rc; 3402 } 3403 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3404 3405 int dasd_generic_set_offline(struct ccw_device *cdev) 3406 { 3407 struct dasd_device *device; 3408 struct dasd_block *block; 3409 int max_count, open_count, rc; 3410 3411 rc = 0; 3412 device = dasd_device_from_cdev(cdev); 3413 if (IS_ERR(device)) 3414 return PTR_ERR(device); 3415 3416 /* 3417 * We must make sure that this device is currently not in use. 3418 * The open_count is increased for every opener, that includes 3419 * the blkdev_get in dasd_scan_partitions. We are only interested 3420 * in the other openers. 3421 */ 3422 if (device->block) { 3423 max_count = device->block->bdev ? 
0 : -1; 3424 open_count = atomic_read(&device->block->open_count); 3425 if (open_count > max_count) { 3426 if (open_count > 0) 3427 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3428 dev_name(&cdev->dev), open_count); 3429 else 3430 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3431 dev_name(&cdev->dev)); 3432 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3433 dasd_put_device(device); 3434 return -EBUSY; 3435 } 3436 } 3437 3438 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3439 /* 3440 * safe offline already running 3441 * could only be called by normal offline so safe_offline flag 3442 * needs to be removed to run normal offline and kill all I/O 3443 */ 3444 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3445 /* Already doing normal offline processing */ 3446 dasd_put_device(device); 3447 return -EBUSY; 3448 } else 3449 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3450 3451 } else 3452 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3453 /* Already doing offline processing */ 3454 dasd_put_device(device); 3455 return -EBUSY; 3456 } 3457 3458 /* 3459 * if safe_offline called set safe_offline_running flag and 3460 * clear safe_offline so that a call to normal offline 3461 * can overrun safe_offline processing 3462 */ 3463 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3464 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3465 /* 3466 * If we want to set the device safe offline all IO operations 3467 * should be finished before continuing the offline process 3468 * so sync bdev first and then wait for our queues to become 3469 * empty 3470 */ 3471 /* sync blockdev and partitions */ 3472 rc = fsync_bdev(device->block->bdev); 3473 if (rc != 0) 3474 goto interrupted; 3475 3476 /* schedule device tasklet and wait for completion */ 3477 dasd_schedule_device_bh(device); 3478 rc = wait_event_interruptible(shutdown_waitq, 3479 _wait_for_empty_queues(device)); 3480 if (rc != 0) 3481 goto interrupted; 3482 } 3483 3484 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3485 dasd_set_target_state(device, DASD_STATE_NEW); 3486 /* dasd_delete_device destroys the device reference. */ 3487 block = device->block; 3488 dasd_delete_device(device); 3489 /* 3490 * life cycle of block is bound to device, so delete it after 3491 * device was safely removed 3492 */ 3493 if (block) 3494 dasd_free_block(block); 3495 return 0; 3496 3497 interrupted: 3498 /* interrupted by signal */ 3499 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3500 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3501 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3502 dasd_put_device(device); 3503 return rc; 3504 } 3505 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3506 3507 int dasd_generic_last_path_gone(struct dasd_device *device) 3508 { 3509 struct dasd_ccw_req *cqr; 3510 3511 dev_warn(&device->cdev->dev, "No operational channel path is left " 3512 "for the device\n"); 3513 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3514 /* First of all call extended error reporting. */ 3515 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3516 3517 if (device->state < DASD_STATE_BASIC) 3518 return 0; 3519 /* Device is active. We want to keep it. 
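 * Requests that are currently on the wire are put back to DASD_CQR_QUEUED so
 * they are retried once a path returns, and DASD_STOPPED_DC_WAIT blocks any
 * further I/O until dasd_generic_path_operational clears it again.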
*/ 3520 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3521 if ((cqr->status == DASD_CQR_IN_IO) || 3522 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3523 cqr->status = DASD_CQR_QUEUED; 3524 cqr->retries++; 3525 } 3526 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3527 dasd_device_clear_timer(device); 3528 dasd_schedule_device_bh(device); 3529 return 1; 3530 } 3531 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3532 3533 int dasd_generic_path_operational(struct dasd_device *device) 3534 { 3535 dev_info(&device->cdev->dev, "A channel path to the device has become " 3536 "operational\n"); 3537 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3538 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3539 if (device->stopped & DASD_UNRESUMED_PM) { 3540 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3541 dasd_restore_device(device); 3542 return 1; 3543 } 3544 dasd_schedule_device_bh(device); 3545 if (device->block) 3546 dasd_schedule_block_bh(device->block); 3547 3548 if (!device->stopped) 3549 wake_up(&generic_waitq); 3550 3551 return 1; 3552 } 3553 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3554 3555 int dasd_generic_notify(struct ccw_device *cdev, int event) 3556 { 3557 struct dasd_device *device; 3558 int ret; 3559 3560 device = dasd_device_from_cdev_locked(cdev); 3561 if (IS_ERR(device)) 3562 return 0; 3563 ret = 0; 3564 switch (event) { 3565 case CIO_GONE: 3566 case CIO_BOXED: 3567 case CIO_NO_PATH: 3568 device->path_data.opm = 0; 3569 device->path_data.ppm = 0; 3570 device->path_data.npm = 0; 3571 ret = dasd_generic_last_path_gone(device); 3572 break; 3573 case CIO_OPER: 3574 ret = 1; 3575 if (device->path_data.opm) 3576 ret = dasd_generic_path_operational(device); 3577 break; 3578 } 3579 dasd_put_device(device); 3580 return ret; 3581 } 3582 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3583 3584 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3585 { 3586 int chp; 3587 __u8 oldopm, eventlpm; 3588 struct dasd_device *device; 3589 3590 device = dasd_device_from_cdev_locked(cdev); 3591 if (IS_ERR(device)) 3592 return; 3593 for (chp = 0; chp < 8; chp++) { 3594 eventlpm = 0x80 >> chp; 3595 if (path_event[chp] & PE_PATH_GONE) { 3596 oldopm = device->path_data.opm; 3597 device->path_data.opm &= ~eventlpm; 3598 device->path_data.ppm &= ~eventlpm; 3599 device->path_data.npm &= ~eventlpm; 3600 if (oldopm && !device->path_data.opm) { 3601 dev_warn(&device->cdev->dev, 3602 "No verified channel paths remain " 3603 "for the device\n"); 3604 DBF_DEV_EVENT(DBF_WARNING, device, 3605 "%s", "last verified path gone"); 3606 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3607 dasd_device_set_stop_bits(device, 3608 DASD_STOPPED_DC_WAIT); 3609 } 3610 } 3611 if (path_event[chp] & PE_PATH_AVAILABLE) { 3612 device->path_data.opm &= ~eventlpm; 3613 device->path_data.ppm &= ~eventlpm; 3614 device->path_data.npm &= ~eventlpm; 3615 device->path_data.tbvpm |= eventlpm; 3616 dasd_schedule_device_bh(device); 3617 } 3618 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3619 if (!(device->path_data.opm & eventlpm) && 3620 !(device->path_data.tbvpm & eventlpm)) { 3621 /* 3622 * we can not establish a pathgroup on an 3623 * unavailable path, so trigger a path 3624 * verification first 3625 */ 3626 device->path_data.tbvpm |= eventlpm; 3627 dasd_schedule_device_bh(device); 3628 } 3629 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3630 "Pathgroup re-established\n"); 3631 if (device->discipline->kick_validate) 3632 device->discipline->kick_validate(device); 3633 
} 3634 } 3635 dasd_put_device(device); 3636 } 3637 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3638 3639 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3640 { 3641 if (!device->path_data.opm && lpm) { 3642 device->path_data.opm = lpm; 3643 dasd_generic_path_operational(device); 3644 } else 3645 device->path_data.opm |= lpm; 3646 return 0; 3647 } 3648 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3649 3650 3651 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3652 { 3653 struct dasd_device *device = dasd_device_from_cdev(cdev); 3654 struct list_head freeze_queue; 3655 struct dasd_ccw_req *cqr, *n; 3656 struct dasd_ccw_req *refers; 3657 int rc; 3658 3659 if (IS_ERR(device)) 3660 return PTR_ERR(device); 3661 3662 /* mark device as suspended */ 3663 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3664 3665 if (device->discipline->freeze) 3666 rc = device->discipline->freeze(device); 3667 3668 /* disallow new I/O */ 3669 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3670 3671 /* clear active requests and requeue them to block layer if possible */ 3672 INIT_LIST_HEAD(&freeze_queue); 3673 spin_lock_irq(get_ccwdev_lock(cdev)); 3674 rc = 0; 3675 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3676 /* Check status and move request to flush_queue */ 3677 if (cqr->status == DASD_CQR_IN_IO) { 3678 rc = device->discipline->term_IO(cqr); 3679 if (rc) { 3680 /* unable to terminate requeust */ 3681 dev_err(&device->cdev->dev, 3682 "Unable to terminate request %p " 3683 "on suspend\n", cqr); 3684 spin_unlock_irq(get_ccwdev_lock(cdev)); 3685 dasd_put_device(device); 3686 return rc; 3687 } 3688 } 3689 list_move_tail(&cqr->devlist, &freeze_queue); 3690 } 3691 spin_unlock_irq(get_ccwdev_lock(cdev)); 3692 3693 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3694 wait_event(dasd_flush_wq, 3695 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3696 if (cqr->status == DASD_CQR_CLEARED) 3697 cqr->status = DASD_CQR_QUEUED; 3698 3699 /* requeue requests to blocklayer will only work for 3700 block device requests */ 3701 if (_dasd_requeue_request(cqr)) 3702 continue; 3703 3704 /* remove requests from device and block queue */ 3705 list_del_init(&cqr->devlist); 3706 while (cqr->refers != NULL) { 3707 refers = cqr->refers; 3708 /* remove the request from the block queue */ 3709 list_del(&cqr->blocklist); 3710 /* free the finished erp request */ 3711 dasd_free_erp_request(cqr, cqr->memdev); 3712 cqr = refers; 3713 } 3714 if (cqr->block) 3715 list_del_init(&cqr->blocklist); 3716 cqr->block->base->discipline->free_cp( 3717 cqr, (struct request *) cqr->callback_data); 3718 } 3719 3720 /* 3721 * if requests remain then they are internal request 3722 * and go back to the device queue 3723 */ 3724 if (!list_empty(&freeze_queue)) { 3725 /* move freeze_queue to start of the ccw_queue */ 3726 spin_lock_irq(get_ccwdev_lock(cdev)); 3727 list_splice_tail(&freeze_queue, &device->ccw_queue); 3728 spin_unlock_irq(get_ccwdev_lock(cdev)); 3729 } 3730 dasd_put_device(device); 3731 return rc; 3732 } 3733 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3734 3735 int dasd_generic_restore_device(struct ccw_device *cdev) 3736 { 3737 struct dasd_device *device = dasd_device_from_cdev(cdev); 3738 int rc = 0; 3739 3740 if (IS_ERR(device)) 3741 return PTR_ERR(device); 3742 3743 /* allow new IO again */ 3744 dasd_device_remove_stop_bits(device, 3745 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3746 3747 dasd_schedule_device_bh(device); 3748 3749 /* 3750 * call discipline restore function 3751 * if device is stopped 
do nothing e.g. for disconnected devices 3752 */ 3753 if (device->discipline->restore && !(device->stopped)) 3754 rc = device->discipline->restore(device); 3755 if (rc || device->stopped) 3756 /* 3757 * if the resume failed for the DASD we put it in 3758 * an UNRESUMED stop state 3759 */ 3760 device->stopped |= DASD_UNRESUMED_PM; 3761 3762 if (device->block) 3763 dasd_schedule_block_bh(device->block); 3764 3765 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); 3766 dasd_put_device(device); 3767 return 0; 3768 } 3769 EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 3770 3771 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 3772 void *rdc_buffer, 3773 int rdc_buffer_size, 3774 int magic) 3775 { 3776 struct dasd_ccw_req *cqr; 3777 struct ccw1 *ccw; 3778 unsigned long *idaw; 3779 3780 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 3781 3782 if (IS_ERR(cqr)) { 3783 /* internal error 13 - Allocating the RDC request failed*/ 3784 dev_err(&device->cdev->dev, 3785 "An error occurred in the DASD device driver, " 3786 "reason=%s\n", "13"); 3787 return cqr; 3788 } 3789 3790 ccw = cqr->cpaddr; 3791 ccw->cmd_code = CCW_CMD_RDC; 3792 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { 3793 idaw = (unsigned long *) (cqr->data); 3794 ccw->cda = (__u32)(addr_t) idaw; 3795 ccw->flags = CCW_FLAG_IDA; 3796 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); 3797 } else { 3798 ccw->cda = (__u32)(addr_t) rdc_buffer; 3799 ccw->flags = 0; 3800 } 3801 3802 ccw->count = rdc_buffer_size; 3803 cqr->startdev = device; 3804 cqr->memdev = device; 3805 cqr->expires = 10*HZ; 3806 cqr->retries = 256; 3807 cqr->buildclk = get_tod_clock(); 3808 cqr->status = DASD_CQR_FILLED; 3809 return cqr; 3810 } 3811 3812 3813 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 3814 void *rdc_buffer, int rdc_buffer_size) 3815 { 3816 int ret; 3817 struct dasd_ccw_req *cqr; 3818 3819 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 3820 magic); 3821 if (IS_ERR(cqr)) 3822 return PTR_ERR(cqr); 3823 3824 ret = dasd_sleep_on(cqr); 3825 dasd_sfree_request(cqr, cqr->memdev); 3826 return ret; 3827 } 3828 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 3829 3830 /* 3831 * In command mode and transport mode we need to look for sense 3832 * data in different places. The sense data itself is allways 3833 * an array of 32 bytes, so we can unify the sense data access 3834 * for both modes. 
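 * A caller only needs to check for a NULL result before interpreting the
 * 32 bytes of sense data, e.g. (illustrative sketch):
 *
 *	sense = dasd_get_sense(irb);
 *	if (!sense)
 *		return;
 *	... inspect sense[0] through sense[31] ...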
3835 */ 3836 char *dasd_get_sense(struct irb *irb) 3837 { 3838 struct tsb *tsb = NULL; 3839 char *sense = NULL; 3840 3841 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 3842 if (irb->scsw.tm.tcw) 3843 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 3844 irb->scsw.tm.tcw); 3845 if (tsb && tsb->length == 64 && tsb->flags) 3846 switch (tsb->flags & 0x07) { 3847 case 1: /* tsa_iostat */ 3848 sense = tsb->tsa.iostat.sense; 3849 break; 3850 case 2: /* tsa_ddpc */ 3851 sense = tsb->tsa.ddpc.sense; 3852 break; 3853 default: 3854 /* currently we don't use interrogate data */ 3855 break; 3856 } 3857 } else if (irb->esw.esw0.erw.cons) { 3858 sense = irb->ecw; 3859 } 3860 return sense; 3861 } 3862 EXPORT_SYMBOL_GPL(dasd_get_sense); 3863 3864 void dasd_generic_shutdown(struct ccw_device *cdev) 3865 { 3866 struct dasd_device *device; 3867 3868 device = dasd_device_from_cdev(cdev); 3869 if (IS_ERR(device)) 3870 return; 3871 3872 if (device->block) 3873 dasd_schedule_block_bh(device->block); 3874 3875 dasd_schedule_device_bh(device); 3876 3877 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); 3878 } 3879 EXPORT_SYMBOL_GPL(dasd_generic_shutdown); 3880 3881 static int __init dasd_init(void) 3882 { 3883 int rc; 3884 3885 init_waitqueue_head(&dasd_init_waitq); 3886 init_waitqueue_head(&dasd_flush_wq); 3887 init_waitqueue_head(&generic_waitq); 3888 init_waitqueue_head(&shutdown_waitq); 3889 3890 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 3891 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 3892 if (dasd_debug_area == NULL) { 3893 rc = -ENOMEM; 3894 goto failed; 3895 } 3896 debug_register_view(dasd_debug_area, &debug_sprintf_view); 3897 debug_set_level(dasd_debug_area, DBF_WARNING); 3898 3899 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 3900 3901 dasd_diag_discipline_pointer = NULL; 3902 3903 dasd_statistics_createroot(); 3904 3905 rc = dasd_devmap_init(); 3906 if (rc) 3907 goto failed; 3908 rc = dasd_gendisk_init(); 3909 if (rc) 3910 goto failed; 3911 rc = dasd_parse(); 3912 if (rc) 3913 goto failed; 3914 rc = dasd_eer_init(); 3915 if (rc) 3916 goto failed; 3917 #ifdef CONFIG_PROC_FS 3918 rc = dasd_proc_init(); 3919 if (rc) 3920 goto failed; 3921 #endif 3922 3923 return 0; 3924 failed: 3925 pr_info("The DASD device driver could not be initialized\n"); 3926 dasd_exit(); 3927 return rc; 3928 } 3929 3930 module_init(dasd_init); 3931 module_exit(dasd_exit); 3932