/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		if (device->block->debugfs_dentry)
			debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	if (device->debugfs_dentry)
		debugfs_remove(device->debugfs_dentry);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				goto out;
			}
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	if (device->discipline->ready_to_basic) {
		rc = device->discipline->ready_to_basic(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
439 */ 440 static int dasd_state_online_to_ready(struct dasd_device *device) 441 { 442 int rc; 443 struct gendisk *disk; 444 struct disk_part_iter piter; 445 struct hd_struct *part; 446 447 if (device->discipline->online_to_ready) { 448 rc = device->discipline->online_to_ready(device); 449 if (rc) 450 return rc; 451 } 452 453 device->state = DASD_STATE_READY; 454 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 455 disk = device->block->bdev->bd_disk; 456 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 457 while ((part = disk_part_iter_next(&piter))) 458 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 459 disk_part_iter_exit(&piter); 460 } 461 return 0; 462 } 463 464 /* 465 * Device startup state changes. 466 */ 467 static int dasd_increase_state(struct dasd_device *device) 468 { 469 int rc; 470 471 rc = 0; 472 if (device->state == DASD_STATE_NEW && 473 device->target >= DASD_STATE_KNOWN) 474 rc = dasd_state_new_to_known(device); 475 476 if (!rc && 477 device->state == DASD_STATE_KNOWN && 478 device->target >= DASD_STATE_BASIC) 479 rc = dasd_state_known_to_basic(device); 480 481 if (!rc && 482 device->state == DASD_STATE_BASIC && 483 device->target >= DASD_STATE_READY) 484 rc = dasd_state_basic_to_ready(device); 485 486 if (!rc && 487 device->state == DASD_STATE_UNFMT && 488 device->target > DASD_STATE_UNFMT) 489 rc = -EPERM; 490 491 if (!rc && 492 device->state == DASD_STATE_READY && 493 device->target >= DASD_STATE_ONLINE) 494 rc = dasd_state_ready_to_online(device); 495 496 return rc; 497 } 498 499 /* 500 * Device shutdown state changes. 501 */ 502 static int dasd_decrease_state(struct dasd_device *device) 503 { 504 int rc; 505 506 rc = 0; 507 if (device->state == DASD_STATE_ONLINE && 508 device->target <= DASD_STATE_READY) 509 rc = dasd_state_online_to_ready(device); 510 511 if (!rc && 512 device->state == DASD_STATE_READY && 513 device->target <= DASD_STATE_BASIC) 514 rc = dasd_state_ready_to_basic(device); 515 516 if (!rc && 517 device->state == DASD_STATE_UNFMT && 518 device->target <= DASD_STATE_BASIC) 519 rc = dasd_state_unfmt_to_basic(device); 520 521 if (!rc && 522 device->state == DASD_STATE_BASIC && 523 device->target <= DASD_STATE_KNOWN) 524 rc = dasd_state_basic_to_known(device); 525 526 if (!rc && 527 device->state == DASD_STATE_KNOWN && 528 device->target <= DASD_STATE_NEW) 529 rc = dasd_state_known_to_new(device); 530 531 return rc; 532 } 533 534 /* 535 * This is the main startup/shutdown routine. 536 */ 537 static void dasd_change_state(struct dasd_device *device) 538 { 539 int rc; 540 541 if (device->state == device->target) 542 /* Already where we want to go today... */ 543 return; 544 if (device->state < device->target) 545 rc = dasd_increase_state(device); 546 else 547 rc = dasd_decrease_state(device); 548 if (rc == -EAGAIN) 549 return; 550 if (rc) 551 device->target = device->state; 552 553 /* let user-space know that the device status changed */ 554 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 555 556 if (device->state == device->target) 557 wake_up(&dasd_init_waitq); 558 } 559 560 /* 561 * Kick starter for devices that did not complete the startup/shutdown 562 * procedure or were sleeping because of a pending state. 563 * dasd_kick_device will schedule a call do do_kick_device to the kernel 564 * event daemon. 
/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile_info dasd_global_profile_data;
static struct dentry *dasd_global_profile_dentry;
static struct dentry *dasd_debugfs_global_entry;

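/*
 * Note on the profiling interface implemented below: statistics can be
 * enabled per base device and per block device through the "statistics"
 * debugfs file that dasd_profile_init() creates underneath the
 * respective device directory, and globally through the file created by
 * dasd_statistics_createroot() in the "global" directory.  Writing
 * "on", "off" or "reset" to one of these files switches or clears the
 * corresponding counters, reading it dumps the histograms produced by
 * dasd_stats_seq_print().
 */
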
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	if (dasd_global_profile_level) {
		dasd_global_profile_data.dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile_data.dasd_read_nr_req[counter]++;
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			       \
{								       \
	for (index = 0; index < 31 && value >> (2+index); index++)    \
		;						       \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		getnstimeofday(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	if (dasd_global_profile_level) {
		dasd_profile_end_add_data(&dasd_global_profile_data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

void dasd_global_profile_reset(void)
{
	memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data));
	getnstimeofday(&dasd_global_profile_data.starttod);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (!rc)
			rc = user_len;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_printf(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_printf(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_printf(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_printf(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_printf(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_printf(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_printf(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_printf(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_printf(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_printf(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_printf(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_printf(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static ssize_t dasd_stats_global_write(struct file *file,
				       const char __user *user_buf,
				       size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	ssize_t rc;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);
	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_global_profile_reset();
	} else if (strncmp(str, "on", 2) == 0) {
		dasd_global_profile_reset();
		dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
	} else if (strncmp(str, "off", 3) == 0) {
		dasd_global_profile_level = DASD_PROFILE_OFF;
	} else
		rc = -EINVAL;
	vfree(buffer);
	return rc;
}

static int dasd_stats_global_show(struct seq_file *m, void *v)
{
	if (!dasd_global_profile_level) {
		seq_printf(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, &dasd_global_profile_data);
	return 0;
}

static int dasd_stats_global_open(struct inode *inode, struct file *file)
{
	return single_open(file, dasd_stats_global_show, NULL);
}

static const struct file_operations dasd_stats_global_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_global_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_global_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	if (profile->dentry) {
		debugfs_remove(profile->dentry);
		profile->dentry = NULL;
	}
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	if (dasd_global_profile_dentry) {
		debugfs_remove(dasd_global_profile_dentry);
		dasd_global_profile_dentry = NULL;
	}
	if (dasd_debugfs_global_entry)
		debugfs_remove(dasd_debugfs_global_entry);
	if (dasd_debugfs_root_entry)
		debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	umode_t mode;
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	dasd_debugfs_global_entry = NULL;
	dasd_global_profile_dentry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global",
				 dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;

	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry,
				  NULL, &dasd_stats_global_fops);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_global_profile_dentry = pde;
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_printf(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

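/*
 * Illustrative usage sketch (not called anywhere in this file; the
 * field values are only examples): a discipline typically allocates a
 * request, fills in the channel program and bookkeeping fields and then
 * hands it over for execution, roughly like
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return cqr;
 *	cqr->startdev = device;
 *	cqr->memdev = device;
 *	cqr->retries = 256;
 *	cqr->buildclk = get_tod_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *
 * and releases it again with dasd_sfree_request(cqr, cqr->memdev) once
 * the request has completed.
 */
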
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

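/*
 * Overview of the request states handled in this file: a freshly built
 * request is DASD_CQR_FILLED, becomes DASD_CQR_QUEUED when it is put on
 * a device ccw queue and DASD_CQR_IN_IO once start_IO succeeded.  The
 * interrupt handler and the terminate/flush paths move it on to
 * DASD_CQR_SUCCESS, DASD_CQR_ERROR, DASD_CQR_CLEAR_PENDING or
 * DASD_CQR_CLEARED, and __dasd_device_process_final_queue() finally
 * translates those into DASD_CQR_DONE, DASD_CQR_NEED_ERP or
 * DASD_CQR_TERMINATED before the callback is invoked.
 */
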
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= device->path_data.opm;
		if (!cqr->lpm)
			cqr->lpm = device->path_data.opm;
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != device->path_data.opm) {
			cqr->lpm = device->path_data.opm;
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			device->path_data.opm = 0;
			device->path_data.ppm = 0;
			device->path_data.npm = 0;
			device->path_data.tbvpm =
				ccw_device_get_path_mask(device->cdev);
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	cqr = (struct dasd_ccw_req *) intparm;
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}
		device->discipline->dump_sense_dbf(device, irb, "int");
		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}
	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					       struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
			/*
			 * IO in safe offline processing should not
			 * run out of retries
			 */
			cqr->retries++;
		}
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer
	 * exception: only the disconnect or unresumed bits are set and the
	 * cqr is a path verification request
	 */
	if (device->stopped &&
	    !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM))
	      && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) {
		cqr->intrc = -EAGAIN;
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

static void __dasd_device_check_path_events(struct dasd_device *device)
{
	int rc;

	if (device->path_data.tbvpm) {
		if (device->stopped & ~(DASD_STOPPED_DC_WAIT |
					DASD_UNRESUMED_PM))
			return;
		rc = device->discipline->verify_path(
			device, device->path_data.tbvpm);
		if (rc)
			dasd_device_set_timer(device, 50);
		else
			device->path_data.tbvpm = 0;
	}
};

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_tod_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
1987 */ 1988 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1989 wait_event(dasd_flush_wq, 1990 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1991 /* 1992 * Now set each request back to TERMINATED, DONE or NEED_ERP 1993 * and call the callback function of flushed requests 1994 */ 1995 __dasd_device_process_final_queue(device, &flush_queue); 1996 return rc; 1997 } 1998 1999 /* 2000 * Acquire the device lock and process queues for the device. 2001 */ 2002 static void dasd_device_tasklet(struct dasd_device *device) 2003 { 2004 struct list_head final_queue; 2005 2006 atomic_set (&device->tasklet_scheduled, 0); 2007 INIT_LIST_HEAD(&final_queue); 2008 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2009 /* Check expire time of first request on the ccw queue. */ 2010 __dasd_device_check_expire(device); 2011 /* find final requests on ccw queue */ 2012 __dasd_device_process_ccw_queue(device, &final_queue); 2013 __dasd_device_check_path_events(device); 2014 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2015 /* Now call the callback function of requests with final status */ 2016 __dasd_device_process_final_queue(device, &final_queue); 2017 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2018 /* Now check if the head of the ccw queue needs to be started. */ 2019 __dasd_device_start_head(device); 2020 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2021 if (waitqueue_active(&shutdown_waitq)) 2022 wake_up(&shutdown_waitq); 2023 dasd_put_device(device); 2024 } 2025 2026 /* 2027 * Schedules a call to dasd_tasklet over the device tasklet. 2028 */ 2029 void dasd_schedule_device_bh(struct dasd_device *device) 2030 { 2031 /* Protect against rescheduling. */ 2032 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2033 return; 2034 dasd_get_device(device); 2035 tasklet_hi_schedule(&device->tasklet); 2036 } 2037 2038 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2039 { 2040 device->stopped |= bits; 2041 } 2042 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2043 2044 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2045 { 2046 device->stopped &= ~bits; 2047 if (!device->stopped) 2048 wake_up(&generic_waitq); 2049 } 2050 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2051 2052 /* 2053 * Queue a request to the head of the device ccw_queue. 2054 * Start the I/O if possible. 2055 */ 2056 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2057 { 2058 struct dasd_device *device; 2059 unsigned long flags; 2060 2061 device = cqr->startdev; 2062 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2063 cqr->status = DASD_CQR_QUEUED; 2064 list_add(&cqr->devlist, &device->ccw_queue); 2065 /* let the bh start the request to keep them in order */ 2066 dasd_schedule_device_bh(device); 2067 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2068 } 2069 2070 /* 2071 * Queue a request to the tail of the device ccw_queue. 2072 * Start the I/O if possible. 2073 */ 2074 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2075 { 2076 struct dasd_device *device; 2077 unsigned long flags; 2078 2079 device = cqr->startdev; 2080 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2081 cqr->status = DASD_CQR_QUEUED; 2082 list_add_tail(&cqr->devlist, &device->ccw_queue); 2083 /* let the bh start the request to keep them in order */ 2084 dasd_schedule_device_bh(device); 2085 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2086 } 2087 2088 /* 2089 * Wakeup helper for the 'sleep_on' functions. 
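 *
 * A rough sketch of the handshake used by the sleep_on family, distilled
 * from the code below (simplified, error handling omitted):
 *
 *	cqr->callback	   = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *
 * dasd_wakeup_cb() runs when the request reaches a final state: it sets
 * callback_data to DASD_SLEEPON_END_TAG under the ccw device lock and wakes
 * generic_waitq; _wait_for_wakeup() reads the tag under the same lock, so
 * the waiter cannot miss the transition.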
2090 */ 2091 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2092 { 2093 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2094 cqr->callback_data = DASD_SLEEPON_END_TAG; 2095 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2096 wake_up(&generic_waitq); 2097 } 2098 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2099 2100 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2101 { 2102 struct dasd_device *device; 2103 int rc; 2104 2105 device = cqr->startdev; 2106 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2107 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2108 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2109 return rc; 2110 } 2111 2112 /* 2113 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2114 */ 2115 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2116 { 2117 struct dasd_device *device; 2118 dasd_erp_fn_t erp_fn; 2119 2120 if (cqr->status == DASD_CQR_FILLED) 2121 return 0; 2122 device = cqr->startdev; 2123 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2124 if (cqr->status == DASD_CQR_TERMINATED) { 2125 device->discipline->handle_terminated_request(cqr); 2126 return 1; 2127 } 2128 if (cqr->status == DASD_CQR_NEED_ERP) { 2129 erp_fn = device->discipline->erp_action(cqr); 2130 erp_fn(cqr); 2131 return 1; 2132 } 2133 if (cqr->status == DASD_CQR_FAILED) 2134 dasd_log_sense(cqr, &cqr->irb); 2135 if (cqr->refers) { 2136 __dasd_process_erp(device, cqr); 2137 return 1; 2138 } 2139 } 2140 return 0; 2141 } 2142 2143 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2144 { 2145 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2146 if (cqr->refers) /* erp is not done yet */ 2147 return 1; 2148 return ((cqr->status != DASD_CQR_DONE) && 2149 (cqr->status != DASD_CQR_FAILED)); 2150 } else 2151 return (cqr->status == DASD_CQR_FILLED); 2152 } 2153 2154 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2155 { 2156 struct dasd_device *device; 2157 int rc; 2158 struct list_head ccw_queue; 2159 struct dasd_ccw_req *cqr; 2160 2161 INIT_LIST_HEAD(&ccw_queue); 2162 maincqr->status = DASD_CQR_FILLED; 2163 device = maincqr->startdev; 2164 list_add(&maincqr->blocklist, &ccw_queue); 2165 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2166 cqr = list_first_entry(&ccw_queue, 2167 struct dasd_ccw_req, blocklist)) { 2168 2169 if (__dasd_sleep_on_erp(cqr)) 2170 continue; 2171 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2172 continue; 2173 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2174 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2175 cqr->status = DASD_CQR_FAILED; 2176 cqr->intrc = -EPERM; 2177 continue; 2178 } 2179 /* Non-temporary stop condition will trigger fail fast */ 2180 if (device->stopped & ~DASD_STOPPED_PENDING && 2181 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2182 (!dasd_eer_enabled(device))) { 2183 cqr->status = DASD_CQR_FAILED; 2184 cqr->intrc = -ENOLINK; 2185 continue; 2186 } 2187 /* Don't try to start requests if device is stopped */ 2188 if (interruptible) { 2189 rc = wait_event_interruptible( 2190 generic_waitq, !(device->stopped)); 2191 if (rc == -ERESTARTSYS) { 2192 cqr->status = DASD_CQR_FAILED; 2193 maincqr->intrc = rc; 2194 continue; 2195 } 2196 } else 2197 wait_event(generic_waitq, !(device->stopped)); 2198 2199 if (!cqr->callback) 2200 cqr->callback = dasd_wakeup_cb; 2201 2202 cqr->callback_data = DASD_SLEEPON_START_TAG; 2203 dasd_add_request_tail(cqr); 2204 if (interruptible) { 2205 rc = wait_event_interruptible( 2206 generic_waitq, 
_wait_for_wakeup(cqr)); 2207 if (rc == -ERESTARTSYS) { 2208 dasd_cancel_req(cqr); 2209 /* wait (non-interruptible) for final status */ 2210 wait_event(generic_waitq, 2211 _wait_for_wakeup(cqr)); 2212 cqr->status = DASD_CQR_FAILED; 2213 maincqr->intrc = rc; 2214 continue; 2215 } 2216 } else 2217 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2218 } 2219 2220 maincqr->endclk = get_tod_clock(); 2221 if ((maincqr->status != DASD_CQR_DONE) && 2222 (maincqr->intrc != -ERESTARTSYS)) 2223 dasd_log_sense(maincqr, &maincqr->irb); 2224 if (maincqr->status == DASD_CQR_DONE) 2225 rc = 0; 2226 else if (maincqr->intrc) 2227 rc = maincqr->intrc; 2228 else 2229 rc = -EIO; 2230 return rc; 2231 } 2232 2233 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2234 { 2235 struct dasd_ccw_req *cqr; 2236 2237 list_for_each_entry(cqr, ccw_queue, blocklist) { 2238 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2239 return 0; 2240 } 2241 2242 return 1; 2243 } 2244 2245 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2246 { 2247 struct dasd_device *device; 2248 int rc; 2249 struct dasd_ccw_req *cqr, *n; 2250 2251 retry: 2252 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2253 device = cqr->startdev; 2254 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2255 continue; 2256 2257 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2258 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2259 cqr->status = DASD_CQR_FAILED; 2260 cqr->intrc = -EPERM; 2261 continue; 2262 } 2263 /*Non-temporary stop condition will trigger fail fast*/ 2264 if (device->stopped & ~DASD_STOPPED_PENDING && 2265 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2266 !dasd_eer_enabled(device)) { 2267 cqr->status = DASD_CQR_FAILED; 2268 cqr->intrc = -EAGAIN; 2269 continue; 2270 } 2271 2272 /*Don't try to start requests if device is stopped*/ 2273 if (interruptible) { 2274 rc = wait_event_interruptible( 2275 generic_waitq, !device->stopped); 2276 if (rc == -ERESTARTSYS) { 2277 cqr->status = DASD_CQR_FAILED; 2278 cqr->intrc = rc; 2279 continue; 2280 } 2281 } else 2282 wait_event(generic_waitq, !(device->stopped)); 2283 2284 if (!cqr->callback) 2285 cqr->callback = dasd_wakeup_cb; 2286 cqr->callback_data = DASD_SLEEPON_START_TAG; 2287 dasd_add_request_tail(cqr); 2288 } 2289 2290 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2291 2292 rc = 0; 2293 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2294 if (__dasd_sleep_on_erp(cqr)) 2295 rc = 1; 2296 } 2297 if (rc) 2298 goto retry; 2299 2300 2301 return 0; 2302 } 2303 2304 /* 2305 * Queue a request to the tail of the device ccw_queue and wait for 2306 * it's completion. 2307 */ 2308 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2309 { 2310 return _dasd_sleep_on(cqr, 0); 2311 } 2312 2313 /* 2314 * Start requests from a ccw_queue and wait for their completion. 2315 */ 2316 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2317 { 2318 return _dasd_sleep_on_queue(ccw_queue, 0); 2319 } 2320 EXPORT_SYMBOL(dasd_sleep_on_queue); 2321 2322 /* 2323 * Queue a request to the tail of the device ccw_queue and wait 2324 * interruptible for it's completion. 2325 */ 2326 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2327 { 2328 return _dasd_sleep_on(cqr, 1); 2329 } 2330 2331 /* 2332 * Whoa nelly now it gets really hairy. For some functions (e.g. 
steal lock 2333 * for eckd devices) the currently running request has to be terminated 2334 * and be put back to status queued, before the special request is added 2335 * to the head of the queue. Then the special request is waited on normally. 2336 */ 2337 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2338 { 2339 struct dasd_ccw_req *cqr; 2340 int rc; 2341 2342 if (list_empty(&device->ccw_queue)) 2343 return 0; 2344 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2345 rc = device->discipline->term_IO(cqr); 2346 if (!rc) 2347 /* 2348 * CQR terminated because a more important request is pending. 2349 * Undo decreasing of retry counter because this is 2350 * not an error case. 2351 */ 2352 cqr->retries++; 2353 return rc; 2354 } 2355 2356 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2357 { 2358 struct dasd_device *device; 2359 int rc; 2360 2361 device = cqr->startdev; 2362 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2363 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2364 cqr->status = DASD_CQR_FAILED; 2365 cqr->intrc = -EPERM; 2366 return -EIO; 2367 } 2368 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2369 rc = _dasd_term_running_cqr(device); 2370 if (rc) { 2371 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2372 return rc; 2373 } 2374 cqr->callback = dasd_wakeup_cb; 2375 cqr->callback_data = DASD_SLEEPON_START_TAG; 2376 cqr->status = DASD_CQR_QUEUED; 2377 /* 2378 * add new request as second 2379 * first the terminated cqr needs to be finished 2380 */ 2381 list_add(&cqr->devlist, device->ccw_queue.next); 2382 2383 /* let the bh start the request to keep them in order */ 2384 dasd_schedule_device_bh(device); 2385 2386 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2387 2388 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2389 2390 if (cqr->status == DASD_CQR_DONE) 2391 rc = 0; 2392 else if (cqr->intrc) 2393 rc = cqr->intrc; 2394 else 2395 rc = -EIO; 2396 2397 /* kick tasklets */ 2398 dasd_schedule_device_bh(device); 2399 if (device->block) 2400 dasd_schedule_block_bh(device->block); 2401 2402 return rc; 2403 } 2404 2405 /* 2406 * Cancels a request that was started with dasd_sleep_on_req. 2407 * This is useful to timeout requests. The request will be 2408 * terminated if it is currently in i/o. 2409 * Returns 0 if request termination was successful 2410 * negative error code if termination failed 2411 * Cancellation of a request is an asynchronous operation! The calling 2412 * function has to wait until the request is properly returned via callback. 2413 */ 2414 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2415 { 2416 struct dasd_device *device = cqr->startdev; 2417 unsigned long flags; 2418 int rc; 2419 2420 rc = 0; 2421 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2422 switch (cqr->status) { 2423 case DASD_CQR_QUEUED: 2424 /* request was not started - just set to cleared */ 2425 cqr->status = DASD_CQR_CLEARED; 2426 break; 2427 case DASD_CQR_IN_IO: 2428 /* request in IO - terminate IO and release again */ 2429 rc = device->discipline->term_IO(cqr); 2430 if (rc) { 2431 dev_err(&device->cdev->dev, 2432 "Cancelling request %p failed with rc=%d\n", 2433 cqr, rc); 2434 } else { 2435 cqr->stopclk = get_tod_clock(); 2436 } 2437 break; 2438 default: /* already finished or clear pending - do nothing */ 2439 break; 2440 } 2441 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2442 dasd_schedule_device_bh(device); 2443 return rc; 2444 } 2445 2446 /* 2447 * SECTION: Operations of the dasd_block layer. 
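 *
 * Rough overview of the request flow implemented in this section (derived
 * from the functions below): requests are fetched from the block layer
 * queue in __dasd_process_request_queue(), translated into ccw requests via
 * the discipline's build_cp() and chained on block->ccw_queue;
 * __dasd_block_start_head() then hands them to the base dasd_device with
 * dasd_add_request_tail(). Finished requests trigger dasd_return_cqr_cb(),
 * which schedules the block tasklet, and are completed towards the block
 * layer in __dasd_cleanup_cqr().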
2448 */ 2449 2450 /* 2451 * Timeout function for dasd_block. This is used when the block layer 2452 * is waiting for something that may not come reliably, (e.g. a state 2453 * change interrupt) 2454 */ 2455 static void dasd_block_timeout(unsigned long ptr) 2456 { 2457 unsigned long flags; 2458 struct dasd_block *block; 2459 2460 block = (struct dasd_block *) ptr; 2461 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2462 /* re-activate request queue */ 2463 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2464 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2465 dasd_schedule_block_bh(block); 2466 } 2467 2468 /* 2469 * Setup timeout for a dasd_block in jiffies. 2470 */ 2471 void dasd_block_set_timer(struct dasd_block *block, int expires) 2472 { 2473 if (expires == 0) 2474 del_timer(&block->timer); 2475 else 2476 mod_timer(&block->timer, jiffies + expires); 2477 } 2478 2479 /* 2480 * Clear timeout for a dasd_block. 2481 */ 2482 void dasd_block_clear_timer(struct dasd_block *block) 2483 { 2484 del_timer(&block->timer); 2485 } 2486 2487 /* 2488 * Process finished error recovery ccw. 2489 */ 2490 static void __dasd_process_erp(struct dasd_device *device, 2491 struct dasd_ccw_req *cqr) 2492 { 2493 dasd_erp_fn_t erp_fn; 2494 2495 if (cqr->status == DASD_CQR_DONE) 2496 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2497 else 2498 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2499 erp_fn = device->discipline->erp_postaction(cqr); 2500 erp_fn(cqr); 2501 } 2502 2503 /* 2504 * Fetch requests from the block device queue. 2505 */ 2506 static void __dasd_process_request_queue(struct dasd_block *block) 2507 { 2508 struct request_queue *queue; 2509 struct request *req; 2510 struct dasd_ccw_req *cqr; 2511 struct dasd_device *basedev; 2512 unsigned long flags; 2513 queue = block->request_queue; 2514 basedev = block->base; 2515 /* No queue ? Then there is nothing to do. */ 2516 if (queue == NULL) 2517 return; 2518 2519 /* 2520 * We requeue request from the block device queue to the ccw 2521 * queue only in two states. In state DASD_STATE_READY the 2522 * partition detection is done and we need to requeue requests 2523 * for that. State DASD_STATE_ONLINE is normal block device 2524 * operation. 2525 */ 2526 if (basedev->state < DASD_STATE_READY) { 2527 while ((req = blk_fetch_request(block->request_queue))) 2528 __blk_end_request_all(req, -EIO); 2529 return; 2530 } 2531 /* Now we try to fetch requests from the request queue */ 2532 while ((req = blk_peek_request(queue))) { 2533 if (basedev->features & DASD_FEATURE_READONLY && 2534 rq_data_dir(req) == WRITE) { 2535 DBF_DEV_EVENT(DBF_ERR, basedev, 2536 "Rejecting write request %p", 2537 req); 2538 blk_start_request(req); 2539 __blk_end_request_all(req, -EIO); 2540 continue; 2541 } 2542 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2543 (basedev->features & DASD_FEATURE_FAILFAST || 2544 blk_noretry_request(req))) { 2545 DBF_DEV_EVENT(DBF_ERR, basedev, 2546 "Rejecting failfast request %p", 2547 req); 2548 blk_start_request(req); 2549 __blk_end_request_all(req, -ETIMEDOUT); 2550 continue; 2551 } 2552 cqr = basedev->discipline->build_cp(basedev, block, req); 2553 if (IS_ERR(cqr)) { 2554 if (PTR_ERR(cqr) == -EBUSY) 2555 break; /* normal end condition */ 2556 if (PTR_ERR(cqr) == -ENOMEM) 2557 break; /* terminate request queue loop */ 2558 if (PTR_ERR(cqr) == -EAGAIN) { 2559 /* 2560 * The current request cannot be build right 2561 * now, we have to try later. 
If this request 2562 * is the head-of-queue we stop the device 2563 * for 1/2 second. 2564 */ 2565 if (!list_empty(&block->ccw_queue)) 2566 break; 2567 spin_lock_irqsave( 2568 get_ccwdev_lock(basedev->cdev), flags); 2569 dasd_device_set_stop_bits(basedev, 2570 DASD_STOPPED_PENDING); 2571 spin_unlock_irqrestore( 2572 get_ccwdev_lock(basedev->cdev), flags); 2573 dasd_block_set_timer(block, HZ/2); 2574 break; 2575 } 2576 DBF_DEV_EVENT(DBF_ERR, basedev, 2577 "CCW creation failed (rc=%ld) " 2578 "on request %p", 2579 PTR_ERR(cqr), req); 2580 blk_start_request(req); 2581 __blk_end_request_all(req, -EIO); 2582 continue; 2583 } 2584 /* 2585 * Note: callback is set to dasd_return_cqr_cb in 2586 * __dasd_block_start_head to cover erp requests as well 2587 */ 2588 cqr->callback_data = (void *) req; 2589 cqr->status = DASD_CQR_FILLED; 2590 req->completion_data = cqr; 2591 blk_start_request(req); 2592 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2593 INIT_LIST_HEAD(&cqr->devlist); 2594 dasd_profile_start(block, cqr, req); 2595 } 2596 } 2597 2598 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2599 { 2600 struct request *req; 2601 int status; 2602 int error = 0; 2603 2604 req = (struct request *) cqr->callback_data; 2605 dasd_profile_end(cqr->block, cqr, req); 2606 status = cqr->block->base->discipline->free_cp(cqr, req); 2607 if (status < 0) 2608 error = status; 2609 else if (status == 0) { 2610 if (cqr->intrc == -EPERM) 2611 error = -EBADE; 2612 else if (cqr->intrc == -ENOLINK || 2613 cqr->intrc == -ETIMEDOUT) 2614 error = cqr->intrc; 2615 else 2616 error = -EIO; 2617 } 2618 __blk_end_request_all(req, error); 2619 } 2620 2621 /* 2622 * Process ccw request queue. 2623 */ 2624 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2625 struct list_head *final_queue) 2626 { 2627 struct list_head *l, *n; 2628 struct dasd_ccw_req *cqr; 2629 dasd_erp_fn_t erp_fn; 2630 unsigned long flags; 2631 struct dasd_device *base = block->base; 2632 2633 restart: 2634 /* Process request with final status. */ 2635 list_for_each_safe(l, n, &block->ccw_queue) { 2636 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2637 if (cqr->status != DASD_CQR_DONE && 2638 cqr->status != DASD_CQR_FAILED && 2639 cqr->status != DASD_CQR_NEED_ERP && 2640 cqr->status != DASD_CQR_TERMINATED) 2641 continue; 2642 2643 if (cqr->status == DASD_CQR_TERMINATED) { 2644 base->discipline->handle_terminated_request(cqr); 2645 goto restart; 2646 } 2647 2648 /* Process requests that may be recovered */ 2649 if (cqr->status == DASD_CQR_NEED_ERP) { 2650 erp_fn = base->discipline->erp_action(cqr); 2651 if (IS_ERR(erp_fn(cqr))) 2652 continue; 2653 goto restart; 2654 } 2655 2656 /* log sense for fatal error */ 2657 if (cqr->status == DASD_CQR_FAILED) { 2658 dasd_log_sense(cqr, &cqr->irb); 2659 } 2660 2661 /* First of all call extended error reporting. */ 2662 if (dasd_eer_enabled(base) && 2663 cqr->status == DASD_CQR_FAILED) { 2664 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2665 2666 /* restart request */ 2667 cqr->status = DASD_CQR_FILLED; 2668 cqr->retries = 255; 2669 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2670 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2671 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2672 flags); 2673 goto restart; 2674 } 2675 2676 /* Process finished ERP request. 
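		 * An ERP request is recognized by its non-NULL 'refers'
		 * pointer, which links back to the request it was built to
		 * recover.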
*/ 2677 if (cqr->refers) { 2678 __dasd_process_erp(base, cqr); 2679 goto restart; 2680 } 2681 2682 /* Rechain finished requests to final queue */ 2683 cqr->endclk = get_tod_clock(); 2684 list_move_tail(&cqr->blocklist, final_queue); 2685 } 2686 } 2687 2688 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2689 { 2690 dasd_schedule_block_bh(cqr->block); 2691 } 2692 2693 static void __dasd_block_start_head(struct dasd_block *block) 2694 { 2695 struct dasd_ccw_req *cqr; 2696 2697 if (list_empty(&block->ccw_queue)) 2698 return; 2699 /* We allways begin with the first requests on the queue, as some 2700 * of previously started requests have to be enqueued on a 2701 * dasd_device again for error recovery. 2702 */ 2703 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2704 if (cqr->status != DASD_CQR_FILLED) 2705 continue; 2706 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2707 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2708 cqr->status = DASD_CQR_FAILED; 2709 cqr->intrc = -EPERM; 2710 dasd_schedule_block_bh(block); 2711 continue; 2712 } 2713 /* Non-temporary stop condition will trigger fail fast */ 2714 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2715 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2716 (!dasd_eer_enabled(block->base))) { 2717 cqr->status = DASD_CQR_FAILED; 2718 cqr->intrc = -ENOLINK; 2719 dasd_schedule_block_bh(block); 2720 continue; 2721 } 2722 /* Don't try to start requests if device is stopped */ 2723 if (block->base->stopped) 2724 return; 2725 2726 /* just a fail safe check, should not happen */ 2727 if (!cqr->startdev) 2728 cqr->startdev = block->base; 2729 2730 /* make sure that the requests we submit find their way back */ 2731 cqr->callback = dasd_return_cqr_cb; 2732 2733 dasd_add_request_tail(cqr); 2734 } 2735 } 2736 2737 /* 2738 * Central dasd_block layer routine. Takes requests from the generic 2739 * block layer request queue, creates ccw requests, enqueues them on 2740 * a dasd_device and processes ccw requests that have been returned. 2741 */ 2742 static void dasd_block_tasklet(struct dasd_block *block) 2743 { 2744 struct list_head final_queue; 2745 struct list_head *l, *n; 2746 struct dasd_ccw_req *cqr; 2747 2748 atomic_set(&block->tasklet_scheduled, 0); 2749 INIT_LIST_HEAD(&final_queue); 2750 spin_lock(&block->queue_lock); 2751 /* Finish off requests on ccw queue */ 2752 __dasd_process_block_ccw_queue(block, &final_queue); 2753 spin_unlock(&block->queue_lock); 2754 /* Now call the callback function of requests with final status */ 2755 spin_lock_irq(&block->request_queue_lock); 2756 list_for_each_safe(l, n, &final_queue) { 2757 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2758 list_del_init(&cqr->blocklist); 2759 __dasd_cleanup_cqr(cqr); 2760 } 2761 spin_lock(&block->queue_lock); 2762 /* Get new request from the block device request queue */ 2763 __dasd_process_request_queue(block); 2764 /* Now check if the head of the ccw queue needs to be started. 
*/ 2765 __dasd_block_start_head(block); 2766 spin_unlock(&block->queue_lock); 2767 spin_unlock_irq(&block->request_queue_lock); 2768 if (waitqueue_active(&shutdown_waitq)) 2769 wake_up(&shutdown_waitq); 2770 dasd_put_device(block->base); 2771 } 2772 2773 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2774 { 2775 wake_up(&dasd_flush_wq); 2776 } 2777 2778 /* 2779 * Requeue a request back to the block request queue 2780 * only works for block requests 2781 */ 2782 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2783 { 2784 struct dasd_block *block = cqr->block; 2785 struct request *req; 2786 unsigned long flags; 2787 2788 if (!block) 2789 return -EINVAL; 2790 spin_lock_irqsave(&block->queue_lock, flags); 2791 req = (struct request *) cqr->callback_data; 2792 blk_requeue_request(block->request_queue, req); 2793 spin_unlock_irqrestore(&block->queue_lock, flags); 2794 2795 return 0; 2796 } 2797 2798 /* 2799 * Go through all request on the dasd_block request queue, cancel them 2800 * on the respective dasd_device, and return them to the generic 2801 * block layer. 2802 */ 2803 static int dasd_flush_block_queue(struct dasd_block *block) 2804 { 2805 struct dasd_ccw_req *cqr, *n; 2806 int rc, i; 2807 struct list_head flush_queue; 2808 2809 INIT_LIST_HEAD(&flush_queue); 2810 spin_lock_bh(&block->queue_lock); 2811 rc = 0; 2812 restart: 2813 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2814 /* if this request currently owned by a dasd_device cancel it */ 2815 if (cqr->status >= DASD_CQR_QUEUED) 2816 rc = dasd_cancel_req(cqr); 2817 if (rc < 0) 2818 break; 2819 /* Rechain request (including erp chain) so it won't be 2820 * touched by the dasd_block_tasklet anymore. 2821 * Replace the callback so we notice when the request 2822 * is returned from the dasd_device layer. 2823 */ 2824 cqr->callback = _dasd_wake_block_flush_cb; 2825 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2826 list_move_tail(&cqr->blocklist, &flush_queue); 2827 if (i > 1) 2828 /* moved more than one request - need to restart */ 2829 goto restart; 2830 } 2831 spin_unlock_bh(&block->queue_lock); 2832 /* Now call the callback function of flushed requests */ 2833 restart_cb: 2834 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2835 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2836 /* Process finished ERP request. */ 2837 if (cqr->refers) { 2838 spin_lock_bh(&block->queue_lock); 2839 __dasd_process_erp(block->base, cqr); 2840 spin_unlock_bh(&block->queue_lock); 2841 /* restart list_for_xx loop since dasd_process_erp 2842 * might remove multiple elements */ 2843 goto restart_cb; 2844 } 2845 /* call the callback function */ 2846 spin_lock_irq(&block->request_queue_lock); 2847 cqr->endclk = get_tod_clock(); 2848 list_del_init(&cqr->blocklist); 2849 __dasd_cleanup_cqr(cqr); 2850 spin_unlock_irq(&block->request_queue_lock); 2851 } 2852 return rc; 2853 } 2854 2855 /* 2856 * Schedules a call to dasd_tasklet over the device tasklet. 2857 */ 2858 void dasd_schedule_block_bh(struct dasd_block *block) 2859 { 2860 /* Protect against rescheduling. */ 2861 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2862 return; 2863 /* life cycle of block is bound to it's base device */ 2864 dasd_get_device(block->base); 2865 tasklet_hi_schedule(&block->tasklet); 2866 } 2867 2868 2869 /* 2870 * SECTION: external block device operations 2871 * (request queue handling, open, release, etc.) 2872 */ 2873 2874 /* 2875 * Dasd request queue function. 
Called from ll_rw_blk.c 2876 */ 2877 static void do_dasd_request(struct request_queue *queue) 2878 { 2879 struct dasd_block *block; 2880 2881 block = queue->queuedata; 2882 spin_lock(&block->queue_lock); 2883 /* Get new request from the block device request queue */ 2884 __dasd_process_request_queue(block); 2885 /* Now check if the head of the ccw queue needs to be started. */ 2886 __dasd_block_start_head(block); 2887 spin_unlock(&block->queue_lock); 2888 } 2889 2890 /* 2891 * Block timeout callback, called from the block layer 2892 * 2893 * request_queue lock is held on entry. 2894 * 2895 * Return values: 2896 * BLK_EH_RESET_TIMER if the request should be left running 2897 * BLK_EH_NOT_HANDLED if the request is handled or terminated 2898 * by the driver. 2899 */ 2900 enum blk_eh_timer_return dasd_times_out(struct request *req) 2901 { 2902 struct dasd_ccw_req *cqr = req->completion_data; 2903 struct dasd_block *block = req->q->queuedata; 2904 struct dasd_device *device; 2905 int rc = 0; 2906 2907 if (!cqr) 2908 return BLK_EH_NOT_HANDLED; 2909 2910 device = cqr->startdev ? cqr->startdev : block->base; 2911 if (!device->blk_timeout) 2912 return BLK_EH_RESET_TIMER; 2913 DBF_DEV_EVENT(DBF_WARNING, device, 2914 " dasd_times_out cqr %p status %x", 2915 cqr, cqr->status); 2916 2917 spin_lock(&block->queue_lock); 2918 spin_lock(get_ccwdev_lock(device->cdev)); 2919 cqr->retries = -1; 2920 cqr->intrc = -ETIMEDOUT; 2921 if (cqr->status >= DASD_CQR_QUEUED) { 2922 spin_unlock(get_ccwdev_lock(device->cdev)); 2923 rc = dasd_cancel_req(cqr); 2924 } else if (cqr->status == DASD_CQR_FILLED || 2925 cqr->status == DASD_CQR_NEED_ERP) { 2926 cqr->status = DASD_CQR_TERMINATED; 2927 spin_unlock(get_ccwdev_lock(device->cdev)); 2928 } else if (cqr->status == DASD_CQR_IN_ERP) { 2929 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 2930 2931 list_for_each_entry_safe(searchcqr, nextcqr, 2932 &block->ccw_queue, blocklist) { 2933 tmpcqr = searchcqr; 2934 while (tmpcqr->refers) 2935 tmpcqr = tmpcqr->refers; 2936 if (tmpcqr != cqr) 2937 continue; 2938 /* searchcqr is an ERP request for cqr */ 2939 searchcqr->retries = -1; 2940 searchcqr->intrc = -ETIMEDOUT; 2941 if (searchcqr->status >= DASD_CQR_QUEUED) { 2942 spin_unlock(get_ccwdev_lock(device->cdev)); 2943 rc = dasd_cancel_req(searchcqr); 2944 spin_lock(get_ccwdev_lock(device->cdev)); 2945 } else if ((searchcqr->status == DASD_CQR_FILLED) || 2946 (searchcqr->status == DASD_CQR_NEED_ERP)) { 2947 searchcqr->status = DASD_CQR_TERMINATED; 2948 rc = 0; 2949 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 2950 /* 2951 * Shouldn't happen; most recent ERP 2952 * request is at the front of queue 2953 */ 2954 continue; 2955 } 2956 break; 2957 } 2958 spin_unlock(get_ccwdev_lock(device->cdev)); 2959 } 2960 dasd_schedule_block_bh(block); 2961 spin_unlock(&block->queue_lock); 2962 2963 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 2964 } 2965 2966 /* 2967 * Allocate and initialize request queue and default I/O scheduler. 
2968 */ 2969 static int dasd_alloc_queue(struct dasd_block *block) 2970 { 2971 int rc; 2972 2973 block->request_queue = blk_init_queue(do_dasd_request, 2974 &block->request_queue_lock); 2975 if (block->request_queue == NULL) 2976 return -ENOMEM; 2977 2978 block->request_queue->queuedata = block; 2979 2980 elevator_exit(block->request_queue->elevator); 2981 block->request_queue->elevator = NULL; 2982 mutex_lock(&block->request_queue->sysfs_lock); 2983 rc = elevator_init(block->request_queue, "deadline"); 2984 if (rc) 2985 blk_cleanup_queue(block->request_queue); 2986 mutex_unlock(&block->request_queue->sysfs_lock); 2987 return rc; 2988 } 2989 2990 /* 2991 * Allocate and initialize request queue. 2992 */ 2993 static void dasd_setup_queue(struct dasd_block *block) 2994 { 2995 int max; 2996 2997 if (block->base->features & DASD_FEATURE_USERAW) { 2998 /* 2999 * the max_blocks value for raw_track access is 256 3000 * it is higher than the native ECKD value because we 3001 * only need one ccw per track 3002 * so the max_hw_sectors are 3003 * 2048 x 512B = 1024kB = 16 tracks 3004 */ 3005 max = 2048; 3006 } else { 3007 max = block->base->discipline->max_blocks << block->s2b_shift; 3008 } 3009 blk_queue_logical_block_size(block->request_queue, 3010 block->bp_block); 3011 blk_queue_max_hw_sectors(block->request_queue, max); 3012 blk_queue_max_segments(block->request_queue, -1L); 3013 /* with page sized segments we can translate each segement into 3014 * one idaw/tidaw 3015 */ 3016 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3017 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3018 } 3019 3020 /* 3021 * Deactivate and free request queue. 3022 */ 3023 static void dasd_free_queue(struct dasd_block *block) 3024 { 3025 if (block->request_queue) { 3026 blk_cleanup_queue(block->request_queue); 3027 block->request_queue = NULL; 3028 } 3029 } 3030 3031 /* 3032 * Flush request on the request queue. 
3033 */ 3034 static void dasd_flush_request_queue(struct dasd_block *block) 3035 { 3036 struct request *req; 3037 3038 if (!block->request_queue) 3039 return; 3040 3041 spin_lock_irq(&block->request_queue_lock); 3042 while ((req = blk_fetch_request(block->request_queue))) 3043 __blk_end_request_all(req, -EIO); 3044 spin_unlock_irq(&block->request_queue_lock); 3045 } 3046 3047 static int dasd_open(struct block_device *bdev, fmode_t mode) 3048 { 3049 struct dasd_device *base; 3050 int rc; 3051 3052 base = dasd_device_from_gendisk(bdev->bd_disk); 3053 if (!base) 3054 return -ENODEV; 3055 3056 atomic_inc(&base->block->open_count); 3057 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3058 rc = -ENODEV; 3059 goto unlock; 3060 } 3061 3062 if (!try_module_get(base->discipline->owner)) { 3063 rc = -EINVAL; 3064 goto unlock; 3065 } 3066 3067 if (dasd_probeonly) { 3068 dev_info(&base->cdev->dev, 3069 "Accessing the DASD failed because it is in " 3070 "probeonly mode\n"); 3071 rc = -EPERM; 3072 goto out; 3073 } 3074 3075 if (base->state <= DASD_STATE_BASIC) { 3076 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3077 " Cannot open unrecognized device"); 3078 rc = -ENODEV; 3079 goto out; 3080 } 3081 3082 if ((mode & FMODE_WRITE) && 3083 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3084 (base->features & DASD_FEATURE_READONLY))) { 3085 rc = -EROFS; 3086 goto out; 3087 } 3088 3089 dasd_put_device(base); 3090 return 0; 3091 3092 out: 3093 module_put(base->discipline->owner); 3094 unlock: 3095 atomic_dec(&base->block->open_count); 3096 dasd_put_device(base); 3097 return rc; 3098 } 3099 3100 static void dasd_release(struct gendisk *disk, fmode_t mode) 3101 { 3102 struct dasd_device *base = dasd_device_from_gendisk(disk); 3103 if (base) { 3104 atomic_dec(&base->block->open_count); 3105 module_put(base->discipline->owner); 3106 dasd_put_device(base); 3107 } 3108 } 3109 3110 /* 3111 * Return disk geometry. 3112 */ 3113 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3114 { 3115 struct dasd_device *base; 3116 3117 base = dasd_device_from_gendisk(bdev->bd_disk); 3118 if (!base) 3119 return -ENODEV; 3120 3121 if (!base->discipline || 3122 !base->discipline->fill_geometry) { 3123 dasd_put_device(base); 3124 return -EINVAL; 3125 } 3126 base->discipline->fill_geometry(base->block, geo); 3127 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3128 dasd_put_device(base); 3129 return 0; 3130 } 3131 3132 const struct block_device_operations 3133 dasd_device_operations = { 3134 .owner = THIS_MODULE, 3135 .open = dasd_open, 3136 .release = dasd_release, 3137 .ioctl = dasd_ioctl, 3138 .compat_ioctl = dasd_ioctl, 3139 .getgeo = dasd_getgeo, 3140 }; 3141 3142 /******************************************************************************* 3143 * end of block device operations 3144 */ 3145 3146 static void 3147 dasd_exit(void) 3148 { 3149 #ifdef CONFIG_PROC_FS 3150 dasd_proc_exit(); 3151 #endif 3152 dasd_eer_exit(); 3153 if (dasd_page_cache != NULL) { 3154 kmem_cache_destroy(dasd_page_cache); 3155 dasd_page_cache = NULL; 3156 } 3157 dasd_gendisk_exit(); 3158 dasd_devmap_exit(); 3159 if (dasd_debug_area != NULL) { 3160 debug_unregister(dasd_debug_area); 3161 dasd_debug_area = NULL; 3162 } 3163 dasd_statistics_removeroot(); 3164 } 3165 3166 /* 3167 * SECTION: common functions for ccw_driver use 3168 */ 3169 3170 /* 3171 * Is the device read-only? 3172 * Note that this function does not report the setting of the 3173 * readonly device attribute, but how it is configured in z/VM. 
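 * The check below issues DIAG X'210' for the device number and, if the call
 * succeeds (rc 0 or 2), tests the 0x80 flag in vrdcvfla, which indicates
 * that z/VM presents the virtual device as read-only.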
3174 */ 3175 int dasd_device_is_ro(struct dasd_device *device) 3176 { 3177 struct ccw_dev_id dev_id; 3178 struct diag210 diag_data; 3179 int rc; 3180 3181 if (!MACHINE_IS_VM) 3182 return 0; 3183 ccw_device_get_id(device->cdev, &dev_id); 3184 memset(&diag_data, 0, sizeof(diag_data)); 3185 diag_data.vrdcdvno = dev_id.devno; 3186 diag_data.vrdclen = sizeof(diag_data); 3187 rc = diag210(&diag_data); 3188 if (rc == 0 || rc == 2) { 3189 return diag_data.vrdcvfla & 0x80; 3190 } else { 3191 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3192 dev_id.devno, rc); 3193 return 0; 3194 } 3195 } 3196 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3197 3198 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3199 { 3200 struct ccw_device *cdev = data; 3201 int ret; 3202 3203 ret = ccw_device_set_online(cdev); 3204 if (ret) 3205 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 3206 dev_name(&cdev->dev), ret); 3207 } 3208 3209 /* 3210 * Initial attempt at a probe function. this can be simplified once 3211 * the other detection code is gone. 3212 */ 3213 int dasd_generic_probe(struct ccw_device *cdev, 3214 struct dasd_discipline *discipline) 3215 { 3216 int ret; 3217 3218 ret = dasd_add_sysfs_files(cdev); 3219 if (ret) { 3220 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3221 "dasd_generic_probe: could not add " 3222 "sysfs entries"); 3223 return ret; 3224 } 3225 cdev->handler = &dasd_int_handler; 3226 3227 /* 3228 * Automatically online either all dasd devices (dasd_autodetect) 3229 * or all devices specified with dasd= parameters during 3230 * initial probe. 3231 */ 3232 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3233 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3234 async_schedule(dasd_generic_auto_online, cdev); 3235 return 0; 3236 } 3237 3238 /* 3239 * This will one day be called from a global not_oper handler. 3240 * It is also used by driver_unregister during module unload. 3241 */ 3242 void dasd_generic_remove(struct ccw_device *cdev) 3243 { 3244 struct dasd_device *device; 3245 struct dasd_block *block; 3246 3247 cdev->handler = NULL; 3248 3249 device = dasd_device_from_cdev(cdev); 3250 if (IS_ERR(device)) { 3251 dasd_remove_sysfs_files(cdev); 3252 return; 3253 } 3254 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3255 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3256 /* Already doing offline processing */ 3257 dasd_put_device(device); 3258 dasd_remove_sysfs_files(cdev); 3259 return; 3260 } 3261 /* 3262 * This device is removed unconditionally. Set offline 3263 * flag to prevent dasd_open from opening it while it is 3264 * no quite down yet. 3265 */ 3266 dasd_set_target_state(device, DASD_STATE_NEW); 3267 /* dasd_delete_device destroys the device reference. */ 3268 block = device->block; 3269 dasd_delete_device(device); 3270 /* 3271 * life cycle of block is bound to device, so delete it after 3272 * device was safely removed 3273 */ 3274 if (block) 3275 dasd_free_block(block); 3276 3277 dasd_remove_sysfs_files(cdev); 3278 } 3279 3280 /* 3281 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3282 * the device is detected for the first time and is supposed to be used 3283 * or the user has started activation through sysfs. 
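 *
 * A minimal sketch of the expected caller, with purely illustrative names
 * (my_dasd_set_online and my_dasd_discipline are not defined in this file):
 *
 *	static int my_dasd_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &my_dasd_discipline);
 *	}
 *
 * The discipline passed in must at least provide .owner and .check_device,
 * both of which are used below.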
3284 */ 3285 int dasd_generic_set_online(struct ccw_device *cdev, 3286 struct dasd_discipline *base_discipline) 3287 { 3288 struct dasd_discipline *discipline; 3289 struct dasd_device *device; 3290 int rc; 3291 3292 /* first online clears initial online feature flag */ 3293 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3294 device = dasd_create_device(cdev); 3295 if (IS_ERR(device)) 3296 return PTR_ERR(device); 3297 3298 discipline = base_discipline; 3299 if (device->features & DASD_FEATURE_USEDIAG) { 3300 if (!dasd_diag_discipline_pointer) { 3301 pr_warning("%s Setting the DASD online failed because " 3302 "of missing DIAG discipline\n", 3303 dev_name(&cdev->dev)); 3304 dasd_delete_device(device); 3305 return -ENODEV; 3306 } 3307 discipline = dasd_diag_discipline_pointer; 3308 } 3309 if (!try_module_get(base_discipline->owner)) { 3310 dasd_delete_device(device); 3311 return -EINVAL; 3312 } 3313 if (!try_module_get(discipline->owner)) { 3314 module_put(base_discipline->owner); 3315 dasd_delete_device(device); 3316 return -EINVAL; 3317 } 3318 device->base_discipline = base_discipline; 3319 device->discipline = discipline; 3320 3321 /* check_device will allocate block device if necessary */ 3322 rc = discipline->check_device(device); 3323 if (rc) { 3324 pr_warning("%s Setting the DASD online with discipline %s " 3325 "failed with rc=%i\n", 3326 dev_name(&cdev->dev), discipline->name, rc); 3327 module_put(discipline->owner); 3328 module_put(base_discipline->owner); 3329 dasd_delete_device(device); 3330 return rc; 3331 } 3332 3333 dasd_set_target_state(device, DASD_STATE_ONLINE); 3334 if (device->state <= DASD_STATE_KNOWN) { 3335 pr_warning("%s Setting the DASD online failed because of a " 3336 "missing discipline\n", dev_name(&cdev->dev)); 3337 rc = -ENODEV; 3338 dasd_set_target_state(device, DASD_STATE_NEW); 3339 if (device->block) 3340 dasd_free_block(device->block); 3341 dasd_delete_device(device); 3342 } else 3343 pr_debug("dasd_generic device %s found\n", 3344 dev_name(&cdev->dev)); 3345 3346 wait_event(dasd_init_waitq, _wait_for_device(device)); 3347 3348 dasd_put_device(device); 3349 return rc; 3350 } 3351 3352 int dasd_generic_set_offline(struct ccw_device *cdev) 3353 { 3354 struct dasd_device *device; 3355 struct dasd_block *block; 3356 int max_count, open_count, rc; 3357 3358 rc = 0; 3359 device = dasd_device_from_cdev(cdev); 3360 if (IS_ERR(device)) 3361 return PTR_ERR(device); 3362 3363 /* 3364 * We must make sure that this device is currently not in use. 3365 * The open_count is increased for every opener, that includes 3366 * the blkdev_get in dasd_scan_partitions. We are only interested 3367 * in the other openers. 3368 */ 3369 if (device->block) { 3370 max_count = device->block->bdev ? 
0 : -1; 3371 open_count = atomic_read(&device->block->open_count); 3372 if (open_count > max_count) { 3373 if (open_count > 0) 3374 pr_warning("%s: The DASD cannot be set offline " 3375 "with open count %i\n", 3376 dev_name(&cdev->dev), open_count); 3377 else 3378 pr_warning("%s: The DASD cannot be set offline " 3379 "while it is in use\n", 3380 dev_name(&cdev->dev)); 3381 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3382 dasd_put_device(device); 3383 return -EBUSY; 3384 } 3385 } 3386 3387 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3388 /* 3389 * safe offline allready running 3390 * could only be called by normal offline so safe_offline flag 3391 * needs to be removed to run normal offline and kill all I/O 3392 */ 3393 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3394 /* Already doing normal offline processing */ 3395 dasd_put_device(device); 3396 return -EBUSY; 3397 } else 3398 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3399 3400 } else 3401 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3402 /* Already doing offline processing */ 3403 dasd_put_device(device); 3404 return -EBUSY; 3405 } 3406 3407 /* 3408 * if safe_offline called set safe_offline_running flag and 3409 * clear safe_offline so that a call to normal offline 3410 * can overrun safe_offline processing 3411 */ 3412 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3413 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3414 /* 3415 * If we want to set the device safe offline all IO operations 3416 * should be finished before continuing the offline process 3417 * so sync bdev first and then wait for our queues to become 3418 * empty 3419 */ 3420 /* sync blockdev and partitions */ 3421 rc = fsync_bdev(device->block->bdev); 3422 if (rc != 0) 3423 goto interrupted; 3424 3425 /* schedule device tasklet and wait for completion */ 3426 dasd_schedule_device_bh(device); 3427 rc = wait_event_interruptible(shutdown_waitq, 3428 _wait_for_empty_queues(device)); 3429 if (rc != 0) 3430 goto interrupted; 3431 } 3432 3433 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3434 dasd_set_target_state(device, DASD_STATE_NEW); 3435 /* dasd_delete_device destroys the device reference. */ 3436 block = device->block; 3437 dasd_delete_device(device); 3438 /* 3439 * life cycle of block is bound to device, so delete it after 3440 * device was safely removed 3441 */ 3442 if (block) 3443 dasd_free_block(block); 3444 return 0; 3445 3446 interrupted: 3447 /* interrupted by signal */ 3448 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3449 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3450 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3451 dasd_put_device(device); 3452 return rc; 3453 } 3454 3455 int dasd_generic_last_path_gone(struct dasd_device *device) 3456 { 3457 struct dasd_ccw_req *cqr; 3458 3459 dev_warn(&device->cdev->dev, "No operational channel path is left " 3460 "for the device\n"); 3461 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3462 /* First of all call extended error reporting. */ 3463 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3464 3465 if (device->state < DASD_STATE_BASIC) 3466 return 0; 3467 /* Device is active. We want to keep it. 
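	 * Requests that are currently in I/O or waiting for a clear are
	 * therefore reset to DASD_CQR_QUEUED with their retry counter
	 * incremented, and the device is stopped with DASD_STOPPED_DC_WAIT
	 * until a path becomes operational again.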
*/ 3468 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3469 if ((cqr->status == DASD_CQR_IN_IO) || 3470 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3471 cqr->status = DASD_CQR_QUEUED; 3472 cqr->retries++; 3473 } 3474 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3475 dasd_device_clear_timer(device); 3476 dasd_schedule_device_bh(device); 3477 return 1; 3478 } 3479 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3480 3481 int dasd_generic_path_operational(struct dasd_device *device) 3482 { 3483 dev_info(&device->cdev->dev, "A channel path to the device has become " 3484 "operational\n"); 3485 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3486 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3487 if (device->stopped & DASD_UNRESUMED_PM) { 3488 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3489 dasd_restore_device(device); 3490 return 1; 3491 } 3492 dasd_schedule_device_bh(device); 3493 if (device->block) 3494 dasd_schedule_block_bh(device->block); 3495 return 1; 3496 } 3497 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3498 3499 int dasd_generic_notify(struct ccw_device *cdev, int event) 3500 { 3501 struct dasd_device *device; 3502 int ret; 3503 3504 device = dasd_device_from_cdev_locked(cdev); 3505 if (IS_ERR(device)) 3506 return 0; 3507 ret = 0; 3508 switch (event) { 3509 case CIO_GONE: 3510 case CIO_BOXED: 3511 case CIO_NO_PATH: 3512 device->path_data.opm = 0; 3513 device->path_data.ppm = 0; 3514 device->path_data.npm = 0; 3515 ret = dasd_generic_last_path_gone(device); 3516 break; 3517 case CIO_OPER: 3518 ret = 1; 3519 if (device->path_data.opm) 3520 ret = dasd_generic_path_operational(device); 3521 break; 3522 } 3523 dasd_put_device(device); 3524 return ret; 3525 } 3526 3527 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3528 { 3529 int chp; 3530 __u8 oldopm, eventlpm; 3531 struct dasd_device *device; 3532 3533 device = dasd_device_from_cdev_locked(cdev); 3534 if (IS_ERR(device)) 3535 return; 3536 for (chp = 0; chp < 8; chp++) { 3537 eventlpm = 0x80 >> chp; 3538 if (path_event[chp] & PE_PATH_GONE) { 3539 oldopm = device->path_data.opm; 3540 device->path_data.opm &= ~eventlpm; 3541 device->path_data.ppm &= ~eventlpm; 3542 device->path_data.npm &= ~eventlpm; 3543 if (oldopm && !device->path_data.opm) { 3544 dev_warn(&device->cdev->dev, 3545 "No verified channel paths remain " 3546 "for the device\n"); 3547 DBF_DEV_EVENT(DBF_WARNING, device, 3548 "%s", "last verified path gone"); 3549 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3550 dasd_device_set_stop_bits(device, 3551 DASD_STOPPED_DC_WAIT); 3552 } 3553 } 3554 if (path_event[chp] & PE_PATH_AVAILABLE) { 3555 device->path_data.opm &= ~eventlpm; 3556 device->path_data.ppm &= ~eventlpm; 3557 device->path_data.npm &= ~eventlpm; 3558 device->path_data.tbvpm |= eventlpm; 3559 dasd_schedule_device_bh(device); 3560 } 3561 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3562 if (!(device->path_data.opm & eventlpm) && 3563 !(device->path_data.tbvpm & eventlpm)) { 3564 /* 3565 * we can not establish a pathgroup on an 3566 * unavailable path, so trigger a path 3567 * verification first 3568 */ 3569 device->path_data.tbvpm |= eventlpm; 3570 dasd_schedule_device_bh(device); 3571 } 3572 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3573 "Pathgroup re-established\n"); 3574 if (device->discipline->kick_validate) 3575 device->discipline->kick_validate(device); 3576 } 3577 } 3578 dasd_put_device(device); 3579 } 3580 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3581 3582 int 
dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3583 { 3584 if (!device->path_data.opm && lpm) { 3585 device->path_data.opm = lpm; 3586 dasd_generic_path_operational(device); 3587 } else 3588 device->path_data.opm |= lpm; 3589 return 0; 3590 } 3591 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3592 3593 3594 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3595 { 3596 struct dasd_device *device = dasd_device_from_cdev(cdev); 3597 struct list_head freeze_queue; 3598 struct dasd_ccw_req *cqr, *n; 3599 struct dasd_ccw_req *refers; 3600 int rc; 3601 3602 if (IS_ERR(device)) 3603 return PTR_ERR(device); 3604 3605 /* mark device as suspended */ 3606 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3607 3608 if (device->discipline->freeze) 3609 rc = device->discipline->freeze(device); 3610 3611 /* disallow new I/O */ 3612 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3613 3614 /* clear active requests and requeue them to block layer if possible */ 3615 INIT_LIST_HEAD(&freeze_queue); 3616 spin_lock_irq(get_ccwdev_lock(cdev)); 3617 rc = 0; 3618 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3619 /* Check status and move request to flush_queue */ 3620 if (cqr->status == DASD_CQR_IN_IO) { 3621 rc = device->discipline->term_IO(cqr); 3622 if (rc) { 3623 /* unable to terminate requeust */ 3624 dev_err(&device->cdev->dev, 3625 "Unable to terminate request %p " 3626 "on suspend\n", cqr); 3627 spin_unlock_irq(get_ccwdev_lock(cdev)); 3628 dasd_put_device(device); 3629 return rc; 3630 } 3631 } 3632 list_move_tail(&cqr->devlist, &freeze_queue); 3633 } 3634 spin_unlock_irq(get_ccwdev_lock(cdev)); 3635 3636 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3637 wait_event(dasd_flush_wq, 3638 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3639 if (cqr->status == DASD_CQR_CLEARED) 3640 cqr->status = DASD_CQR_QUEUED; 3641 3642 /* requeue requests to blocklayer will only work for 3643 block device requests */ 3644 if (_dasd_requeue_request(cqr)) 3645 continue; 3646 3647 /* remove requests from device and block queue */ 3648 list_del_init(&cqr->devlist); 3649 while (cqr->refers != NULL) { 3650 refers = cqr->refers; 3651 /* remove the request from the block queue */ 3652 list_del(&cqr->blocklist); 3653 /* free the finished erp request */ 3654 dasd_free_erp_request(cqr, cqr->memdev); 3655 cqr = refers; 3656 } 3657 if (cqr->block) 3658 list_del_init(&cqr->blocklist); 3659 cqr->block->base->discipline->free_cp( 3660 cqr, (struct request *) cqr->callback_data); 3661 } 3662 3663 /* 3664 * if requests remain then they are internal request 3665 * and go back to the device queue 3666 */ 3667 if (!list_empty(&freeze_queue)) { 3668 /* move freeze_queue to start of the ccw_queue */ 3669 spin_lock_irq(get_ccwdev_lock(cdev)); 3670 list_splice_tail(&freeze_queue, &device->ccw_queue); 3671 spin_unlock_irq(get_ccwdev_lock(cdev)); 3672 } 3673 dasd_put_device(device); 3674 return rc; 3675 } 3676 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3677 3678 int dasd_generic_restore_device(struct ccw_device *cdev) 3679 { 3680 struct dasd_device *device = dasd_device_from_cdev(cdev); 3681 int rc = 0; 3682 3683 if (IS_ERR(device)) 3684 return PTR_ERR(device); 3685 3686 /* allow new IO again */ 3687 dasd_device_remove_stop_bits(device, 3688 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3689 3690 dasd_schedule_device_bh(device); 3691 3692 /* 3693 * call discipline restore function 3694 * if device is stopped do nothing e.g. 
for disconnected devices 3695 */ 3696 if (device->discipline->restore && !(device->stopped)) 3697 rc = device->discipline->restore(device); 3698 if (rc || device->stopped) 3699 /* 3700 * if the resume failed for the DASD we put it in 3701 * an UNRESUMED stop state 3702 */ 3703 device->stopped |= DASD_UNRESUMED_PM; 3704 3705 if (device->block) 3706 dasd_schedule_block_bh(device->block); 3707 3708 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); 3709 dasd_put_device(device); 3710 return 0; 3711 } 3712 EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 3713 3714 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 3715 void *rdc_buffer, 3716 int rdc_buffer_size, 3717 int magic) 3718 { 3719 struct dasd_ccw_req *cqr; 3720 struct ccw1 *ccw; 3721 unsigned long *idaw; 3722 3723 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 3724 3725 if (IS_ERR(cqr)) { 3726 /* internal error 13 - Allocating the RDC request failed*/ 3727 dev_err(&device->cdev->dev, 3728 "An error occurred in the DASD device driver, " 3729 "reason=%s\n", "13"); 3730 return cqr; 3731 } 3732 3733 ccw = cqr->cpaddr; 3734 ccw->cmd_code = CCW_CMD_RDC; 3735 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { 3736 idaw = (unsigned long *) (cqr->data); 3737 ccw->cda = (__u32)(addr_t) idaw; 3738 ccw->flags = CCW_FLAG_IDA; 3739 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); 3740 } else { 3741 ccw->cda = (__u32)(addr_t) rdc_buffer; 3742 ccw->flags = 0; 3743 } 3744 3745 ccw->count = rdc_buffer_size; 3746 cqr->startdev = device; 3747 cqr->memdev = device; 3748 cqr->expires = 10*HZ; 3749 cqr->retries = 256; 3750 cqr->buildclk = get_tod_clock(); 3751 cqr->status = DASD_CQR_FILLED; 3752 return cqr; 3753 } 3754 3755 3756 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 3757 void *rdc_buffer, int rdc_buffer_size) 3758 { 3759 int ret; 3760 struct dasd_ccw_req *cqr; 3761 3762 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 3763 magic); 3764 if (IS_ERR(cqr)) 3765 return PTR_ERR(cqr); 3766 3767 ret = dasd_sleep_on(cqr); 3768 dasd_sfree_request(cqr, cqr->memdev); 3769 return ret; 3770 } 3771 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 3772 3773 /* 3774 * In command mode and transport mode we need to look for sense 3775 * data in different places. The sense data itself is allways 3776 * an array of 32 bytes, so we can unify the sense data access 3777 * for both modes. 
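 *
 * Typical use in an error/interrupt handler (sketch, assuming a valid irb;
 * my_inspect_sense is an illustrative helper, not part of this driver):
 *
 *	char *sense = dasd_get_sense(irb);
 *
 *	if (sense)
 *		my_inspect_sense(sense);	(sense points to 32 bytes)
 *
 * A NULL return means no sense data is available for this irb.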
3778 */ 3779 char *dasd_get_sense(struct irb *irb) 3780 { 3781 struct tsb *tsb = NULL; 3782 char *sense = NULL; 3783 3784 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 3785 if (irb->scsw.tm.tcw) 3786 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 3787 irb->scsw.tm.tcw); 3788 if (tsb && tsb->length == 64 && tsb->flags) 3789 switch (tsb->flags & 0x07) { 3790 case 1: /* tsa_iostat */ 3791 sense = tsb->tsa.iostat.sense; 3792 break; 3793 case 2: /* tsa_ddpc */ 3794 sense = tsb->tsa.ddpc.sense; 3795 break; 3796 default: 3797 /* currently we don't use interrogate data */ 3798 break; 3799 } 3800 } else if (irb->esw.esw0.erw.cons) { 3801 sense = irb->ecw; 3802 } 3803 return sense; 3804 } 3805 EXPORT_SYMBOL_GPL(dasd_get_sense); 3806 3807 void dasd_generic_shutdown(struct ccw_device *cdev) 3808 { 3809 struct dasd_device *device; 3810 3811 device = dasd_device_from_cdev(cdev); 3812 if (IS_ERR(device)) 3813 return; 3814 3815 if (device->block) 3816 dasd_schedule_block_bh(device->block); 3817 3818 dasd_schedule_device_bh(device); 3819 3820 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); 3821 } 3822 EXPORT_SYMBOL_GPL(dasd_generic_shutdown); 3823 3824 static int __init dasd_init(void) 3825 { 3826 int rc; 3827 3828 init_waitqueue_head(&dasd_init_waitq); 3829 init_waitqueue_head(&dasd_flush_wq); 3830 init_waitqueue_head(&generic_waitq); 3831 init_waitqueue_head(&shutdown_waitq); 3832 3833 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 3834 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 3835 if (dasd_debug_area == NULL) { 3836 rc = -ENOMEM; 3837 goto failed; 3838 } 3839 debug_register_view(dasd_debug_area, &debug_sprintf_view); 3840 debug_set_level(dasd_debug_area, DBF_WARNING); 3841 3842 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 3843 3844 dasd_diag_discipline_pointer = NULL; 3845 3846 dasd_statistics_createroot(); 3847 3848 rc = dasd_devmap_init(); 3849 if (rc) 3850 goto failed; 3851 rc = dasd_gendisk_init(); 3852 if (rc) 3853 goto failed; 3854 rc = dasd_parse(); 3855 if (rc) 3856 goto failed; 3857 rc = dasd_eer_init(); 3858 if (rc) 3859 goto failed; 3860 #ifdef CONFIG_PROC_FS 3861 rc = dasd_proc_init(); 3862 if (rc) 3863 goto failed; 3864 #endif 3865 3866 return 0; 3867 failed: 3868 pr_info("The DASD device driver could not be initialized\n"); 3869 dasd_exit(); 3870 return rc; 3871 } 3872 3873 module_init(dasd_init); 3874 module_exit(dasd_exit); 3875 3876 EXPORT_SYMBOL(dasd_debug_area); 3877 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 3878 3879 EXPORT_SYMBOL(dasd_add_request_head); 3880 EXPORT_SYMBOL(dasd_add_request_tail); 3881 EXPORT_SYMBOL(dasd_cancel_req); 3882 EXPORT_SYMBOL(dasd_device_clear_timer); 3883 EXPORT_SYMBOL(dasd_block_clear_timer); 3884 EXPORT_SYMBOL(dasd_enable_device); 3885 EXPORT_SYMBOL(dasd_int_handler); 3886 EXPORT_SYMBOL(dasd_kfree_request); 3887 EXPORT_SYMBOL(dasd_kick_device); 3888 EXPORT_SYMBOL(dasd_kmalloc_request); 3889 EXPORT_SYMBOL(dasd_schedule_device_bh); 3890 EXPORT_SYMBOL(dasd_schedule_block_bh); 3891 EXPORT_SYMBOL(dasd_set_target_state); 3892 EXPORT_SYMBOL(dasd_device_set_timer); 3893 EXPORT_SYMBOL(dasd_block_set_timer); 3894 EXPORT_SYMBOL(dasd_sfree_request); 3895 EXPORT_SYMBOL(dasd_sleep_on); 3896 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 3897 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 3898 EXPORT_SYMBOL(dasd_smalloc_request); 3899 EXPORT_SYMBOL(dasd_start_IO); 3900 EXPORT_SYMBOL(dasd_term_IO); 3901 3902 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3903 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3904 
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);
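
/*
 * Usage note (illustrative, not code from this driver): a discipline module
 * is expected to hook the dasd_generic_* helpers exported above into its
 * struct ccw_driver, roughly like
 *
 *	.remove      = dasd_generic_remove,
 *	.set_offline = dasd_generic_set_offline,
 *	.notify      = dasd_generic_notify,
 *	.path_event  = dasd_generic_path_event,
 *	.shutdown    = dasd_generic_shutdown,
 *
 * while .probe and .set_online are small wrappers that pass the module's
 * struct dasd_discipline to dasd_generic_probe() and
 * dasd_generic_set_online().
 */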