1 /* 2 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 3 * Horst Hummel <Horst.Hummel@de.ibm.com> 4 * Carsten Otte <Cotte@de.ibm.com> 5 * Martin Schwidefsky <schwidefsky@de.ibm.com> 6 * Bugreports.to..: <Linux390@de.ibm.com> 7 * Copyright IBM Corp. 1999, 2009 8 */ 9 10 #define KMSG_COMPONENT "dasd" 11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 12 13 #include <linux/kmod.h> 14 #include <linux/init.h> 15 #include <linux/interrupt.h> 16 #include <linux/ctype.h> 17 #include <linux/major.h> 18 #include <linux/slab.h> 19 #include <linux/hdreg.h> 20 #include <linux/async.h> 21 #include <linux/mutex.h> 22 #include <linux/debugfs.h> 23 #include <linux/seq_file.h> 24 #include <linux/vmalloc.h> 25 26 #include <asm/ccwdev.h> 27 #include <asm/ebcdic.h> 28 #include <asm/idals.h> 29 #include <asm/itcw.h> 30 #include <asm/diag.h> 31 32 /* This is ugly... */ 33 #define PRINTK_HEADER "dasd:" 34 35 #include "dasd_int.h" 36 /* 37 * SECTION: Constant definitions to be used within this file 38 */ 39 #define DASD_CHANQ_MAX_SIZE 4 40 41 #define DASD_SLEEPON_START_TAG (void *) 1 42 #define DASD_SLEEPON_END_TAG (void *) 2 43 44 /* 45 * SECTION: exported variables of dasd.c 46 */ 47 debug_info_t *dasd_debug_area; 48 static struct dentry *dasd_debugfs_root_entry; 49 struct dasd_discipline *dasd_diag_discipline_pointer; 50 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 51 52 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 53 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 54 " Copyright IBM Corp. 2000"); 55 MODULE_SUPPORTED_DEVICE("dasd"); 56 MODULE_LICENSE("GPL"); 57 58 /* 59 * SECTION: prototypes for static functions of dasd.c 60 */ 61 static int dasd_alloc_queue(struct dasd_block *); 62 static void dasd_setup_queue(struct dasd_block *); 63 static void dasd_free_queue(struct dasd_block *); 64 static void dasd_flush_request_queue(struct dasd_block *); 65 static int dasd_flush_block_queue(struct dasd_block *); 66 static void dasd_device_tasklet(struct dasd_device *); 67 static void dasd_block_tasklet(struct dasd_block *); 68 static void do_kick_device(struct work_struct *); 69 static void do_restore_device(struct work_struct *); 70 static void do_reload_device(struct work_struct *); 71 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 72 static void dasd_device_timeout(unsigned long); 73 static void dasd_block_timeout(unsigned long); 74 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 75 static void dasd_profile_init(struct dasd_profile *, struct dentry *); 76 static void dasd_profile_exit(struct dasd_profile *); 77 78 /* 79 * SECTION: Operations on the device structure. 80 */ 81 static wait_queue_head_t dasd_init_waitq; 82 static wait_queue_head_t dasd_flush_wq; 83 static wait_queue_head_t generic_waitq; 84 static wait_queue_head_t shutdown_waitq; 85 86 /* 87 * Allocate memory for a new device structure. 88 */ 89 struct dasd_device *dasd_alloc_device(void) 90 { 91 struct dasd_device *device; 92 93 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 94 if (!device) 95 return ERR_PTR(-ENOMEM); 96 97 /* Get two pages for normal block device operations. */ 98 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 99 if (!device->ccw_mem) { 100 kfree(device); 101 return ERR_PTR(-ENOMEM); 102 } 103 /* Get one page for error recovery. 
*/ 104 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 105 if (!device->erp_mem) { 106 free_pages((unsigned long) device->ccw_mem, 1); 107 kfree(device); 108 return ERR_PTR(-ENOMEM); 109 } 110 111 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 112 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 113 spin_lock_init(&device->mem_lock); 114 atomic_set(&device->tasklet_scheduled, 0); 115 tasklet_init(&device->tasklet, 116 (void (*)(unsigned long)) dasd_device_tasklet, 117 (unsigned long) device); 118 INIT_LIST_HEAD(&device->ccw_queue); 119 init_timer(&device->timer); 120 device->timer.function = dasd_device_timeout; 121 device->timer.data = (unsigned long) device; 122 INIT_WORK(&device->kick_work, do_kick_device); 123 INIT_WORK(&device->restore_device, do_restore_device); 124 INIT_WORK(&device->reload_device, do_reload_device); 125 device->state = DASD_STATE_NEW; 126 device->target = DASD_STATE_NEW; 127 mutex_init(&device->state_mutex); 128 spin_lock_init(&device->profile.lock); 129 return device; 130 } 131 132 /* 133 * Free memory of a device structure. 134 */ 135 void dasd_free_device(struct dasd_device *device) 136 { 137 kfree(device->private); 138 free_page((unsigned long) device->erp_mem); 139 free_pages((unsigned long) device->ccw_mem, 1); 140 kfree(device); 141 } 142 143 /* 144 * Allocate memory for a new device structure. 145 */ 146 struct dasd_block *dasd_alloc_block(void) 147 { 148 struct dasd_block *block; 149 150 block = kzalloc(sizeof(*block), GFP_ATOMIC); 151 if (!block) 152 return ERR_PTR(-ENOMEM); 153 /* open_count = 0 means device online but not in use */ 154 atomic_set(&block->open_count, -1); 155 156 spin_lock_init(&block->request_queue_lock); 157 atomic_set(&block->tasklet_scheduled, 0); 158 tasklet_init(&block->tasklet, 159 (void (*)(unsigned long)) dasd_block_tasklet, 160 (unsigned long) block); 161 INIT_LIST_HEAD(&block->ccw_queue); 162 spin_lock_init(&block->queue_lock); 163 init_timer(&block->timer); 164 block->timer.function = dasd_block_timeout; 165 block->timer.data = (unsigned long) block; 166 spin_lock_init(&block->profile.lock); 167 168 return block; 169 } 170 171 /* 172 * Free memory of a device structure. 173 */ 174 void dasd_free_block(struct dasd_block *block) 175 { 176 kfree(block); 177 } 178 179 /* 180 * Make a new device known to the system. 181 */ 182 static int dasd_state_new_to_known(struct dasd_device *device) 183 { 184 int rc; 185 186 /* 187 * As long as the device is not in state DASD_STATE_NEW we want to 188 * keep the reference count > 0. 189 */ 190 dasd_get_device(device); 191 192 if (device->block) { 193 rc = dasd_alloc_queue(device->block); 194 if (rc) { 195 dasd_put_device(device); 196 return rc; 197 } 198 } 199 device->state = DASD_STATE_KNOWN; 200 return 0; 201 } 202 203 /* 204 * Let the system forget about a device. 205 */ 206 static int dasd_state_known_to_new(struct dasd_device *device) 207 { 208 /* Disable extended error reporting for this device. */ 209 dasd_eer_disable(device); 210 /* Forget the discipline information. 
*/ 211 if (device->discipline) { 212 if (device->discipline->uncheck_device) 213 device->discipline->uncheck_device(device); 214 module_put(device->discipline->owner); 215 } 216 device->discipline = NULL; 217 if (device->base_discipline) 218 module_put(device->base_discipline->owner); 219 device->base_discipline = NULL; 220 device->state = DASD_STATE_NEW; 221 222 if (device->block) 223 dasd_free_queue(device->block); 224 225 /* Give up reference we took in dasd_state_new_to_known. */ 226 dasd_put_device(device); 227 return 0; 228 } 229 230 static struct dentry *dasd_debugfs_setup(const char *name, 231 struct dentry *base_dentry) 232 { 233 struct dentry *pde; 234 235 if (!base_dentry) 236 return NULL; 237 pde = debugfs_create_dir(name, base_dentry); 238 if (!pde || IS_ERR(pde)) 239 return NULL; 240 return pde; 241 } 242 243 /* 244 * Request the irq line for the device. 245 */ 246 static int dasd_state_known_to_basic(struct dasd_device *device) 247 { 248 struct dasd_block *block = device->block; 249 int rc = 0; 250 251 /* Allocate and register gendisk structure. */ 252 if (block) { 253 rc = dasd_gendisk_alloc(block); 254 if (rc) 255 return rc; 256 block->debugfs_dentry = 257 dasd_debugfs_setup(block->gdp->disk_name, 258 dasd_debugfs_root_entry); 259 dasd_profile_init(&block->profile, block->debugfs_dentry); 260 if (dasd_global_profile_level == DASD_PROFILE_ON) 261 dasd_profile_on(&device->block->profile); 262 } 263 device->debugfs_dentry = 264 dasd_debugfs_setup(dev_name(&device->cdev->dev), 265 dasd_debugfs_root_entry); 266 dasd_profile_init(&device->profile, device->debugfs_dentry); 267 268 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 269 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 270 8 * sizeof(long)); 271 debug_register_view(device->debug_area, &debug_sprintf_view); 272 debug_set_level(device->debug_area, DBF_WARNING); 273 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 274 275 device->state = DASD_STATE_BASIC; 276 277 return rc; 278 } 279 280 /* 281 * Release the irq line for the device. Terminate any running i/o. 282 */ 283 static int dasd_state_basic_to_known(struct dasd_device *device) 284 { 285 int rc; 286 287 if (device->block) { 288 dasd_profile_exit(&device->block->profile); 289 if (device->block->debugfs_dentry) 290 debugfs_remove(device->block->debugfs_dentry); 291 dasd_gendisk_free(device->block); 292 dasd_block_clear_timer(device->block); 293 } 294 rc = dasd_flush_device_queue(device); 295 if (rc) 296 return rc; 297 dasd_device_clear_timer(device); 298 dasd_profile_exit(&device->profile); 299 if (device->debugfs_dentry) 300 debugfs_remove(device->debugfs_dentry); 301 302 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 303 if (device->debug_area != NULL) { 304 debug_unregister(device->debug_area); 305 device->debug_area = NULL; 306 } 307 device->state = DASD_STATE_KNOWN; 308 return 0; 309 } 310 311 /* 312 * Do the initial analysis. The do_analysis function may return 313 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 314 * until the discipline decides to continue the startup sequence 315 * by calling the function dasd_change_state. The eckd disciplines 316 * uses this to start a ccw that detects the format. The completion 317 * interrupt for this detection ccw uses the kernel event daemon to 318 * trigger the call to dasd_change_state. All this is done in the 319 * discipline code, see dasd_eckd.c. 
320 * After the analysis ccw is done (do_analysis returned 0) the block 321 * device is setup. 322 * In case the analysis returns an error, the device setup is stopped 323 * (a fake disk was already added to allow formatting). 324 */ 325 static int dasd_state_basic_to_ready(struct dasd_device *device) 326 { 327 int rc; 328 struct dasd_block *block; 329 330 rc = 0; 331 block = device->block; 332 /* make disk known with correct capacity */ 333 if (block) { 334 if (block->base->discipline->do_analysis != NULL) 335 rc = block->base->discipline->do_analysis(block); 336 if (rc) { 337 if (rc != -EAGAIN) { 338 device->state = DASD_STATE_UNFMT; 339 goto out; 340 } 341 return rc; 342 } 343 dasd_setup_queue(block); 344 set_capacity(block->gdp, 345 block->blocks << block->s2b_shift); 346 device->state = DASD_STATE_READY; 347 rc = dasd_scan_partitions(block); 348 if (rc) { 349 device->state = DASD_STATE_BASIC; 350 return rc; 351 } 352 } else { 353 device->state = DASD_STATE_READY; 354 } 355 out: 356 if (device->discipline->basic_to_ready) 357 rc = device->discipline->basic_to_ready(device); 358 return rc; 359 } 360 361 static inline 362 int _wait_for_empty_queues(struct dasd_device *device) 363 { 364 if (device->block) 365 return list_empty(&device->ccw_queue) && 366 list_empty(&device->block->ccw_queue); 367 else 368 return list_empty(&device->ccw_queue); 369 } 370 371 /* 372 * Remove device from block device layer. Destroy dirty buffers. 373 * Forget format information. Check if the target level is basic 374 * and if it is create fake disk for formatting. 375 */ 376 static int dasd_state_ready_to_basic(struct dasd_device *device) 377 { 378 int rc; 379 380 if (device->discipline->ready_to_basic) { 381 rc = device->discipline->ready_to_basic(device); 382 if (rc) 383 return rc; 384 } 385 device->state = DASD_STATE_BASIC; 386 if (device->block) { 387 struct dasd_block *block = device->block; 388 rc = dasd_flush_block_queue(block); 389 if (rc) { 390 device->state = DASD_STATE_READY; 391 return rc; 392 } 393 dasd_flush_request_queue(block); 394 dasd_destroy_partitions(block); 395 block->blocks = 0; 396 block->bp_block = 0; 397 block->s2b_shift = 0; 398 } 399 return 0; 400 } 401 402 /* 403 * Back to basic. 404 */ 405 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 406 { 407 device->state = DASD_STATE_BASIC; 408 return 0; 409 } 410 411 /* 412 * Make the device online and schedule the bottom half to start 413 * the requeueing of requests from the linux request queue to the 414 * ccw queue. 415 */ 416 static int 417 dasd_state_ready_to_online(struct dasd_device * device) 418 { 419 struct gendisk *disk; 420 struct disk_part_iter piter; 421 struct hd_struct *part; 422 423 device->state = DASD_STATE_ONLINE; 424 if (device->block) { 425 dasd_schedule_block_bh(device->block); 426 if ((device->features & DASD_FEATURE_USERAW)) { 427 disk = device->block->gdp; 428 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); 429 return 0; 430 } 431 disk = device->block->bdev->bd_disk; 432 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 433 while ((part = disk_part_iter_next(&piter))) 434 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 435 disk_part_iter_exit(&piter); 436 } 437 return 0; 438 } 439 440 /* 441 * Stop the requeueing of requests again. 
442 */ 443 static int dasd_state_online_to_ready(struct dasd_device *device) 444 { 445 int rc; 446 struct gendisk *disk; 447 struct disk_part_iter piter; 448 struct hd_struct *part; 449 450 if (device->discipline->online_to_ready) { 451 rc = device->discipline->online_to_ready(device); 452 if (rc) 453 return rc; 454 } 455 456 device->state = DASD_STATE_READY; 457 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 458 disk = device->block->bdev->bd_disk; 459 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 460 while ((part = disk_part_iter_next(&piter))) 461 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 462 disk_part_iter_exit(&piter); 463 } 464 return 0; 465 } 466 467 /* 468 * Device startup state changes. 469 */ 470 static int dasd_increase_state(struct dasd_device *device) 471 { 472 int rc; 473 474 rc = 0; 475 if (device->state == DASD_STATE_NEW && 476 device->target >= DASD_STATE_KNOWN) 477 rc = dasd_state_new_to_known(device); 478 479 if (!rc && 480 device->state == DASD_STATE_KNOWN && 481 device->target >= DASD_STATE_BASIC) 482 rc = dasd_state_known_to_basic(device); 483 484 if (!rc && 485 device->state == DASD_STATE_BASIC && 486 device->target >= DASD_STATE_READY) 487 rc = dasd_state_basic_to_ready(device); 488 489 if (!rc && 490 device->state == DASD_STATE_UNFMT && 491 device->target > DASD_STATE_UNFMT) 492 rc = -EPERM; 493 494 if (!rc && 495 device->state == DASD_STATE_READY && 496 device->target >= DASD_STATE_ONLINE) 497 rc = dasd_state_ready_to_online(device); 498 499 return rc; 500 } 501 502 /* 503 * Device shutdown state changes. 504 */ 505 static int dasd_decrease_state(struct dasd_device *device) 506 { 507 int rc; 508 509 rc = 0; 510 if (device->state == DASD_STATE_ONLINE && 511 device->target <= DASD_STATE_READY) 512 rc = dasd_state_online_to_ready(device); 513 514 if (!rc && 515 device->state == DASD_STATE_READY && 516 device->target <= DASD_STATE_BASIC) 517 rc = dasd_state_ready_to_basic(device); 518 519 if (!rc && 520 device->state == DASD_STATE_UNFMT && 521 device->target <= DASD_STATE_BASIC) 522 rc = dasd_state_unfmt_to_basic(device); 523 524 if (!rc && 525 device->state == DASD_STATE_BASIC && 526 device->target <= DASD_STATE_KNOWN) 527 rc = dasd_state_basic_to_known(device); 528 529 if (!rc && 530 device->state == DASD_STATE_KNOWN && 531 device->target <= DASD_STATE_NEW) 532 rc = dasd_state_known_to_new(device); 533 534 return rc; 535 } 536 537 /* 538 * This is the main startup/shutdown routine. 539 */ 540 static void dasd_change_state(struct dasd_device *device) 541 { 542 int rc; 543 544 if (device->state == device->target) 545 /* Already where we want to go today... */ 546 return; 547 if (device->state < device->target) 548 rc = dasd_increase_state(device); 549 else 550 rc = dasd_decrease_state(device); 551 if (rc == -EAGAIN) 552 return; 553 if (rc) 554 device->target = device->state; 555 556 /* let user-space know that the device status changed */ 557 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 558 559 if (device->state == device->target) 560 wake_up(&dasd_init_waitq); 561 } 562 563 /* 564 * Kick starter for devices that did not complete the startup/shutdown 565 * procedure or were sleeping because of a pending state. 566 * dasd_kick_device will schedule a call do do_kick_device to the kernel 567 * event daemon. 
568 */ 569 static void do_kick_device(struct work_struct *work) 570 { 571 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 572 mutex_lock(&device->state_mutex); 573 dasd_change_state(device); 574 mutex_unlock(&device->state_mutex); 575 dasd_schedule_device_bh(device); 576 dasd_put_device(device); 577 } 578 579 void dasd_kick_device(struct dasd_device *device) 580 { 581 dasd_get_device(device); 582 /* queue call to dasd_kick_device to the kernel event daemon. */ 583 schedule_work(&device->kick_work); 584 } 585 586 /* 587 * dasd_reload_device will schedule a call do do_reload_device to the kernel 588 * event daemon. 589 */ 590 static void do_reload_device(struct work_struct *work) 591 { 592 struct dasd_device *device = container_of(work, struct dasd_device, 593 reload_device); 594 device->discipline->reload(device); 595 dasd_put_device(device); 596 } 597 598 void dasd_reload_device(struct dasd_device *device) 599 { 600 dasd_get_device(device); 601 /* queue call to dasd_reload_device to the kernel event daemon. */ 602 schedule_work(&device->reload_device); 603 } 604 EXPORT_SYMBOL(dasd_reload_device); 605 606 /* 607 * dasd_restore_device will schedule a call do do_restore_device to the kernel 608 * event daemon. 609 */ 610 static void do_restore_device(struct work_struct *work) 611 { 612 struct dasd_device *device = container_of(work, struct dasd_device, 613 restore_device); 614 device->cdev->drv->restore(device->cdev); 615 dasd_put_device(device); 616 } 617 618 void dasd_restore_device(struct dasd_device *device) 619 { 620 dasd_get_device(device); 621 /* queue call to dasd_restore_device to the kernel event daemon. */ 622 schedule_work(&device->restore_device); 623 } 624 625 /* 626 * Set the target state for a device and starts the state change. 627 */ 628 void dasd_set_target_state(struct dasd_device *device, int target) 629 { 630 dasd_get_device(device); 631 mutex_lock(&device->state_mutex); 632 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 633 if (dasd_probeonly && target > DASD_STATE_READY) 634 target = DASD_STATE_READY; 635 if (device->target != target) { 636 if (device->state == target) 637 wake_up(&dasd_init_waitq); 638 device->target = target; 639 } 640 if (device->state != device->target) 641 dasd_change_state(device); 642 mutex_unlock(&device->state_mutex); 643 dasd_put_device(device); 644 } 645 646 /* 647 * Enable devices with device numbers in [from..to]. 648 */ 649 static inline int _wait_for_device(struct dasd_device *device) 650 { 651 return (device->state == device->target); 652 } 653 654 void dasd_enable_device(struct dasd_device *device) 655 { 656 dasd_set_target_state(device, DASD_STATE_ONLINE); 657 if (device->state <= DASD_STATE_KNOWN) 658 /* No discipline for device found. */ 659 dasd_set_target_state(device, DASD_STATE_NEW); 660 /* Now wait for the devices to come up. */ 661 wait_event(dasd_init_waitq, _wait_for_device(device)); 662 663 dasd_reload_device(device); 664 if (device->discipline->kick_validate) 665 device->discipline->kick_validate(device); 666 } 667 668 /* 669 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 670 */ 671 672 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 673 674 #ifdef CONFIG_DASD_PROFILE 675 struct dasd_profile_info dasd_global_profile_data; 676 static struct dentry *dasd_global_profile_dentry; 677 static struct dentry *dasd_debugfs_global_entry; 678 679 /* 680 * Add profiling information for cqr before execution. 
681 */ 682 static void dasd_profile_start(struct dasd_block *block, 683 struct dasd_ccw_req *cqr, 684 struct request *req) 685 { 686 struct list_head *l; 687 unsigned int counter; 688 struct dasd_device *device; 689 690 /* count the length of the chanq for statistics */ 691 counter = 0; 692 if (dasd_global_profile_level || block->profile.data) 693 list_for_each(l, &block->ccw_queue) 694 if (++counter >= 31) 695 break; 696 697 if (dasd_global_profile_level) { 698 dasd_global_profile_data.dasd_io_nr_req[counter]++; 699 if (rq_data_dir(req) == READ) 700 dasd_global_profile_data.dasd_read_nr_req[counter]++; 701 } 702 703 spin_lock(&block->profile.lock); 704 if (block->profile.data) 705 block->profile.data->dasd_io_nr_req[counter]++; 706 if (rq_data_dir(req) == READ) 707 block->profile.data->dasd_read_nr_req[counter]++; 708 spin_unlock(&block->profile.lock); 709 710 /* 711 * We count the request for the start device, even though it may run on 712 * some other device due to error recovery. This way we make sure that 713 * we count each request only once. 714 */ 715 device = cqr->startdev; 716 if (device->profile.data) { 717 counter = 1; /* request is not yet queued on the start device */ 718 list_for_each(l, &device->ccw_queue) 719 if (++counter >= 31) 720 break; 721 } 722 spin_lock(&device->profile.lock); 723 if (device->profile.data) { 724 device->profile.data->dasd_io_nr_req[counter]++; 725 if (rq_data_dir(req) == READ) 726 device->profile.data->dasd_read_nr_req[counter]++; 727 } 728 spin_unlock(&device->profile.lock); 729 } 730 731 /* 732 * Add profiling information for cqr after execution. 733 */ 734 735 #define dasd_profile_counter(value, index) \ 736 { \ 737 for (index = 0; index < 31 && value >> (2+index); index++) \ 738 ; \ 739 } 740 741 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 742 int is_alias, 743 int is_tpm, 744 int is_read, 745 long sectors, 746 int sectors_ind, 747 int tottime_ind, 748 int tottimeps_ind, 749 int strtime_ind, 750 int irqtime_ind, 751 int irqtimeps_ind, 752 int endtime_ind) 753 { 754 /* in case of an overflow, reset the whole profile */ 755 if (data->dasd_io_reqs == UINT_MAX) { 756 memset(data, 0, sizeof(*data)); 757 getnstimeofday(&data->starttod); 758 } 759 data->dasd_io_reqs++; 760 data->dasd_io_sects += sectors; 761 if (is_alias) 762 data->dasd_io_alias++; 763 if (is_tpm) 764 data->dasd_io_tpm++; 765 766 data->dasd_io_secs[sectors_ind]++; 767 data->dasd_io_times[tottime_ind]++; 768 data->dasd_io_timps[tottimeps_ind]++; 769 data->dasd_io_time1[strtime_ind]++; 770 data->dasd_io_time2[irqtime_ind]++; 771 data->dasd_io_time2ps[irqtimeps_ind]++; 772 data->dasd_io_time3[endtime_ind]++; 773 774 if (is_read) { 775 data->dasd_read_reqs++; 776 data->dasd_read_sects += sectors; 777 if (is_alias) 778 data->dasd_read_alias++; 779 if (is_tpm) 780 data->dasd_read_tpm++; 781 data->dasd_read_secs[sectors_ind]++; 782 data->dasd_read_times[tottime_ind]++; 783 data->dasd_read_time1[strtime_ind]++; 784 data->dasd_read_time2[irqtime_ind]++; 785 data->dasd_read_time3[endtime_ind]++; 786 } 787 } 788 789 static void dasd_profile_end(struct dasd_block *block, 790 struct dasd_ccw_req *cqr, 791 struct request *req) 792 { 793 long strtime, irqtime, endtime, tottime; /* in microseconds */ 794 long tottimeps, sectors; 795 struct dasd_device *device; 796 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 797 int irqtime_ind, irqtimeps_ind, endtime_ind; 798 799 device = cqr->startdev; 800 if (!(dasd_global_profile_level || 801 block->profile.data || 802 
device->profile.data)) 803 return; 804 805 sectors = blk_rq_sectors(req); 806 if (!cqr->buildclk || !cqr->startclk || 807 !cqr->stopclk || !cqr->endclk || 808 !sectors) 809 return; 810 811 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 812 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 813 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 814 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 815 tottimeps = tottime / sectors; 816 817 dasd_profile_counter(sectors, sectors_ind); 818 dasd_profile_counter(tottime, tottime_ind); 819 dasd_profile_counter(tottimeps, tottimeps_ind); 820 dasd_profile_counter(strtime, strtime_ind); 821 dasd_profile_counter(irqtime, irqtime_ind); 822 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 823 dasd_profile_counter(endtime, endtime_ind); 824 825 if (dasd_global_profile_level) { 826 dasd_profile_end_add_data(&dasd_global_profile_data, 827 cqr->startdev != block->base, 828 cqr->cpmode == 1, 829 rq_data_dir(req) == READ, 830 sectors, sectors_ind, tottime_ind, 831 tottimeps_ind, strtime_ind, 832 irqtime_ind, irqtimeps_ind, 833 endtime_ind); 834 } 835 836 spin_lock(&block->profile.lock); 837 if (block->profile.data) 838 dasd_profile_end_add_data(block->profile.data, 839 cqr->startdev != block->base, 840 cqr->cpmode == 1, 841 rq_data_dir(req) == READ, 842 sectors, sectors_ind, tottime_ind, 843 tottimeps_ind, strtime_ind, 844 irqtime_ind, irqtimeps_ind, 845 endtime_ind); 846 spin_unlock(&block->profile.lock); 847 848 spin_lock(&device->profile.lock); 849 if (device->profile.data) 850 dasd_profile_end_add_data(device->profile.data, 851 cqr->startdev != block->base, 852 cqr->cpmode == 1, 853 rq_data_dir(req) == READ, 854 sectors, sectors_ind, tottime_ind, 855 tottimeps_ind, strtime_ind, 856 irqtime_ind, irqtimeps_ind, 857 endtime_ind); 858 spin_unlock(&device->profile.lock); 859 } 860 861 void dasd_profile_reset(struct dasd_profile *profile) 862 { 863 struct dasd_profile_info *data; 864 865 spin_lock_bh(&profile->lock); 866 data = profile->data; 867 if (!data) { 868 spin_unlock_bh(&profile->lock); 869 return; 870 } 871 memset(data, 0, sizeof(*data)); 872 getnstimeofday(&data->starttod); 873 spin_unlock_bh(&profile->lock); 874 } 875 876 void dasd_global_profile_reset(void) 877 { 878 memset(&dasd_global_profile_data, 0, sizeof(dasd_global_profile_data)); 879 getnstimeofday(&dasd_global_profile_data.starttod); 880 } 881 882 int dasd_profile_on(struct dasd_profile *profile) 883 { 884 struct dasd_profile_info *data; 885 886 data = kzalloc(sizeof(*data), GFP_KERNEL); 887 if (!data) 888 return -ENOMEM; 889 spin_lock_bh(&profile->lock); 890 if (profile->data) { 891 spin_unlock_bh(&profile->lock); 892 kfree(data); 893 return 0; 894 } 895 getnstimeofday(&data->starttod); 896 profile->data = data; 897 spin_unlock_bh(&profile->lock); 898 return 0; 899 } 900 901 void dasd_profile_off(struct dasd_profile *profile) 902 { 903 spin_lock_bh(&profile->lock); 904 kfree(profile->data); 905 profile->data = NULL; 906 spin_unlock_bh(&profile->lock); 907 } 908 909 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 910 { 911 char *buffer; 912 913 buffer = vmalloc(user_len + 1); 914 if (buffer == NULL) 915 return ERR_PTR(-ENOMEM); 916 if (copy_from_user(buffer, user_buf, user_len) != 0) { 917 vfree(buffer); 918 return ERR_PTR(-EFAULT); 919 } 920 /* got the string, now strip linefeed. 
*/ 921 if (buffer[user_len - 1] == '\n') 922 buffer[user_len - 1] = 0; 923 else 924 buffer[user_len] = 0; 925 return buffer; 926 } 927 928 static ssize_t dasd_stats_write(struct file *file, 929 const char __user *user_buf, 930 size_t user_len, loff_t *pos) 931 { 932 char *buffer, *str; 933 int rc; 934 struct seq_file *m = (struct seq_file *)file->private_data; 935 struct dasd_profile *prof = m->private; 936 937 if (user_len > 65536) 938 user_len = 65536; 939 buffer = dasd_get_user_string(user_buf, user_len); 940 if (IS_ERR(buffer)) 941 return PTR_ERR(buffer); 942 943 str = skip_spaces(buffer); 944 rc = user_len; 945 if (strncmp(str, "reset", 5) == 0) { 946 dasd_profile_reset(prof); 947 } else if (strncmp(str, "on", 2) == 0) { 948 rc = dasd_profile_on(prof); 949 if (!rc) 950 rc = user_len; 951 } else if (strncmp(str, "off", 3) == 0) { 952 dasd_profile_off(prof); 953 } else 954 rc = -EINVAL; 955 vfree(buffer); 956 return rc; 957 } 958 959 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 960 { 961 int i; 962 963 for (i = 0; i < 32; i++) 964 seq_printf(m, "%u ", array[i]); 965 seq_putc(m, '\n'); 966 } 967 968 static void dasd_stats_seq_print(struct seq_file *m, 969 struct dasd_profile_info *data) 970 { 971 seq_printf(m, "start_time %ld.%09ld\n", 972 data->starttod.tv_sec, data->starttod.tv_nsec); 973 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 974 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 975 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 976 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 977 seq_printf(m, "histogram_sectors "); 978 dasd_stats_array(m, data->dasd_io_secs); 979 seq_printf(m, "histogram_io_times "); 980 dasd_stats_array(m, data->dasd_io_times); 981 seq_printf(m, "histogram_io_times_weighted "); 982 dasd_stats_array(m, data->dasd_io_timps); 983 seq_printf(m, "histogram_time_build_to_ssch "); 984 dasd_stats_array(m, data->dasd_io_time1); 985 seq_printf(m, "histogram_time_ssch_to_irq "); 986 dasd_stats_array(m, data->dasd_io_time2); 987 seq_printf(m, "histogram_time_ssch_to_irq_weighted "); 988 dasd_stats_array(m, data->dasd_io_time2ps); 989 seq_printf(m, "histogram_time_irq_to_end "); 990 dasd_stats_array(m, data->dasd_io_time3); 991 seq_printf(m, "histogram_ccw_queue_length "); 992 dasd_stats_array(m, data->dasd_io_nr_req); 993 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 994 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 995 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 996 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 997 seq_printf(m, "histogram_read_sectors "); 998 dasd_stats_array(m, data->dasd_read_secs); 999 seq_printf(m, "histogram_read_times "); 1000 dasd_stats_array(m, data->dasd_read_times); 1001 seq_printf(m, "histogram_read_time_build_to_ssch "); 1002 dasd_stats_array(m, data->dasd_read_time1); 1003 seq_printf(m, "histogram_read_time_ssch_to_irq "); 1004 dasd_stats_array(m, data->dasd_read_time2); 1005 seq_printf(m, "histogram_read_time_irq_to_end "); 1006 dasd_stats_array(m, data->dasd_read_time3); 1007 seq_printf(m, "histogram_read_ccw_queue_length "); 1008 dasd_stats_array(m, data->dasd_read_nr_req); 1009 } 1010 1011 static int dasd_stats_show(struct seq_file *m, void *v) 1012 { 1013 struct dasd_profile *profile; 1014 struct dasd_profile_info *data; 1015 1016 profile = m->private; 1017 spin_lock_bh(&profile->lock); 1018 data = profile->data; 1019 if (!data) { 1020 spin_unlock_bh(&profile->lock); 1021 seq_printf(m, "disabled\n"); 1022 
return 0; 1023 } 1024 dasd_stats_seq_print(m, data); 1025 spin_unlock_bh(&profile->lock); 1026 return 0; 1027 } 1028 1029 static int dasd_stats_open(struct inode *inode, struct file *file) 1030 { 1031 struct dasd_profile *profile = inode->i_private; 1032 return single_open(file, dasd_stats_show, profile); 1033 } 1034 1035 static const struct file_operations dasd_stats_raw_fops = { 1036 .owner = THIS_MODULE, 1037 .open = dasd_stats_open, 1038 .read = seq_read, 1039 .llseek = seq_lseek, 1040 .release = single_release, 1041 .write = dasd_stats_write, 1042 }; 1043 1044 static ssize_t dasd_stats_global_write(struct file *file, 1045 const char __user *user_buf, 1046 size_t user_len, loff_t *pos) 1047 { 1048 char *buffer, *str; 1049 ssize_t rc; 1050 1051 if (user_len > 65536) 1052 user_len = 65536; 1053 buffer = dasd_get_user_string(user_buf, user_len); 1054 if (IS_ERR(buffer)) 1055 return PTR_ERR(buffer); 1056 str = skip_spaces(buffer); 1057 rc = user_len; 1058 if (strncmp(str, "reset", 5) == 0) { 1059 dasd_global_profile_reset(); 1060 } else if (strncmp(str, "on", 2) == 0) { 1061 dasd_global_profile_reset(); 1062 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 1063 } else if (strncmp(str, "off", 3) == 0) { 1064 dasd_global_profile_level = DASD_PROFILE_OFF; 1065 } else 1066 rc = -EINVAL; 1067 vfree(buffer); 1068 return rc; 1069 } 1070 1071 static int dasd_stats_global_show(struct seq_file *m, void *v) 1072 { 1073 if (!dasd_global_profile_level) { 1074 seq_printf(m, "disabled\n"); 1075 return 0; 1076 } 1077 dasd_stats_seq_print(m, &dasd_global_profile_data); 1078 return 0; 1079 } 1080 1081 static int dasd_stats_global_open(struct inode *inode, struct file *file) 1082 { 1083 return single_open(file, dasd_stats_global_show, NULL); 1084 } 1085 1086 static const struct file_operations dasd_stats_global_fops = { 1087 .owner = THIS_MODULE, 1088 .open = dasd_stats_global_open, 1089 .read = seq_read, 1090 .llseek = seq_lseek, 1091 .release = single_release, 1092 .write = dasd_stats_global_write, 1093 }; 1094 1095 static void dasd_profile_init(struct dasd_profile *profile, 1096 struct dentry *base_dentry) 1097 { 1098 umode_t mode; 1099 struct dentry *pde; 1100 1101 if (!base_dentry) 1102 return; 1103 profile->dentry = NULL; 1104 profile->data = NULL; 1105 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1106 pde = debugfs_create_file("statistics", mode, base_dentry, 1107 profile, &dasd_stats_raw_fops); 1108 if (pde && !IS_ERR(pde)) 1109 profile->dentry = pde; 1110 return; 1111 } 1112 1113 static void dasd_profile_exit(struct dasd_profile *profile) 1114 { 1115 dasd_profile_off(profile); 1116 if (profile->dentry) { 1117 debugfs_remove(profile->dentry); 1118 profile->dentry = NULL; 1119 } 1120 } 1121 1122 static void dasd_statistics_removeroot(void) 1123 { 1124 dasd_global_profile_level = DASD_PROFILE_OFF; 1125 if (dasd_global_profile_dentry) { 1126 debugfs_remove(dasd_global_profile_dentry); 1127 dasd_global_profile_dentry = NULL; 1128 } 1129 if (dasd_debugfs_global_entry) 1130 debugfs_remove(dasd_debugfs_global_entry); 1131 if (dasd_debugfs_root_entry) 1132 debugfs_remove(dasd_debugfs_root_entry); 1133 } 1134 1135 static void dasd_statistics_createroot(void) 1136 { 1137 umode_t mode; 1138 struct dentry *pde; 1139 1140 dasd_debugfs_root_entry = NULL; 1141 dasd_debugfs_global_entry = NULL; 1142 dasd_global_profile_dentry = NULL; 1143 pde = debugfs_create_dir("dasd", NULL); 1144 if (!pde || IS_ERR(pde)) 1145 goto error; 1146 dasd_debugfs_root_entry = pde; 1147 pde = debugfs_create_dir("global", 
dasd_debugfs_root_entry); 1148 if (!pde || IS_ERR(pde)) 1149 goto error; 1150 dasd_debugfs_global_entry = pde; 1151 1152 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1153 pde = debugfs_create_file("statistics", mode, dasd_debugfs_global_entry, 1154 NULL, &dasd_stats_global_fops); 1155 if (!pde || IS_ERR(pde)) 1156 goto error; 1157 dasd_global_profile_dentry = pde; 1158 return; 1159 1160 error: 1161 DBF_EVENT(DBF_ERR, "%s", 1162 "Creation of the dasd debugfs interface failed"); 1163 dasd_statistics_removeroot(); 1164 return; 1165 } 1166 1167 #else 1168 #define dasd_profile_start(block, cqr, req) do {} while (0) 1169 #define dasd_profile_end(block, cqr, req) do {} while (0) 1170 1171 static void dasd_statistics_createroot(void) 1172 { 1173 return; 1174 } 1175 1176 static void dasd_statistics_removeroot(void) 1177 { 1178 return; 1179 } 1180 1181 int dasd_stats_generic_show(struct seq_file *m, void *v) 1182 { 1183 seq_printf(m, "Statistics are not activated in this kernel\n"); 1184 return 0; 1185 } 1186 1187 static void dasd_profile_init(struct dasd_profile *profile, 1188 struct dentry *base_dentry) 1189 { 1190 return; 1191 } 1192 1193 static void dasd_profile_exit(struct dasd_profile *profile) 1194 { 1195 return; 1196 } 1197 1198 int dasd_profile_on(struct dasd_profile *profile) 1199 { 1200 return 0; 1201 } 1202 1203 #endif /* CONFIG_DASD_PROFILE */ 1204 1205 /* 1206 * Allocate memory for a channel program with 'cplength' channel 1207 * command words and 'datasize' additional space. There are two 1208 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed 1209 * memory and 2) dasd_smalloc_request uses the static ccw memory 1210 * that gets allocated for each device. 1211 */ 1212 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, 1213 int datasize, 1214 struct dasd_device *device) 1215 { 1216 struct dasd_ccw_req *cqr; 1217 1218 /* Sanity checks */ 1219 BUG_ON(datasize > PAGE_SIZE || 1220 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 1221 1222 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 1223 if (cqr == NULL) 1224 return ERR_PTR(-ENOMEM); 1225 cqr->cpaddr = NULL; 1226 if (cplength > 0) { 1227 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 1228 GFP_ATOMIC | GFP_DMA); 1229 if (cqr->cpaddr == NULL) { 1230 kfree(cqr); 1231 return ERR_PTR(-ENOMEM); 1232 } 1233 } 1234 cqr->data = NULL; 1235 if (datasize > 0) { 1236 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 1237 if (cqr->data == NULL) { 1238 kfree(cqr->cpaddr); 1239 kfree(cqr); 1240 return ERR_PTR(-ENOMEM); 1241 } 1242 } 1243 cqr->magic = magic; 1244 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1245 dasd_get_device(device); 1246 return cqr; 1247 } 1248 1249 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, 1250 int datasize, 1251 struct dasd_device *device) 1252 { 1253 unsigned long flags; 1254 struct dasd_ccw_req *cqr; 1255 char *data; 1256 int size; 1257 1258 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 1259 if (cplength > 0) 1260 size += cplength * sizeof(struct ccw1); 1261 if (datasize > 0) 1262 size += datasize; 1263 spin_lock_irqsave(&device->mem_lock, flags); 1264 cqr = (struct dasd_ccw_req *) 1265 dasd_alloc_chunk(&device->ccw_chunks, size); 1266 spin_unlock_irqrestore(&device->mem_lock, flags); 1267 if (cqr == NULL) 1268 return ERR_PTR(-ENOMEM); 1269 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 1270 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 1271 cqr->cpaddr = NULL; 1272 if (cplength > 0) { 1273 cqr->cpaddr = (struct ccw1 *) data; 1274 data += 
cplength*sizeof(struct ccw1); 1275 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 1276 } 1277 cqr->data = NULL; 1278 if (datasize > 0) { 1279 cqr->data = data; 1280 memset(cqr->data, 0, datasize); 1281 } 1282 cqr->magic = magic; 1283 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1284 dasd_get_device(device); 1285 return cqr; 1286 } 1287 1288 /* 1289 * Free memory of a channel program. This function needs to free all the 1290 * idal lists that might have been created by dasd_set_cda and the 1291 * struct dasd_ccw_req itself. 1292 */ 1293 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1294 { 1295 #ifdef CONFIG_64BIT 1296 struct ccw1 *ccw; 1297 1298 /* Clear any idals used for the request. */ 1299 ccw = cqr->cpaddr; 1300 do { 1301 clear_normalized_cda(ccw); 1302 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 1303 #endif 1304 kfree(cqr->cpaddr); 1305 kfree(cqr->data); 1306 kfree(cqr); 1307 dasd_put_device(device); 1308 } 1309 1310 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1311 { 1312 unsigned long flags; 1313 1314 spin_lock_irqsave(&device->mem_lock, flags); 1315 dasd_free_chunk(&device->ccw_chunks, cqr); 1316 spin_unlock_irqrestore(&device->mem_lock, flags); 1317 dasd_put_device(device); 1318 } 1319 1320 /* 1321 * Check discipline magic in cqr. 1322 */ 1323 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) 1324 { 1325 struct dasd_device *device; 1326 1327 if (cqr == NULL) 1328 return -EINVAL; 1329 device = cqr->startdev; 1330 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 1331 DBF_DEV_EVENT(DBF_WARNING, device, 1332 " dasd_ccw_req 0x%08x magic doesn't match" 1333 " discipline 0x%08x", 1334 cqr->magic, 1335 *(unsigned int *) device->discipline->name); 1336 return -EINVAL; 1337 } 1338 return 0; 1339 } 1340 1341 /* 1342 * Terminate the current i/o and set the request to clear_pending. 1343 * Timer keeps device runnig. 1344 * ccw_device_clear can fail if the i/o subsystem 1345 * is in a bad mood. 1346 */ 1347 int dasd_term_IO(struct dasd_ccw_req *cqr) 1348 { 1349 struct dasd_device *device; 1350 int retries, rc; 1351 char errorstring[ERRORLENGTH]; 1352 1353 /* Check the cqr */ 1354 rc = dasd_check_cqr(cqr); 1355 if (rc) 1356 return rc; 1357 retries = 0; 1358 device = (struct dasd_device *) cqr->startdev; 1359 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1360 rc = ccw_device_clear(device->cdev, (long) cqr); 1361 switch (rc) { 1362 case 0: /* termination successful */ 1363 cqr->status = DASD_CQR_CLEAR_PENDING; 1364 cqr->stopclk = get_tod_clock(); 1365 cqr->starttime = 0; 1366 DBF_DEV_EVENT(DBF_DEBUG, device, 1367 "terminate cqr %p successful", 1368 cqr); 1369 break; 1370 case -ENODEV: 1371 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1372 "device gone, retry"); 1373 break; 1374 case -EIO: 1375 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1376 "I/O error, retry"); 1377 break; 1378 case -EINVAL: 1379 case -EBUSY: 1380 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1381 "device busy, retry later"); 1382 break; 1383 default: 1384 /* internal error 10 - unknown rc*/ 1385 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1386 dev_err(&device->cdev->dev, "An error occurred in the " 1387 "DASD device driver, reason=%s\n", errorstring); 1388 BUG(); 1389 break; 1390 } 1391 retries++; 1392 } 1393 dasd_schedule_device_bh(device); 1394 return rc; 1395 } 1396 1397 /* 1398 * Start the i/o. This start_IO can fail if the channel is really busy. 1399 * In that case set up a timer to start the request later. 
1400 */ 1401 int dasd_start_IO(struct dasd_ccw_req *cqr) 1402 { 1403 struct dasd_device *device; 1404 int rc; 1405 char errorstring[ERRORLENGTH]; 1406 1407 /* Check the cqr */ 1408 rc = dasd_check_cqr(cqr); 1409 if (rc) { 1410 cqr->intrc = rc; 1411 return rc; 1412 } 1413 device = (struct dasd_device *) cqr->startdev; 1414 if (((cqr->block && 1415 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1416 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1417 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1418 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1419 "because of stolen lock", cqr); 1420 cqr->status = DASD_CQR_ERROR; 1421 cqr->intrc = -EPERM; 1422 return -EPERM; 1423 } 1424 if (cqr->retries < 0) { 1425 /* internal error 14 - start_IO run out of retries */ 1426 sprintf(errorstring, "14 %p", cqr); 1427 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1428 "device driver, reason=%s\n", errorstring); 1429 cqr->status = DASD_CQR_ERROR; 1430 return -EIO; 1431 } 1432 cqr->startclk = get_tod_clock(); 1433 cqr->starttime = jiffies; 1434 cqr->retries--; 1435 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1436 cqr->lpm &= device->path_data.opm; 1437 if (!cqr->lpm) 1438 cqr->lpm = device->path_data.opm; 1439 } 1440 if (cqr->cpmode == 1) { 1441 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1442 (long) cqr, cqr->lpm); 1443 } else { 1444 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1445 (long) cqr, cqr->lpm, 0); 1446 } 1447 switch (rc) { 1448 case 0: 1449 cqr->status = DASD_CQR_IN_IO; 1450 break; 1451 case -EBUSY: 1452 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1453 "start_IO: device busy, retry later"); 1454 break; 1455 case -ETIMEDOUT: 1456 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1457 "start_IO: request timeout, retry later"); 1458 break; 1459 case -EACCES: 1460 /* -EACCES indicates that the request used only a subset of the 1461 * available paths and all these paths are gone. If the lpm of 1462 * this request was only a subset of the opm (e.g. the ppm) then 1463 * we just do a retry with all available paths. 1464 * If we already use the full opm, something is amiss, and we 1465 * need a full path verification. 
1466 */ 1467 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1468 DBF_DEV_EVENT(DBF_WARNING, device, 1469 "start_IO: selected paths gone (%x)", 1470 cqr->lpm); 1471 } else if (cqr->lpm != device->path_data.opm) { 1472 cqr->lpm = device->path_data.opm; 1473 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1474 "start_IO: selected paths gone," 1475 " retry on all paths"); 1476 } else { 1477 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1478 "start_IO: all paths in opm gone," 1479 " do path verification"); 1480 dasd_generic_last_path_gone(device); 1481 device->path_data.opm = 0; 1482 device->path_data.ppm = 0; 1483 device->path_data.npm = 0; 1484 device->path_data.tbvpm = 1485 ccw_device_get_path_mask(device->cdev); 1486 } 1487 break; 1488 case -ENODEV: 1489 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1490 "start_IO: -ENODEV device gone, retry"); 1491 break; 1492 case -EIO: 1493 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1494 "start_IO: -EIO device gone, retry"); 1495 break; 1496 case -EINVAL: 1497 /* most likely caused in power management context */ 1498 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1499 "start_IO: -EINVAL device currently " 1500 "not accessible"); 1501 break; 1502 default: 1503 /* internal error 11 - unknown rc */ 1504 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1505 dev_err(&device->cdev->dev, 1506 "An error occurred in the DASD device driver, " 1507 "reason=%s\n", errorstring); 1508 BUG(); 1509 break; 1510 } 1511 cqr->intrc = rc; 1512 return rc; 1513 } 1514 1515 /* 1516 * Timeout function for dasd devices. This is used for different purposes 1517 * 1) missing interrupt handler for normal operation 1518 * 2) delayed start of request where start_IO failed with -EBUSY 1519 * 3) timeout for missing state change interrupts 1520 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1521 * DASD_CQR_QUEUED for 2) and 3). 1522 */ 1523 static void dasd_device_timeout(unsigned long ptr) 1524 { 1525 unsigned long flags; 1526 struct dasd_device *device; 1527 1528 device = (struct dasd_device *) ptr; 1529 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1530 /* re-activate request queue */ 1531 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1532 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1533 dasd_schedule_device_bh(device); 1534 } 1535 1536 /* 1537 * Setup timeout for a device in jiffies. 1538 */ 1539 void dasd_device_set_timer(struct dasd_device *device, int expires) 1540 { 1541 if (expires == 0) 1542 del_timer(&device->timer); 1543 else 1544 mod_timer(&device->timer, jiffies + expires); 1545 } 1546 1547 /* 1548 * Clear timeout for a device. 
1549 */ 1550 void dasd_device_clear_timer(struct dasd_device *device) 1551 { 1552 del_timer(&device->timer); 1553 } 1554 1555 static void dasd_handle_killed_request(struct ccw_device *cdev, 1556 unsigned long intparm) 1557 { 1558 struct dasd_ccw_req *cqr; 1559 struct dasd_device *device; 1560 1561 if (!intparm) 1562 return; 1563 cqr = (struct dasd_ccw_req *) intparm; 1564 if (cqr->status != DASD_CQR_IN_IO) { 1565 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1566 "invalid status in handle_killed_request: " 1567 "%02x", cqr->status); 1568 return; 1569 } 1570 1571 device = dasd_device_from_cdev_locked(cdev); 1572 if (IS_ERR(device)) { 1573 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1574 "unable to get device from cdev"); 1575 return; 1576 } 1577 1578 if (!cqr->startdev || 1579 device != cqr->startdev || 1580 strncmp(cqr->startdev->discipline->ebcname, 1581 (char *) &cqr->magic, 4)) { 1582 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1583 "invalid device in request"); 1584 dasd_put_device(device); 1585 return; 1586 } 1587 1588 /* Schedule request to be retried. */ 1589 cqr->status = DASD_CQR_QUEUED; 1590 1591 dasd_device_clear_timer(device); 1592 dasd_schedule_device_bh(device); 1593 dasd_put_device(device); 1594 } 1595 1596 void dasd_generic_handle_state_change(struct dasd_device *device) 1597 { 1598 /* First of all start sense subsystem status request. */ 1599 dasd_eer_snss(device); 1600 1601 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1602 dasd_schedule_device_bh(device); 1603 if (device->block) 1604 dasd_schedule_block_bh(device->block); 1605 } 1606 1607 /* 1608 * Interrupt handler for "normal" ssch-io based dasd devices. 1609 */ 1610 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1611 struct irb *irb) 1612 { 1613 struct dasd_ccw_req *cqr, *next; 1614 struct dasd_device *device; 1615 unsigned long long now; 1616 int expires; 1617 1618 if (IS_ERR(irb)) { 1619 switch (PTR_ERR(irb)) { 1620 case -EIO: 1621 break; 1622 case -ETIMEDOUT: 1623 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1624 "request timed out\n", __func__); 1625 break; 1626 default: 1627 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1628 "unknown error %ld\n", __func__, 1629 PTR_ERR(irb)); 1630 } 1631 dasd_handle_killed_request(cdev, intparm); 1632 return; 1633 } 1634 1635 now = get_tod_clock(); 1636 cqr = (struct dasd_ccw_req *) intparm; 1637 /* check for conditions that should be handled immediately */ 1638 if (!cqr || 1639 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1640 scsw_cstat(&irb->scsw) == 0)) { 1641 if (cqr) 1642 memcpy(&cqr->irb, irb, sizeof(*irb)); 1643 device = dasd_device_from_cdev_locked(cdev); 1644 if (IS_ERR(device)) 1645 return; 1646 /* ignore unsolicited interrupts for DIAG discipline */ 1647 if (device->discipline == dasd_diag_discipline_pointer) { 1648 dasd_put_device(device); 1649 return; 1650 } 1651 device->discipline->dump_sense_dbf(device, irb, "int"); 1652 if (device->features & DASD_FEATURE_ERPLOG) 1653 device->discipline->dump_sense(device, cqr, irb); 1654 device->discipline->check_for_device_change(device, cqr, irb); 1655 dasd_put_device(device); 1656 } 1657 if (!cqr) 1658 return; 1659 1660 device = (struct dasd_device *) cqr->startdev; 1661 if (!device || 1662 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1663 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1664 "invalid device in request"); 1665 return; 1666 } 1667 1668 /* Check for clear pending */ 1669 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1670 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 
1671 cqr->status = DASD_CQR_CLEARED; 1672 dasd_device_clear_timer(device); 1673 wake_up(&dasd_flush_wq); 1674 dasd_schedule_device_bh(device); 1675 return; 1676 } 1677 1678 /* check status - the request might have been killed by dyn detach */ 1679 if (cqr->status != DASD_CQR_IN_IO) { 1680 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1681 "status %02x", dev_name(&cdev->dev), cqr->status); 1682 return; 1683 } 1684 1685 next = NULL; 1686 expires = 0; 1687 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1688 scsw_cstat(&irb->scsw) == 0) { 1689 /* request was completed successfully */ 1690 cqr->status = DASD_CQR_SUCCESS; 1691 cqr->stopclk = now; 1692 /* Start first request on queue if possible -> fast_io. */ 1693 if (cqr->devlist.next != &device->ccw_queue) { 1694 next = list_entry(cqr->devlist.next, 1695 struct dasd_ccw_req, devlist); 1696 } 1697 } else { /* error */ 1698 /* 1699 * If we don't want complex ERP for this request, then just 1700 * reset this and retry it in the fastpath 1701 */ 1702 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1703 cqr->retries > 0) { 1704 if (cqr->lpm == device->path_data.opm) 1705 DBF_DEV_EVENT(DBF_DEBUG, device, 1706 "default ERP in fastpath " 1707 "(%i retries left)", 1708 cqr->retries); 1709 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1710 cqr->lpm = device->path_data.opm; 1711 cqr->status = DASD_CQR_QUEUED; 1712 next = cqr; 1713 } else 1714 cqr->status = DASD_CQR_ERROR; 1715 } 1716 if (next && (next->status == DASD_CQR_QUEUED) && 1717 (!device->stopped)) { 1718 if (device->discipline->start_IO(next) == 0) 1719 expires = next->expires; 1720 } 1721 if (expires != 0) 1722 dasd_device_set_timer(device, expires); 1723 else 1724 dasd_device_clear_timer(device); 1725 dasd_schedule_device_bh(device); 1726 } 1727 1728 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1729 { 1730 struct dasd_device *device; 1731 1732 device = dasd_device_from_cdev_locked(cdev); 1733 1734 if (IS_ERR(device)) 1735 goto out; 1736 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1737 device->state != device->target || 1738 !device->discipline->check_for_device_change){ 1739 dasd_put_device(device); 1740 goto out; 1741 } 1742 if (device->discipline->dump_sense_dbf) 1743 device->discipline->dump_sense_dbf(device, irb, "uc"); 1744 device->discipline->check_for_device_change(device, NULL, irb); 1745 dasd_put_device(device); 1746 out: 1747 return UC_TODO_RETRY; 1748 } 1749 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1750 1751 /* 1752 * If we have an error on a dasd_block layer request then we cancel 1753 * and return all further requests from the same dasd_block as well. 1754 */ 1755 static void __dasd_device_recovery(struct dasd_device *device, 1756 struct dasd_ccw_req *ref_cqr) 1757 { 1758 struct list_head *l, *n; 1759 struct dasd_ccw_req *cqr; 1760 1761 /* 1762 * only requeue request that came from the dasd_block layer 1763 */ 1764 if (!ref_cqr->block) 1765 return; 1766 1767 list_for_each_safe(l, n, &device->ccw_queue) { 1768 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1769 if (cqr->status == DASD_CQR_QUEUED && 1770 ref_cqr->block == cqr->block) { 1771 cqr->status = DASD_CQR_CLEARED; 1772 } 1773 } 1774 }; 1775 1776 /* 1777 * Remove those ccw requests from the queue that need to be returned 1778 * to the upper layer. 
1779 */ 1780 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1781 struct list_head *final_queue) 1782 { 1783 struct list_head *l, *n; 1784 struct dasd_ccw_req *cqr; 1785 1786 /* Process request with final status. */ 1787 list_for_each_safe(l, n, &device->ccw_queue) { 1788 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1789 1790 /* Stop list processing at the first non-final request. */ 1791 if (cqr->status == DASD_CQR_QUEUED || 1792 cqr->status == DASD_CQR_IN_IO || 1793 cqr->status == DASD_CQR_CLEAR_PENDING) 1794 break; 1795 if (cqr->status == DASD_CQR_ERROR) { 1796 __dasd_device_recovery(device, cqr); 1797 } 1798 /* Rechain finished requests to final queue */ 1799 list_move_tail(&cqr->devlist, final_queue); 1800 } 1801 } 1802 1803 /* 1804 * the cqrs from the final queue are returned to the upper layer 1805 * by setting a dasd_block state and calling the callback function 1806 */ 1807 static void __dasd_device_process_final_queue(struct dasd_device *device, 1808 struct list_head *final_queue) 1809 { 1810 struct list_head *l, *n; 1811 struct dasd_ccw_req *cqr; 1812 struct dasd_block *block; 1813 void (*callback)(struct dasd_ccw_req *, void *data); 1814 void *callback_data; 1815 char errorstring[ERRORLENGTH]; 1816 1817 list_for_each_safe(l, n, final_queue) { 1818 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1819 list_del_init(&cqr->devlist); 1820 block = cqr->block; 1821 callback = cqr->callback; 1822 callback_data = cqr->callback_data; 1823 if (block) 1824 spin_lock_bh(&block->queue_lock); 1825 switch (cqr->status) { 1826 case DASD_CQR_SUCCESS: 1827 cqr->status = DASD_CQR_DONE; 1828 break; 1829 case DASD_CQR_ERROR: 1830 cqr->status = DASD_CQR_NEED_ERP; 1831 break; 1832 case DASD_CQR_CLEARED: 1833 cqr->status = DASD_CQR_TERMINATED; 1834 break; 1835 default: 1836 /* internal error 12 - wrong cqr status*/ 1837 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1838 dev_err(&device->cdev->dev, 1839 "An error occurred in the DASD device driver, " 1840 "reason=%s\n", errorstring); 1841 BUG(); 1842 } 1843 if (cqr->callback != NULL) 1844 (callback)(cqr, callback_data); 1845 if (block) 1846 spin_unlock_bh(&block->queue_lock); 1847 } 1848 } 1849 1850 /* 1851 * Take a look at the first request on the ccw queue and check 1852 * if it reached its expire time. If so, terminate the IO. 1853 */ 1854 static void __dasd_device_check_expire(struct dasd_device *device) 1855 { 1856 struct dasd_ccw_req *cqr; 1857 1858 if (list_empty(&device->ccw_queue)) 1859 return; 1860 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1861 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1862 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1863 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1864 /* 1865 * IO in safe offline processing should not 1866 * run out of retries 1867 */ 1868 cqr->retries++; 1869 } 1870 if (device->discipline->term_IO(cqr) != 0) { 1871 /* Hmpf, try again in 5 sec */ 1872 dev_err(&device->cdev->dev, 1873 "cqr %p timed out (%lus) but cannot be " 1874 "ended, retrying in 5 s\n", 1875 cqr, (cqr->expires/HZ)); 1876 cqr->expires += 5*HZ; 1877 dasd_device_set_timer(device, 5*HZ); 1878 } else { 1879 dev_err(&device->cdev->dev, 1880 "cqr %p timed out (%lus), %i retries " 1881 "remaining\n", cqr, (cqr->expires/HZ), 1882 cqr->retries); 1883 } 1884 } 1885 } 1886 1887 /* 1888 * Take a look at the first request on the ccw queue and check 1889 * if it needs to be started. 
1890 */ 1891 static void __dasd_device_start_head(struct dasd_device *device) 1892 { 1893 struct dasd_ccw_req *cqr; 1894 int rc; 1895 1896 if (list_empty(&device->ccw_queue)) 1897 return; 1898 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1899 if (cqr->status != DASD_CQR_QUEUED) 1900 return; 1901 /* when device is stopped, return request to previous layer 1902 * exception: only the disconnect or unresumed bits are set and the 1903 * cqr is a path verification request 1904 */ 1905 if (device->stopped && 1906 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 1907 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { 1908 cqr->intrc = -EAGAIN; 1909 cqr->status = DASD_CQR_CLEARED; 1910 dasd_schedule_device_bh(device); 1911 return; 1912 } 1913 1914 rc = device->discipline->start_IO(cqr); 1915 if (rc == 0) 1916 dasd_device_set_timer(device, cqr->expires); 1917 else if (rc == -EACCES) { 1918 dasd_schedule_device_bh(device); 1919 } else 1920 /* Hmpf, try again in 1/2 sec */ 1921 dasd_device_set_timer(device, 50); 1922 } 1923 1924 static void __dasd_device_check_path_events(struct dasd_device *device) 1925 { 1926 int rc; 1927 1928 if (device->path_data.tbvpm) { 1929 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 1930 DASD_UNRESUMED_PM)) 1931 return; 1932 rc = device->discipline->verify_path( 1933 device, device->path_data.tbvpm); 1934 if (rc) 1935 dasd_device_set_timer(device, 50); 1936 else 1937 device->path_data.tbvpm = 0; 1938 } 1939 }; 1940 1941 /* 1942 * Go through all request on the dasd_device request queue, 1943 * terminate them on the cdev if necessary, and return them to the 1944 * submitting layer via callback. 1945 * Note: 1946 * Make sure that all 'submitting layers' still exist when 1947 * this function is called!. In other words, when 'device' is a base 1948 * device then all block layer requests must have been removed before 1949 * via dasd_flush_block_queue. 1950 */ 1951 int dasd_flush_device_queue(struct dasd_device *device) 1952 { 1953 struct dasd_ccw_req *cqr, *n; 1954 int rc; 1955 struct list_head flush_queue; 1956 1957 INIT_LIST_HEAD(&flush_queue); 1958 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1959 rc = 0; 1960 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1961 /* Check status and move request to flush_queue */ 1962 switch (cqr->status) { 1963 case DASD_CQR_IN_IO: 1964 rc = device->discipline->term_IO(cqr); 1965 if (rc) { 1966 /* unable to terminate requeust */ 1967 dev_err(&device->cdev->dev, 1968 "Flushing the DASD request queue " 1969 "failed for request %p\n", cqr); 1970 /* stop flush processing */ 1971 goto finished; 1972 } 1973 break; 1974 case DASD_CQR_QUEUED: 1975 cqr->stopclk = get_tod_clock(); 1976 cqr->status = DASD_CQR_CLEARED; 1977 break; 1978 default: /* no need to modify the others */ 1979 break; 1980 } 1981 list_move_tail(&cqr->devlist, &flush_queue); 1982 } 1983 finished: 1984 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1985 /* 1986 * After this point all requests must be in state CLEAR_PENDING, 1987 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1988 * one of the others. 
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	__dasd_device_check_path_events(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
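
/*
 * The sleep_on functions below track completion through
 * cqr->callback_data: it is set to DASD_SLEEPON_START_TAG when the
 * request is queued and to DASD_SLEEPON_END_TAG by dasd_wakeup_cb
 * once the request has reached a final status.
 */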

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_wakeup_cb);

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Check if error recovery is necessary and perform it if so;
 * returns 1 if recovery was started, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}

static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EAGAIN;
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;

		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_tod_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}

static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue)
{
	struct dasd_ccw_req *cqr;

	list_for_each_entry(cqr, ccw_queue, blocklist) {
		if (cqr->callback_data != DASD_SLEEPON_END_TAG)
			return 0;
	}

	return 1;
}

static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct dasd_ccw_req *cqr, *n;

retry:
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		device = cqr->startdev;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    !dasd_eer_enabled(device)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EAGAIN;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !device->stopped);
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				cqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		if (!cqr->callback)
			cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
	}

	wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue));

	rc = 0;
	list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
		if (__dasd_sleep_on_erp(cqr))
			rc = 1;
	}
	if (rc)
		goto retry;

	return 0;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}

/*
 * Start requests from a ccw_queue and wait for their completion.
 */
int dasd_sleep_on_queue(struct list_head *ccw_queue)
{
	return _dasd_sleep_on_queue(ccw_queue, 0);
}
EXPORT_SYMBOL(dasd_sleep_on_queue);

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptibly for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}
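
/*
 * A minimal, illustrative sleep_on sequence (error handling omitted;
 * cf. dasd_generic_read_dev_chars below for a complete example):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... set up the channel program, startdev and memdev ...
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);
 *	dasd_sfree_request(cqr, cqr->memdev);
 */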

/*
 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	rc = device->discipline->term_IO(cqr);
	if (!rc)
		/*
		 * CQR terminated because a more important request is pending.
		 * Undo decreasing of retry counter because this is
		 * not an error case.
		 */
		cqr->retries++;
	return rc;
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		cqr->status = DASD_CQR_FAILED;
		cqr->intrc = -EPERM;
		return -EIO;
	}
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	/*
	 * add new request as second; first the terminated cqr
	 * needs to be finished
	 */
	list_add(&cqr->devlist, device->ccw_queue.next);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}

/*
 * Cancels a request that was started with one of the sleep_on functions.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_tod_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}

/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
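
/*
 * Note on the build_cp error convention handled below: -EBUSY and
 * -ENOMEM simply end the fetch loop, -EAGAIN additionally stops the
 * device for half a second if the failing request is head-of-queue,
 * and any other error ends the block layer request with -EIO.
 */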

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while ((req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_tod_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) &&
		    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
			cqr->status = DASD_CQR_FAILED;
			cqr->intrc = -EPERM;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	if (waitqueue_active(&shutdown_waitq))
		wake_up(&shutdown_waitq);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Requeue a request back to the block request queue;
 * only works for block requests.
 */
static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
	struct dasd_block *block = cqr->block;
	struct request *req;
	unsigned long flags;

	if (!block)
		return -EINVAL;
	spin_lock_irqsave(&block->queue_lock, flags);
	req = (struct request *) cqr->callback_data;
	blk_requeue_request(block->request_queue, req);
	spin_unlock_irqrestore(&block->queue_lock, flags);

	return 0;
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements
			 */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_tod_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedules a call to dasd_block_tasklet over the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c.
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set up the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	if (block->base->features & DASD_FEATURE_USERAW) {
		/*
		 * the max_blocks value for raw_track access is 256;
		 * it is higher than the native ECKD value because we
		 * only need one ccw per track, so the max_hw_sectors
		 * are 2048 x 512B = 1024kB = 16 tracks
		 */
		max = 2048;
	} else {
		max = block->base->discipline->max_blocks << block->s2b_shift;
	}
	blk_queue_logical_block_size(block->request_queue,
				     block->bp_block);
	blk_queue_max_hw_sectors(block->request_queue, max);
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_device *base;
	int rc;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	atomic_inc(&base->block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	dasd_put_device(base);
	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&base->block->open_count);
	dasd_put_device(base);
	return rc;
}

static void dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_device *base = dasd_device_from_gendisk(disk);

	if (base) {
		atomic_dec(&base->block->open_count);
		module_put(base->discipline->owner);
		dasd_put_device(base);
	}
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *base;

	base = dasd_device_from_gendisk(bdev->bd_disk);
	if (!base)
		return -ENODEV;

	if (!base->discipline ||
	    !base->discipline->fill_geometry) {
		dasd_put_device(base);
		return -EINVAL;
	}
	base->discipline->fill_geometry(base->block, geo);
	geo->start = get_start_sect(bdev) >> base->block->s2b_shift;
	dasd_put_device(base);
	return 0;
}

const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};

/*
 * End of block device operations.
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
	dasd_statistics_removeroot();
}

/*
 * SECTION: common functions for ccw_driver use
 */
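
/*
 * Illustrative only: a discipline typically wires these generic
 * helpers into its struct ccw_driver, roughly like
 *
 *	static int dasd_xxx_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &dasd_xxx_discipline);
 *	}
 *
 * where dasd_xxx is a hypothetical discipline name.
 */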

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device)) {
		dasd_remove_sysfs_files(cdev);
		return;
	}
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) &&
	    !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		dasd_remove_sysfs_files(cdev);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);

	dasd_remove_sysfs_files(cdev);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}

int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count, rc;

	rc = 0;
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}

	if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * Safe offline is already running. We can only get here
		 * via a normal offline request, so the safe_offline flag
		 * needs to be removed to run normal offline and kill all I/O.
		 */
		if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			/* Already doing normal offline processing */
			dasd_put_device(device);
			return -EBUSY;
		} else
			clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);

	} else
		if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
			/* Already doing offline processing */
			dasd_put_device(device);
			return -EBUSY;
		}

	/*
	 * if safe_offline is called set the safe_offline_running flag and
	 * clear safe_offline so that a call to normal offline
	 * can overrun safe_offline processing
	 */
	if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) &&
	    !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) {
		/*
		 * If we want to set the device safe offline all IO operations
		 * should be finished before continuing the offline process
		 * so sync bdev first and then wait for our queues to become
		 * empty
		 */
		/* sync blockdev and partitions */
		rc = fsync_bdev(device->block->bdev);
		if (rc != 0)
			goto interrupted;

		/* schedule device tasklet and wait for completion */
		dasd_schedule_device_bh(device);
		rc = wait_event_interruptible(shutdown_waitq,
					      _wait_for_empty_queues(device));
		if (rc != 0)
			goto interrupted;
	}

	set_bit(DASD_FLAG_OFFLINE, &device->flags);
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;

interrupted:
	/* interrupted by signal */
	clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags);
	clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags);
	clear_bit(DASD_FLAG_OFFLINE, &device->flags);
	dasd_put_device(device);
	return rc;
}

int dasd_generic_last_path_gone(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	dev_warn(&device->cdev->dev, "No operational channel path is left "
		 "for the device\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone");
	/* First of all call extended error reporting. */
	dasd_eer_write(device, NULL, DASD_EER_NOPATH);

	if (device->state < DASD_STATE_BASIC)
		return 0;
	/* Device is active. We want to keep it. */
	list_for_each_entry(cqr, &device->ccw_queue, devlist)
		if ((cqr->status == DASD_CQR_IN_IO) ||
		    (cqr->status == DASD_CQR_CLEAR_PENDING)) {
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries++;
		}
	dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone);

int dasd_generic_path_operational(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "A channel path to the device has become "
		 "operational\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational");
	dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
	if (device->stopped & DASD_UNRESUMED_PM) {
		dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
		dasd_restore_device(device);
		return 1;
	}
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 1;
}
EXPORT_SYMBOL_GPL(dasd_generic_path_operational);

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		device->path_data.opm = 0;
		device->path_data.ppm = 0;
		device->path_data.npm = 0;
		ret = dasd_generic_last_path_gone(device);
		break;
	case CIO_OPER:
		ret = 1;
		if (device->path_data.opm)
			ret = dasd_generic_path_operational(device);
		break;
	}
	dasd_put_device(device);
	return ret;
}

void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	int chp;
	__u8 oldopm, eventlpm;
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;
	for (chp = 0; chp < 8; chp++) {
		eventlpm = 0x80 >> chp;
		if (path_event[chp] & PE_PATH_GONE) {
			oldopm = device->path_data.opm;
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			if (oldopm && !device->path_data.opm) {
				dev_warn(&device->cdev->dev,
					 "No verified channel paths remain "
					 "for the device\n");
				DBF_DEV_EVENT(DBF_WARNING, device,
					      "%s", "last verified path gone");
				dasd_eer_write(device, NULL, DASD_EER_NOPATH);
				dasd_device_set_stop_bits(device,
							  DASD_STOPPED_DC_WAIT);
			}
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			device->path_data.opm &= ~eventlpm;
			device->path_data.ppm &= ~eventlpm;
			device->path_data.npm &= ~eventlpm;
			device->path_data.tbvpm |= eventlpm;
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!(device->path_data.opm & eventlpm) &&
			    !(device->path_data.tbvpm & eventlpm)) {
				/*
				 * we cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first
				 */
				device->path_data.tbvpm |= eventlpm;
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);
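
/*
 * The path masks used here (opm, ppm, npm, tbvpm, lpm) are 8-bit
 * vectors with one bit per channel path; bit 0x80 corresponds to
 * path 0, matching the 0x80 >> chp mapping in
 * dasd_generic_path_event above.
 */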

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!device->path_data.opm && lpm) {
		device->path_data.opm = lpm;
		dasd_generic_path_operational(device);
	} else
		device->path_data.opm |= lpm;
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);


int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	struct list_head freeze_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	/* clear active requests and requeue them to block layer if possible */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;

		/* requeueing to the block layer only works for
		 * block device requests
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}
		if (cqr->block)
			list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * if requests remain then they are internal requests
	 * and go back to the device queue
	 */
	if (!list_empty(&freeze_queue)) {
		/* splice the freeze_queue back onto the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(cdev));
		list_splice_tail(&freeze_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(cdev));
	}
	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call the discipline restore function if the device is not
	 * stopped; do nothing e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
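
/*
 * A minimal, illustrative use of dasd_get_sense() below (hypothetical
 * caller; the sense bit check follows the pattern used by the ERP
 * code elsewhere in this driver):
 *
 *	sense = dasd_get_sense(&cqr->irb);
 *	if (sense && (sense[0] & SNS0_CMD_REJECT))
 *		... handle a command reject ...
 */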

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);