/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

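/*
 * Overview of the device state machine implemented by the dasd_state_*_to_*
 * functions in this file and driven by dasd_change_state():
 *
 *   DASD_STATE_NEW   -> DASD_STATE_KNOWN  (allocate the block request queue)
 *   DASD_STATE_KNOWN -> DASD_STATE_BASIC  (gendisk, debugfs, debug area)
 *   DASD_STATE_BASIC -> DASD_STATE_READY  (disk analysis, capacity, partitions)
 *                       or DASD_STATE_UNFMT if the analysis fails
 *   DASD_STATE_READY -> DASD_STATE_ONLINE (block layer requests are processed)
 *
 * Each step has a matching teardown step in the opposite direction.
 * dasd_increase_state() and dasd_decrease_state() walk these steps until
 * device->state reaches device->target.
 */
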
/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				goto out;
			}
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

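/*
 * dasd_state_ready_to_online() above and dasd_state_online_to_ready() below
 * emit KOBJ_CHANGE uevents for the disk and all of its partitions when the
 * device changes to or from the ONLINE state (for DASD_FEATURE_USERAW
 * devices only the raw disk is announced, and only on the way online), so
 * that user space, typically udev, can notice that the device changed and
 * re-evaluate it.
 */
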
441 */ 442 static int dasd_state_online_to_ready(struct dasd_device *device) 443 { 444 int rc; 445 struct gendisk *disk; 446 struct disk_part_iter piter; 447 struct hd_struct *part; 448 449 if (device->discipline->online_to_ready) { 450 rc = device->discipline->online_to_ready(device); 451 if (rc) 452 return rc; 453 } 454 455 device->state = DASD_STATE_READY; 456 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 457 disk = device->block->bdev->bd_disk; 458 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 459 while ((part = disk_part_iter_next(&piter))) 460 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 461 disk_part_iter_exit(&piter); 462 } 463 return 0; 464 } 465 466 /* 467 * Device startup state changes. 468 */ 469 static int dasd_increase_state(struct dasd_device *device) 470 { 471 int rc; 472 473 rc = 0; 474 if (device->state == DASD_STATE_NEW && 475 device->target >= DASD_STATE_KNOWN) 476 rc = dasd_state_new_to_known(device); 477 478 if (!rc && 479 device->state == DASD_STATE_KNOWN && 480 device->target >= DASD_STATE_BASIC) 481 rc = dasd_state_known_to_basic(device); 482 483 if (!rc && 484 device->state == DASD_STATE_BASIC && 485 device->target >= DASD_STATE_READY) 486 rc = dasd_state_basic_to_ready(device); 487 488 if (!rc && 489 device->state == DASD_STATE_UNFMT && 490 device->target > DASD_STATE_UNFMT) 491 rc = -EPERM; 492 493 if (!rc && 494 device->state == DASD_STATE_READY && 495 device->target >= DASD_STATE_ONLINE) 496 rc = dasd_state_ready_to_online(device); 497 498 return rc; 499 } 500 501 /* 502 * Device shutdown state changes. 503 */ 504 static int dasd_decrease_state(struct dasd_device *device) 505 { 506 int rc; 507 508 rc = 0; 509 if (device->state == DASD_STATE_ONLINE && 510 device->target <= DASD_STATE_READY) 511 rc = dasd_state_online_to_ready(device); 512 513 if (!rc && 514 device->state == DASD_STATE_READY && 515 device->target <= DASD_STATE_BASIC) 516 rc = dasd_state_ready_to_basic(device); 517 518 if (!rc && 519 device->state == DASD_STATE_UNFMT && 520 device->target <= DASD_STATE_BASIC) 521 rc = dasd_state_unfmt_to_basic(device); 522 523 if (!rc && 524 device->state == DASD_STATE_BASIC && 525 device->target <= DASD_STATE_KNOWN) 526 rc = dasd_state_basic_to_known(device); 527 528 if (!rc && 529 device->state == DASD_STATE_KNOWN && 530 device->target <= DASD_STATE_NEW) 531 rc = dasd_state_known_to_new(device); 532 533 return rc; 534 } 535 536 /* 537 * This is the main startup/shutdown routine. 538 */ 539 static void dasd_change_state(struct dasd_device *device) 540 { 541 int rc; 542 543 if (device->state == device->target) 544 /* Already where we want to go today... */ 545 return; 546 if (device->state < device->target) 547 rc = dasd_increase_state(device); 548 else 549 rc = dasd_decrease_state(device); 550 if (rc == -EAGAIN) 551 return; 552 if (rc) 553 device->target = device->state; 554 555 /* let user-space know that the device status changed */ 556 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 557 558 if (device->state == device->target) 559 wake_up(&dasd_init_waitq); 560 } 561 562 /* 563 * Kick starter for devices that did not complete the startup/shutdown 564 * procedure or were sleeping because of a pending state. 565 * dasd_kick_device will schedule a call do do_kick_device to the kernel 566 * event daemon. 
567 */ 568 static void do_kick_device(struct work_struct *work) 569 { 570 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 571 mutex_lock(&device->state_mutex); 572 dasd_change_state(device); 573 mutex_unlock(&device->state_mutex); 574 dasd_schedule_device_bh(device); 575 dasd_put_device(device); 576 } 577 578 void dasd_kick_device(struct dasd_device *device) 579 { 580 dasd_get_device(device); 581 /* queue call to dasd_kick_device to the kernel event daemon. */ 582 if (!schedule_work(&device->kick_work)) 583 dasd_put_device(device); 584 } 585 EXPORT_SYMBOL(dasd_kick_device); 586 587 /* 588 * dasd_reload_device will schedule a call do do_reload_device to the kernel 589 * event daemon. 590 */ 591 static void do_reload_device(struct work_struct *work) 592 { 593 struct dasd_device *device = container_of(work, struct dasd_device, 594 reload_device); 595 device->discipline->reload(device); 596 dasd_put_device(device); 597 } 598 599 void dasd_reload_device(struct dasd_device *device) 600 { 601 dasd_get_device(device); 602 /* queue call to dasd_reload_device to the kernel event daemon. */ 603 if (!schedule_work(&device->reload_device)) 604 dasd_put_device(device); 605 } 606 EXPORT_SYMBOL(dasd_reload_device); 607 608 /* 609 * dasd_restore_device will schedule a call do do_restore_device to the kernel 610 * event daemon. 611 */ 612 static void do_restore_device(struct work_struct *work) 613 { 614 struct dasd_device *device = container_of(work, struct dasd_device, 615 restore_device); 616 device->cdev->drv->restore(device->cdev); 617 dasd_put_device(device); 618 } 619 620 void dasd_restore_device(struct dasd_device *device) 621 { 622 dasd_get_device(device); 623 /* queue call to dasd_restore_device to the kernel event daemon. */ 624 if (!schedule_work(&device->restore_device)) 625 dasd_put_device(device); 626 } 627 628 /* 629 * Set the target state for a device and starts the state change. 630 */ 631 void dasd_set_target_state(struct dasd_device *device, int target) 632 { 633 dasd_get_device(device); 634 mutex_lock(&device->state_mutex); 635 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 636 if (dasd_probeonly && target > DASD_STATE_READY) 637 target = DASD_STATE_READY; 638 if (device->target != target) { 639 if (device->state == target) 640 wake_up(&dasd_init_waitq); 641 device->target = target; 642 } 643 if (device->state != device->target) 644 dasd_change_state(device); 645 mutex_unlock(&device->state_mutex); 646 dasd_put_device(device); 647 } 648 EXPORT_SYMBOL(dasd_set_target_state); 649 650 /* 651 * Enable devices with device numbers in [from..to]. 652 */ 653 static inline int _wait_for_device(struct dasd_device *device) 654 { 655 return (device->state == device->target); 656 } 657 658 void dasd_enable_device(struct dasd_device *device) 659 { 660 dasd_set_target_state(device, DASD_STATE_ONLINE); 661 if (device->state <= DASD_STATE_KNOWN) 662 /* No discipline for device found. */ 663 dasd_set_target_state(device, DASD_STATE_NEW); 664 /* Now wait for the devices to come up. */ 665 wait_event(dasd_init_waitq, _wait_for_device(device)); 666 667 dasd_reload_device(device); 668 if (device->discipline->kick_validate) 669 device->discipline->kick_validate(device); 670 } 671 EXPORT_SYMBOL(dasd_enable_device); 672 673 /* 674 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 
675 */ 676 677 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 678 679 #ifdef CONFIG_DASD_PROFILE 680 struct dasd_profile dasd_global_profile = { 681 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 682 }; 683 static struct dentry *dasd_debugfs_global_entry; 684 685 /* 686 * Add profiling information for cqr before execution. 687 */ 688 static void dasd_profile_start(struct dasd_block *block, 689 struct dasd_ccw_req *cqr, 690 struct request *req) 691 { 692 struct list_head *l; 693 unsigned int counter; 694 struct dasd_device *device; 695 696 /* count the length of the chanq for statistics */ 697 counter = 0; 698 if (dasd_global_profile_level || block->profile.data) 699 list_for_each(l, &block->ccw_queue) 700 if (++counter >= 31) 701 break; 702 703 spin_lock(&dasd_global_profile.lock); 704 if (dasd_global_profile.data) { 705 dasd_global_profile.data->dasd_io_nr_req[counter]++; 706 if (rq_data_dir(req) == READ) 707 dasd_global_profile.data->dasd_read_nr_req[counter]++; 708 } 709 spin_unlock(&dasd_global_profile.lock); 710 711 spin_lock(&block->profile.lock); 712 if (block->profile.data) { 713 block->profile.data->dasd_io_nr_req[counter]++; 714 if (rq_data_dir(req) == READ) 715 block->profile.data->dasd_read_nr_req[counter]++; 716 } 717 spin_unlock(&block->profile.lock); 718 719 /* 720 * We count the request for the start device, even though it may run on 721 * some other device due to error recovery. This way we make sure that 722 * we count each request only once. 723 */ 724 device = cqr->startdev; 725 if (device->profile.data) { 726 counter = 1; /* request is not yet queued on the start device */ 727 list_for_each(l, &device->ccw_queue) 728 if (++counter >= 31) 729 break; 730 } 731 spin_lock(&device->profile.lock); 732 if (device->profile.data) { 733 device->profile.data->dasd_io_nr_req[counter]++; 734 if (rq_data_dir(req) == READ) 735 device->profile.data->dasd_read_nr_req[counter]++; 736 } 737 spin_unlock(&device->profile.lock); 738 } 739 740 /* 741 * Add profiling information for cqr after execution. 
742 */ 743 744 #define dasd_profile_counter(value, index) \ 745 { \ 746 for (index = 0; index < 31 && value >> (2+index); index++) \ 747 ; \ 748 } 749 750 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 751 int is_alias, 752 int is_tpm, 753 int is_read, 754 long sectors, 755 int sectors_ind, 756 int tottime_ind, 757 int tottimeps_ind, 758 int strtime_ind, 759 int irqtime_ind, 760 int irqtimeps_ind, 761 int endtime_ind) 762 { 763 /* in case of an overflow, reset the whole profile */ 764 if (data->dasd_io_reqs == UINT_MAX) { 765 memset(data, 0, sizeof(*data)); 766 getnstimeofday(&data->starttod); 767 } 768 data->dasd_io_reqs++; 769 data->dasd_io_sects += sectors; 770 if (is_alias) 771 data->dasd_io_alias++; 772 if (is_tpm) 773 data->dasd_io_tpm++; 774 775 data->dasd_io_secs[sectors_ind]++; 776 data->dasd_io_times[tottime_ind]++; 777 data->dasd_io_timps[tottimeps_ind]++; 778 data->dasd_io_time1[strtime_ind]++; 779 data->dasd_io_time2[irqtime_ind]++; 780 data->dasd_io_time2ps[irqtimeps_ind]++; 781 data->dasd_io_time3[endtime_ind]++; 782 783 if (is_read) { 784 data->dasd_read_reqs++; 785 data->dasd_read_sects += sectors; 786 if (is_alias) 787 data->dasd_read_alias++; 788 if (is_tpm) 789 data->dasd_read_tpm++; 790 data->dasd_read_secs[sectors_ind]++; 791 data->dasd_read_times[tottime_ind]++; 792 data->dasd_read_time1[strtime_ind]++; 793 data->dasd_read_time2[irqtime_ind]++; 794 data->dasd_read_time3[endtime_ind]++; 795 } 796 } 797 798 static void dasd_profile_end(struct dasd_block *block, 799 struct dasd_ccw_req *cqr, 800 struct request *req) 801 { 802 long strtime, irqtime, endtime, tottime; /* in microseconds */ 803 long tottimeps, sectors; 804 struct dasd_device *device; 805 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 806 int irqtime_ind, irqtimeps_ind, endtime_ind; 807 808 device = cqr->startdev; 809 if (!(dasd_global_profile_level || 810 block->profile.data || 811 device->profile.data)) 812 return; 813 814 sectors = blk_rq_sectors(req); 815 if (!cqr->buildclk || !cqr->startclk || 816 !cqr->stopclk || !cqr->endclk || 817 !sectors) 818 return; 819 820 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 821 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 822 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 823 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 824 tottimeps = tottime / sectors; 825 826 dasd_profile_counter(sectors, sectors_ind); 827 dasd_profile_counter(tottime, tottime_ind); 828 dasd_profile_counter(tottimeps, tottimeps_ind); 829 dasd_profile_counter(strtime, strtime_ind); 830 dasd_profile_counter(irqtime, irqtime_ind); 831 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 832 dasd_profile_counter(endtime, endtime_ind); 833 834 spin_lock(&dasd_global_profile.lock); 835 if (dasd_global_profile.data) { 836 dasd_profile_end_add_data(dasd_global_profile.data, 837 cqr->startdev != block->base, 838 cqr->cpmode == 1, 839 rq_data_dir(req) == READ, 840 sectors, sectors_ind, tottime_ind, 841 tottimeps_ind, strtime_ind, 842 irqtime_ind, irqtimeps_ind, 843 endtime_ind); 844 } 845 spin_unlock(&dasd_global_profile.lock); 846 847 spin_lock(&block->profile.lock); 848 if (block->profile.data) 849 dasd_profile_end_add_data(block->profile.data, 850 cqr->startdev != block->base, 851 cqr->cpmode == 1, 852 rq_data_dir(req) == READ, 853 sectors, sectors_ind, tottime_ind, 854 tottimeps_ind, strtime_ind, 855 irqtime_ind, irqtimeps_ind, 856 endtime_ind); 857 spin_unlock(&block->profile.lock); 858 859 spin_lock(&device->profile.lock); 860 if 
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data)
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data)
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	getnstimeofday(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	getnstimeofday(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

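/*
 * The "statistics" debugfs files created by dasd_profile_init() below are
 * controlled through simple keyword writes, e.g. (shell sketch only; the
 * actual paths depend on where debugfs is mounted and on the disk names):
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/dasda/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * Reading such a file dumps the counters and histograms formatted by
 * dasd_stats_seq_print(), or "disabled" if no data has been allocated.
 */
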
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %ld.%09ld\n",
		   data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_kmalloc_request);

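/*
 * A request obtained with dasd_kmalloc_request() must be released with
 * dasd_kfree_request(), one obtained with dasd_smalloc_request() with
 * dasd_sfree_request(); both allocators take a reference on the device
 * that the matching free routine drops again.  Illustrative sketch only:
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... build the channel program in cqr->cpaddr / cqr->data, start it ...
 *	dasd_sfree_request(cqr, device);
 */
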
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kfree_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
1294 */ 1295 int dasd_term_IO(struct dasd_ccw_req *cqr) 1296 { 1297 struct dasd_device *device; 1298 int retries, rc; 1299 char errorstring[ERRORLENGTH]; 1300 1301 /* Check the cqr */ 1302 rc = dasd_check_cqr(cqr); 1303 if (rc) 1304 return rc; 1305 retries = 0; 1306 device = (struct dasd_device *) cqr->startdev; 1307 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1308 rc = ccw_device_clear(device->cdev, (long) cqr); 1309 switch (rc) { 1310 case 0: /* termination successful */ 1311 cqr->status = DASD_CQR_CLEAR_PENDING; 1312 cqr->stopclk = get_tod_clock(); 1313 cqr->starttime = 0; 1314 DBF_DEV_EVENT(DBF_DEBUG, device, 1315 "terminate cqr %p successful", 1316 cqr); 1317 break; 1318 case -ENODEV: 1319 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1320 "device gone, retry"); 1321 break; 1322 case -EIO: 1323 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1324 "I/O error, retry"); 1325 break; 1326 case -EINVAL: 1327 /* 1328 * device not valid so no I/O could be running 1329 * handle CQR as termination successful 1330 */ 1331 cqr->status = DASD_CQR_CLEARED; 1332 cqr->stopclk = get_tod_clock(); 1333 cqr->starttime = 0; 1334 /* no retries for invalid devices */ 1335 cqr->retries = -1; 1336 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1337 "EINVAL, handle as terminated"); 1338 /* fake rc to success */ 1339 rc = 0; 1340 break; 1341 case -EBUSY: 1342 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1343 "device busy, retry later"); 1344 break; 1345 default: 1346 /* internal error 10 - unknown rc*/ 1347 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1348 dev_err(&device->cdev->dev, "An error occurred in the " 1349 "DASD device driver, reason=%s\n", errorstring); 1350 BUG(); 1351 break; 1352 } 1353 retries++; 1354 } 1355 dasd_schedule_device_bh(device); 1356 return rc; 1357 } 1358 EXPORT_SYMBOL(dasd_term_IO); 1359 1360 /* 1361 * Start the i/o. This start_IO can fail if the channel is really busy. 1362 * In that case set up a timer to start the request later. 
1363 */ 1364 int dasd_start_IO(struct dasd_ccw_req *cqr) 1365 { 1366 struct dasd_device *device; 1367 int rc; 1368 char errorstring[ERRORLENGTH]; 1369 1370 /* Check the cqr */ 1371 rc = dasd_check_cqr(cqr); 1372 if (rc) { 1373 cqr->intrc = rc; 1374 return rc; 1375 } 1376 device = (struct dasd_device *) cqr->startdev; 1377 if (((cqr->block && 1378 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1379 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1380 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1381 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1382 "because of stolen lock", cqr); 1383 cqr->status = DASD_CQR_ERROR; 1384 cqr->intrc = -EPERM; 1385 return -EPERM; 1386 } 1387 if (cqr->retries < 0) { 1388 /* internal error 14 - start_IO run out of retries */ 1389 sprintf(errorstring, "14 %p", cqr); 1390 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1391 "device driver, reason=%s\n", errorstring); 1392 cqr->status = DASD_CQR_ERROR; 1393 return -EIO; 1394 } 1395 cqr->startclk = get_tod_clock(); 1396 cqr->starttime = jiffies; 1397 cqr->retries--; 1398 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1399 cqr->lpm &= device->path_data.opm; 1400 if (!cqr->lpm) 1401 cqr->lpm = device->path_data.opm; 1402 } 1403 if (cqr->cpmode == 1) { 1404 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1405 (long) cqr, cqr->lpm); 1406 } else { 1407 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1408 (long) cqr, cqr->lpm, 0); 1409 } 1410 switch (rc) { 1411 case 0: 1412 cqr->status = DASD_CQR_IN_IO; 1413 break; 1414 case -EBUSY: 1415 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1416 "start_IO: device busy, retry later"); 1417 break; 1418 case -ETIMEDOUT: 1419 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1420 "start_IO: request timeout, retry later"); 1421 break; 1422 case -EACCES: 1423 /* -EACCES indicates that the request used only a subset of the 1424 * available paths and all these paths are gone. If the lpm of 1425 * this request was only a subset of the opm (e.g. the ppm) then 1426 * we just do a retry with all available paths. 1427 * If we already use the full opm, something is amiss, and we 1428 * need a full path verification. 
1429 */ 1430 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1431 DBF_DEV_EVENT(DBF_WARNING, device, 1432 "start_IO: selected paths gone (%x)", 1433 cqr->lpm); 1434 } else if (cqr->lpm != device->path_data.opm) { 1435 cqr->lpm = device->path_data.opm; 1436 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1437 "start_IO: selected paths gone," 1438 " retry on all paths"); 1439 } else { 1440 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1441 "start_IO: all paths in opm gone," 1442 " do path verification"); 1443 dasd_generic_last_path_gone(device); 1444 device->path_data.opm = 0; 1445 device->path_data.ppm = 0; 1446 device->path_data.npm = 0; 1447 device->path_data.tbvpm = 1448 ccw_device_get_path_mask(device->cdev); 1449 } 1450 break; 1451 case -ENODEV: 1452 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1453 "start_IO: -ENODEV device gone, retry"); 1454 break; 1455 case -EIO: 1456 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1457 "start_IO: -EIO device gone, retry"); 1458 break; 1459 case -EINVAL: 1460 /* most likely caused in power management context */ 1461 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1462 "start_IO: -EINVAL device currently " 1463 "not accessible"); 1464 break; 1465 default: 1466 /* internal error 11 - unknown rc */ 1467 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1468 dev_err(&device->cdev->dev, 1469 "An error occurred in the DASD device driver, " 1470 "reason=%s\n", errorstring); 1471 BUG(); 1472 break; 1473 } 1474 cqr->intrc = rc; 1475 return rc; 1476 } 1477 EXPORT_SYMBOL(dasd_start_IO); 1478 1479 /* 1480 * Timeout function for dasd devices. This is used for different purposes 1481 * 1) missing interrupt handler for normal operation 1482 * 2) delayed start of request where start_IO failed with -EBUSY 1483 * 3) timeout for missing state change interrupts 1484 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1485 * DASD_CQR_QUEUED for 2) and 3). 1486 */ 1487 static void dasd_device_timeout(unsigned long ptr) 1488 { 1489 unsigned long flags; 1490 struct dasd_device *device; 1491 1492 device = (struct dasd_device *) ptr; 1493 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1494 /* re-activate request queue */ 1495 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1496 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1497 dasd_schedule_device_bh(device); 1498 } 1499 1500 /* 1501 * Setup timeout for a device in jiffies. 1502 */ 1503 void dasd_device_set_timer(struct dasd_device *device, int expires) 1504 { 1505 if (expires == 0) 1506 del_timer(&device->timer); 1507 else 1508 mod_timer(&device->timer, jiffies + expires); 1509 } 1510 EXPORT_SYMBOL(dasd_device_set_timer); 1511 1512 /* 1513 * Clear timeout for a device. 
1514 */ 1515 void dasd_device_clear_timer(struct dasd_device *device) 1516 { 1517 del_timer(&device->timer); 1518 } 1519 EXPORT_SYMBOL(dasd_device_clear_timer); 1520 1521 static void dasd_handle_killed_request(struct ccw_device *cdev, 1522 unsigned long intparm) 1523 { 1524 struct dasd_ccw_req *cqr; 1525 struct dasd_device *device; 1526 1527 if (!intparm) 1528 return; 1529 cqr = (struct dasd_ccw_req *) intparm; 1530 if (cqr->status != DASD_CQR_IN_IO) { 1531 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1532 "invalid status in handle_killed_request: " 1533 "%02x", cqr->status); 1534 return; 1535 } 1536 1537 device = dasd_device_from_cdev_locked(cdev); 1538 if (IS_ERR(device)) { 1539 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1540 "unable to get device from cdev"); 1541 return; 1542 } 1543 1544 if (!cqr->startdev || 1545 device != cqr->startdev || 1546 strncmp(cqr->startdev->discipline->ebcname, 1547 (char *) &cqr->magic, 4)) { 1548 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1549 "invalid device in request"); 1550 dasd_put_device(device); 1551 return; 1552 } 1553 1554 /* Schedule request to be retried. */ 1555 cqr->status = DASD_CQR_QUEUED; 1556 1557 dasd_device_clear_timer(device); 1558 dasd_schedule_device_bh(device); 1559 dasd_put_device(device); 1560 } 1561 1562 void dasd_generic_handle_state_change(struct dasd_device *device) 1563 { 1564 /* First of all start sense subsystem status request. */ 1565 dasd_eer_snss(device); 1566 1567 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1568 dasd_schedule_device_bh(device); 1569 if (device->block) 1570 dasd_schedule_block_bh(device->block); 1571 } 1572 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1573 1574 /* 1575 * Interrupt handler for "normal" ssch-io based dasd devices. 1576 */ 1577 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1578 struct irb *irb) 1579 { 1580 struct dasd_ccw_req *cqr, *next; 1581 struct dasd_device *device; 1582 unsigned long long now; 1583 int expires; 1584 1585 if (IS_ERR(irb)) { 1586 switch (PTR_ERR(irb)) { 1587 case -EIO: 1588 break; 1589 case -ETIMEDOUT: 1590 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1591 "request timed out\n", __func__); 1592 break; 1593 default: 1594 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1595 "unknown error %ld\n", __func__, 1596 PTR_ERR(irb)); 1597 } 1598 dasd_handle_killed_request(cdev, intparm); 1599 return; 1600 } 1601 1602 now = get_tod_clock(); 1603 cqr = (struct dasd_ccw_req *) intparm; 1604 /* check for conditions that should be handled immediately */ 1605 if (!cqr || 1606 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1607 scsw_cstat(&irb->scsw) == 0)) { 1608 if (cqr) 1609 memcpy(&cqr->irb, irb, sizeof(*irb)); 1610 device = dasd_device_from_cdev_locked(cdev); 1611 if (IS_ERR(device)) 1612 return; 1613 /* ignore unsolicited interrupts for DIAG discipline */ 1614 if (device->discipline == dasd_diag_discipline_pointer) { 1615 dasd_put_device(device); 1616 return; 1617 } 1618 device->discipline->dump_sense_dbf(device, irb, "int"); 1619 if (device->features & DASD_FEATURE_ERPLOG) 1620 device->discipline->dump_sense(device, cqr, irb); 1621 device->discipline->check_for_device_change(device, cqr, irb); 1622 dasd_put_device(device); 1623 } 1624 1625 /* check for for attention message */ 1626 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { 1627 device = dasd_device_from_cdev_locked(cdev); 1628 device->discipline->check_attention(device, irb->esw.esw1.lpum); 1629 dasd_put_device(device); 1630 } 1631 1632 if (!cqr) 1633 return; 1634 1635 device = 
	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == device->path_data.opm)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = device->path_data.opm;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

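/*
 * The helpers below implement the device-level bottom half: requests that
 * have reached a final status are collected from device->ccw_queue into a
 * local final queue (under the ccwdev lock) and are then handed back to
 * the submitting layer by calling cqr->callback outside of that lock.
 * __dasd_device_recovery() makes sure that, once one request of a
 * dasd_block has failed, all still-queued requests of the same block are
 * returned as well.
 */
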
1730 */ 1731 static void __dasd_device_recovery(struct dasd_device *device, 1732 struct dasd_ccw_req *ref_cqr) 1733 { 1734 struct list_head *l, *n; 1735 struct dasd_ccw_req *cqr; 1736 1737 /* 1738 * only requeue request that came from the dasd_block layer 1739 */ 1740 if (!ref_cqr->block) 1741 return; 1742 1743 list_for_each_safe(l, n, &device->ccw_queue) { 1744 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1745 if (cqr->status == DASD_CQR_QUEUED && 1746 ref_cqr->block == cqr->block) { 1747 cqr->status = DASD_CQR_CLEARED; 1748 } 1749 } 1750 }; 1751 1752 /* 1753 * Remove those ccw requests from the queue that need to be returned 1754 * to the upper layer. 1755 */ 1756 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1757 struct list_head *final_queue) 1758 { 1759 struct list_head *l, *n; 1760 struct dasd_ccw_req *cqr; 1761 1762 /* Process request with final status. */ 1763 list_for_each_safe(l, n, &device->ccw_queue) { 1764 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1765 1766 /* Skip any non-final request. */ 1767 if (cqr->status == DASD_CQR_QUEUED || 1768 cqr->status == DASD_CQR_IN_IO || 1769 cqr->status == DASD_CQR_CLEAR_PENDING) 1770 continue; 1771 if (cqr->status == DASD_CQR_ERROR) { 1772 __dasd_device_recovery(device, cqr); 1773 } 1774 /* Rechain finished requests to final queue */ 1775 list_move_tail(&cqr->devlist, final_queue); 1776 } 1777 } 1778 1779 /* 1780 * the cqrs from the final queue are returned to the upper layer 1781 * by setting a dasd_block state and calling the callback function 1782 */ 1783 static void __dasd_device_process_final_queue(struct dasd_device *device, 1784 struct list_head *final_queue) 1785 { 1786 struct list_head *l, *n; 1787 struct dasd_ccw_req *cqr; 1788 struct dasd_block *block; 1789 void (*callback)(struct dasd_ccw_req *, void *data); 1790 void *callback_data; 1791 char errorstring[ERRORLENGTH]; 1792 1793 list_for_each_safe(l, n, final_queue) { 1794 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1795 list_del_init(&cqr->devlist); 1796 block = cqr->block; 1797 callback = cqr->callback; 1798 callback_data = cqr->callback_data; 1799 if (block) 1800 spin_lock_bh(&block->queue_lock); 1801 switch (cqr->status) { 1802 case DASD_CQR_SUCCESS: 1803 cqr->status = DASD_CQR_DONE; 1804 break; 1805 case DASD_CQR_ERROR: 1806 cqr->status = DASD_CQR_NEED_ERP; 1807 break; 1808 case DASD_CQR_CLEARED: 1809 cqr->status = DASD_CQR_TERMINATED; 1810 break; 1811 default: 1812 /* internal error 12 - wrong cqr status*/ 1813 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1814 dev_err(&device->cdev->dev, 1815 "An error occurred in the DASD device driver, " 1816 "reason=%s\n", errorstring); 1817 BUG(); 1818 } 1819 if (cqr->callback != NULL) 1820 (callback)(cqr, callback_data); 1821 if (block) 1822 spin_unlock_bh(&block->queue_lock); 1823 } 1824 } 1825 1826 /* 1827 * Take a look at the first request on the ccw queue and check 1828 * if it reached its expire time. If so, terminate the IO. 
1829 */ 1830 static void __dasd_device_check_expire(struct dasd_device *device) 1831 { 1832 struct dasd_ccw_req *cqr; 1833 1834 if (list_empty(&device->ccw_queue)) 1835 return; 1836 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1837 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1838 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1839 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1840 /* 1841 * IO in safe offline processing should not 1842 * run out of retries 1843 */ 1844 cqr->retries++; 1845 } 1846 if (device->discipline->term_IO(cqr) != 0) { 1847 /* Hmpf, try again in 5 sec */ 1848 dev_err(&device->cdev->dev, 1849 "cqr %p timed out (%lus) but cannot be " 1850 "ended, retrying in 5 s\n", 1851 cqr, (cqr->expires/HZ)); 1852 cqr->expires += 5*HZ; 1853 dasd_device_set_timer(device, 5*HZ); 1854 } else { 1855 dev_err(&device->cdev->dev, 1856 "cqr %p timed out (%lus), %i retries " 1857 "remaining\n", cqr, (cqr->expires/HZ), 1858 cqr->retries); 1859 } 1860 } 1861 } 1862 1863 /* 1864 * Take a look at the first request on the ccw queue and check 1865 * if it needs to be started. 1866 */ 1867 static void __dasd_device_start_head(struct dasd_device *device) 1868 { 1869 struct dasd_ccw_req *cqr; 1870 int rc; 1871 1872 if (list_empty(&device->ccw_queue)) 1873 return; 1874 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1875 if (cqr->status != DASD_CQR_QUEUED) 1876 return; 1877 /* when device is stopped, return request to previous layer 1878 * exception: only the disconnect or unresumed bits are set and the 1879 * cqr is a path verification request 1880 */ 1881 if (device->stopped && 1882 !(!(device->stopped & ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 1883 && test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))) { 1884 cqr->intrc = -EAGAIN; 1885 cqr->status = DASD_CQR_CLEARED; 1886 dasd_schedule_device_bh(device); 1887 return; 1888 } 1889 1890 rc = device->discipline->start_IO(cqr); 1891 if (rc == 0) 1892 dasd_device_set_timer(device, cqr->expires); 1893 else if (rc == -EACCES) { 1894 dasd_schedule_device_bh(device); 1895 } else 1896 /* Hmpf, try again in 1/2 sec */ 1897 dasd_device_set_timer(device, 50); 1898 } 1899 1900 static void __dasd_device_check_path_events(struct dasd_device *device) 1901 { 1902 int rc; 1903 1904 if (device->path_data.tbvpm) { 1905 if (device->stopped & ~(DASD_STOPPED_DC_WAIT | 1906 DASD_UNRESUMED_PM)) 1907 return; 1908 rc = device->discipline->verify_path( 1909 device, device->path_data.tbvpm); 1910 if (rc) 1911 dasd_device_set_timer(device, 50); 1912 else 1913 device->path_data.tbvpm = 0; 1914 } 1915 }; 1916 1917 /* 1918 * Go through all request on the dasd_device request queue, 1919 * terminate them on the cdev if necessary, and return them to the 1920 * submitting layer via callback. 1921 * Note: 1922 * Make sure that all 'submitting layers' still exist when 1923 * this function is called!. In other words, when 'device' is a base 1924 * device then all block layer requests must have been removed before 1925 * via dasd_flush_block_queue. 
1926 */ 1927 int dasd_flush_device_queue(struct dasd_device *device) 1928 { 1929 struct dasd_ccw_req *cqr, *n; 1930 int rc; 1931 struct list_head flush_queue; 1932 1933 INIT_LIST_HEAD(&flush_queue); 1934 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1935 rc = 0; 1936 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1937 /* Check status and move request to flush_queue */ 1938 switch (cqr->status) { 1939 case DASD_CQR_IN_IO: 1940 rc = device->discipline->term_IO(cqr); 1941 if (rc) { 1942 /* unable to terminate requeust */ 1943 dev_err(&device->cdev->dev, 1944 "Flushing the DASD request queue " 1945 "failed for request %p\n", cqr); 1946 /* stop flush processing */ 1947 goto finished; 1948 } 1949 break; 1950 case DASD_CQR_QUEUED: 1951 cqr->stopclk = get_tod_clock(); 1952 cqr->status = DASD_CQR_CLEARED; 1953 break; 1954 default: /* no need to modify the others */ 1955 break; 1956 } 1957 list_move_tail(&cqr->devlist, &flush_queue); 1958 } 1959 finished: 1960 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1961 /* 1962 * After this point all requests must be in state CLEAR_PENDING, 1963 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1964 * one of the others. 1965 */ 1966 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1967 wait_event(dasd_flush_wq, 1968 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1969 /* 1970 * Now set each request back to TERMINATED, DONE or NEED_ERP 1971 * and call the callback function of flushed requests 1972 */ 1973 __dasd_device_process_final_queue(device, &flush_queue); 1974 return rc; 1975 } 1976 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 1977 1978 /* 1979 * Acquire the device lock and process queues for the device. 1980 */ 1981 static void dasd_device_tasklet(struct dasd_device *device) 1982 { 1983 struct list_head final_queue; 1984 1985 atomic_set (&device->tasklet_scheduled, 0); 1986 INIT_LIST_HEAD(&final_queue); 1987 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1988 /* Check expire time of first request on the ccw queue. */ 1989 __dasd_device_check_expire(device); 1990 /* find final requests on ccw queue */ 1991 __dasd_device_process_ccw_queue(device, &final_queue); 1992 __dasd_device_check_path_events(device); 1993 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1994 /* Now call the callback function of requests with final status */ 1995 __dasd_device_process_final_queue(device, &final_queue); 1996 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1997 /* Now check if the head of the ccw queue needs to be started. */ 1998 __dasd_device_start_head(device); 1999 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2000 if (waitqueue_active(&shutdown_waitq)) 2001 wake_up(&shutdown_waitq); 2002 dasd_put_device(device); 2003 } 2004 2005 /* 2006 * Schedules a call to dasd_tasklet over the device tasklet. 2007 */ 2008 void dasd_schedule_device_bh(struct dasd_device *device) 2009 { 2010 /* Protect against rescheduling. 
*/ 2011 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2012 return; 2013 dasd_get_device(device); 2014 tasklet_hi_schedule(&device->tasklet); 2015 } 2016 EXPORT_SYMBOL(dasd_schedule_device_bh); 2017 2018 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2019 { 2020 device->stopped |= bits; 2021 } 2022 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2023 2024 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2025 { 2026 device->stopped &= ~bits; 2027 if (!device->stopped) 2028 wake_up(&generic_waitq); 2029 } 2030 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2031 2032 /* 2033 * Queue a request to the head of the device ccw_queue. 2034 * Start the I/O if possible. 2035 */ 2036 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2037 { 2038 struct dasd_device *device; 2039 unsigned long flags; 2040 2041 device = cqr->startdev; 2042 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2043 cqr->status = DASD_CQR_QUEUED; 2044 list_add(&cqr->devlist, &device->ccw_queue); 2045 /* let the bh start the request to keep them in order */ 2046 dasd_schedule_device_bh(device); 2047 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2048 } 2049 EXPORT_SYMBOL(dasd_add_request_head); 2050 2051 /* 2052 * Queue a request to the tail of the device ccw_queue. 2053 * Start the I/O if possible. 2054 */ 2055 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2056 { 2057 struct dasd_device *device; 2058 unsigned long flags; 2059 2060 device = cqr->startdev; 2061 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2062 cqr->status = DASD_CQR_QUEUED; 2063 list_add_tail(&cqr->devlist, &device->ccw_queue); 2064 /* let the bh start the request to keep them in order */ 2065 dasd_schedule_device_bh(device); 2066 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2067 } 2068 EXPORT_SYMBOL(dasd_add_request_tail); 2069 2070 /* 2071 * Wakeup helper for the 'sleep_on' functions. 2072 */ 2073 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2074 { 2075 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2076 cqr->callback_data = DASD_SLEEPON_END_TAG; 2077 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2078 wake_up(&generic_waitq); 2079 } 2080 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2081 2082 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2083 { 2084 struct dasd_device *device; 2085 int rc; 2086 2087 device = cqr->startdev; 2088 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2089 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2090 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2091 return rc; 2092 } 2093 2094 /* 2095 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
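 *
 * With DASD_CQR_FLAGS_USE_ERP set, a terminated request is handed back
 * to the discipline, a request in DASD_CQR_NEED_ERP is passed to the
 * discipline's erp_action handler, a finished ERP request (cqr->refers
 * set) is post-processed and the sense data of a failed request is
 * logged.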
2096 */ 2097 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2098 { 2099 struct dasd_device *device; 2100 dasd_erp_fn_t erp_fn; 2101 2102 if (cqr->status == DASD_CQR_FILLED) 2103 return 0; 2104 device = cqr->startdev; 2105 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2106 if (cqr->status == DASD_CQR_TERMINATED) { 2107 device->discipline->handle_terminated_request(cqr); 2108 return 1; 2109 } 2110 if (cqr->status == DASD_CQR_NEED_ERP) { 2111 erp_fn = device->discipline->erp_action(cqr); 2112 erp_fn(cqr); 2113 return 1; 2114 } 2115 if (cqr->status == DASD_CQR_FAILED) 2116 dasd_log_sense(cqr, &cqr->irb); 2117 if (cqr->refers) { 2118 __dasd_process_erp(device, cqr); 2119 return 1; 2120 } 2121 } 2122 return 0; 2123 } 2124 2125 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2126 { 2127 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2128 if (cqr->refers) /* erp is not done yet */ 2129 return 1; 2130 return ((cqr->status != DASD_CQR_DONE) && 2131 (cqr->status != DASD_CQR_FAILED)); 2132 } else 2133 return (cqr->status == DASD_CQR_FILLED); 2134 } 2135 2136 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2137 { 2138 struct dasd_device *device; 2139 int rc; 2140 struct list_head ccw_queue; 2141 struct dasd_ccw_req *cqr; 2142 2143 INIT_LIST_HEAD(&ccw_queue); 2144 maincqr->status = DASD_CQR_FILLED; 2145 device = maincqr->startdev; 2146 list_add(&maincqr->blocklist, &ccw_queue); 2147 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2148 cqr = list_first_entry(&ccw_queue, 2149 struct dasd_ccw_req, blocklist)) { 2150 2151 if (__dasd_sleep_on_erp(cqr)) 2152 continue; 2153 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2154 continue; 2155 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2156 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2157 cqr->status = DASD_CQR_FAILED; 2158 cqr->intrc = -EPERM; 2159 continue; 2160 } 2161 /* Non-temporary stop condition will trigger fail fast */ 2162 if (device->stopped & ~DASD_STOPPED_PENDING && 2163 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2164 (!dasd_eer_enabled(device))) { 2165 cqr->status = DASD_CQR_FAILED; 2166 cqr->intrc = -ENOLINK; 2167 continue; 2168 } 2169 /* 2170 * Don't try to start requests if device is stopped 2171 * except path verification requests 2172 */ 2173 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2174 if (interruptible) { 2175 rc = wait_event_interruptible( 2176 generic_waitq, !(device->stopped)); 2177 if (rc == -ERESTARTSYS) { 2178 cqr->status = DASD_CQR_FAILED; 2179 maincqr->intrc = rc; 2180 continue; 2181 } 2182 } else 2183 wait_event(generic_waitq, !(device->stopped)); 2184 } 2185 if (!cqr->callback) 2186 cqr->callback = dasd_wakeup_cb; 2187 2188 cqr->callback_data = DASD_SLEEPON_START_TAG; 2189 dasd_add_request_tail(cqr); 2190 if (interruptible) { 2191 rc = wait_event_interruptible( 2192 generic_waitq, _wait_for_wakeup(cqr)); 2193 if (rc == -ERESTARTSYS) { 2194 dasd_cancel_req(cqr); 2195 /* wait (non-interruptible) for final status */ 2196 wait_event(generic_waitq, 2197 _wait_for_wakeup(cqr)); 2198 cqr->status = DASD_CQR_FAILED; 2199 maincqr->intrc = rc; 2200 continue; 2201 } 2202 } else 2203 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2204 } 2205 2206 maincqr->endclk = get_tod_clock(); 2207 if ((maincqr->status != DASD_CQR_DONE) && 2208 (maincqr->intrc != -ERESTARTSYS)) 2209 dasd_log_sense(maincqr, &maincqr->irb); 2210 if (maincqr->status == DASD_CQR_DONE) 2211 rc = 0; 2212 else if (maincqr->intrc) 2213 rc = maincqr->intrc; 
2214 else 2215 rc = -EIO; 2216 return rc; 2217 } 2218 2219 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2220 { 2221 struct dasd_ccw_req *cqr; 2222 2223 list_for_each_entry(cqr, ccw_queue, blocklist) { 2224 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2225 return 0; 2226 } 2227 2228 return 1; 2229 } 2230 2231 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2232 { 2233 struct dasd_device *device; 2234 struct dasd_ccw_req *cqr, *n; 2235 int rc; 2236 2237 retry: 2238 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2239 device = cqr->startdev; 2240 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2241 continue; 2242 2243 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2244 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2245 cqr->status = DASD_CQR_FAILED; 2246 cqr->intrc = -EPERM; 2247 continue; 2248 } 2249 /*Non-temporary stop condition will trigger fail fast*/ 2250 if (device->stopped & ~DASD_STOPPED_PENDING && 2251 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2252 !dasd_eer_enabled(device)) { 2253 cqr->status = DASD_CQR_FAILED; 2254 cqr->intrc = -EAGAIN; 2255 continue; 2256 } 2257 2258 /*Don't try to start requests if device is stopped*/ 2259 if (interruptible) { 2260 rc = wait_event_interruptible( 2261 generic_waitq, !device->stopped); 2262 if (rc == -ERESTARTSYS) { 2263 cqr->status = DASD_CQR_FAILED; 2264 cqr->intrc = rc; 2265 continue; 2266 } 2267 } else 2268 wait_event(generic_waitq, !(device->stopped)); 2269 2270 if (!cqr->callback) 2271 cqr->callback = dasd_wakeup_cb; 2272 cqr->callback_data = DASD_SLEEPON_START_TAG; 2273 dasd_add_request_tail(cqr); 2274 } 2275 2276 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2277 2278 rc = 0; 2279 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2280 /* 2281 * for alias devices simplify error recovery and 2282 * return to upper layer 2283 * do not skip ERP requests 2284 */ 2285 if (cqr->startdev != cqr->basedev && !cqr->refers && 2286 (cqr->status == DASD_CQR_TERMINATED || 2287 cqr->status == DASD_CQR_NEED_ERP)) 2288 return -EAGAIN; 2289 2290 /* normal recovery for basedev IO */ 2291 if (__dasd_sleep_on_erp(cqr)) 2292 /* handle erp first */ 2293 goto retry; 2294 } 2295 2296 return 0; 2297 } 2298 2299 /* 2300 * Queue a request to the tail of the device ccw_queue and wait for 2301 * it's completion. 2302 */ 2303 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2304 { 2305 return _dasd_sleep_on(cqr, 0); 2306 } 2307 EXPORT_SYMBOL(dasd_sleep_on); 2308 2309 /* 2310 * Start requests from a ccw_queue and wait for their completion. 2311 */ 2312 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2313 { 2314 return _dasd_sleep_on_queue(ccw_queue, 0); 2315 } 2316 EXPORT_SYMBOL(dasd_sleep_on_queue); 2317 2318 /* 2319 * Queue a request to the tail of the device ccw_queue and wait 2320 * interruptible for it's completion. 2321 */ 2322 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2323 { 2324 return _dasd_sleep_on(cqr, 1); 2325 } 2326 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2327 2328 /* 2329 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2330 * for eckd devices) the currently running request has to be terminated 2331 * and be put back to status queued, before the special request is added 2332 * to the head of the queue. Then the special request is waited on normally. 
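 *
 * For illustration only (hypothetical discipline code, not part of this
 * file): such a caller builds its special cqr as usual and then uses
 *
 *	rc = dasd_sleep_on_immediatly(cqr);
 *
 * instead of dasd_sleep_on(); the terminated request stays at the head
 * of the queue and keeps its retry counter.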
2333 */ 2334 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2335 { 2336 struct dasd_ccw_req *cqr; 2337 int rc; 2338 2339 if (list_empty(&device->ccw_queue)) 2340 return 0; 2341 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2342 rc = device->discipline->term_IO(cqr); 2343 if (!rc) 2344 /* 2345 * CQR terminated because a more important request is pending. 2346 * Undo decreasing of retry counter because this is 2347 * not an error case. 2348 */ 2349 cqr->retries++; 2350 return rc; 2351 } 2352 2353 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2354 { 2355 struct dasd_device *device; 2356 int rc; 2357 2358 device = cqr->startdev; 2359 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2360 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2361 cqr->status = DASD_CQR_FAILED; 2362 cqr->intrc = -EPERM; 2363 return -EIO; 2364 } 2365 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2366 rc = _dasd_term_running_cqr(device); 2367 if (rc) { 2368 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2369 return rc; 2370 } 2371 cqr->callback = dasd_wakeup_cb; 2372 cqr->callback_data = DASD_SLEEPON_START_TAG; 2373 cqr->status = DASD_CQR_QUEUED; 2374 /* 2375 * add new request as second 2376 * first the terminated cqr needs to be finished 2377 */ 2378 list_add(&cqr->devlist, device->ccw_queue.next); 2379 2380 /* let the bh start the request to keep them in order */ 2381 dasd_schedule_device_bh(device); 2382 2383 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2384 2385 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2386 2387 if (cqr->status == DASD_CQR_DONE) 2388 rc = 0; 2389 else if (cqr->intrc) 2390 rc = cqr->intrc; 2391 else 2392 rc = -EIO; 2393 2394 /* kick tasklets */ 2395 dasd_schedule_device_bh(device); 2396 if (device->block) 2397 dasd_schedule_block_bh(device->block); 2398 2399 return rc; 2400 } 2401 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2402 2403 /* 2404 * Cancels a request that was started with dasd_sleep_on_req. 2405 * This is useful to timeout requests. The request will be 2406 * terminated if it is currently in i/o. 2407 * Returns 0 if request termination was successful 2408 * negative error code if termination failed 2409 * Cancellation of a request is an asynchronous operation! The calling 2410 * function has to wait until the request is properly returned via callback. 2411 */ 2412 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2413 { 2414 struct dasd_device *device = cqr->startdev; 2415 unsigned long flags; 2416 int rc; 2417 2418 rc = 0; 2419 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2420 switch (cqr->status) { 2421 case DASD_CQR_QUEUED: 2422 /* request was not started - just set to cleared */ 2423 cqr->status = DASD_CQR_CLEARED; 2424 if (cqr->callback_data == DASD_SLEEPON_START_TAG) 2425 cqr->callback_data = DASD_SLEEPON_END_TAG; 2426 break; 2427 case DASD_CQR_IN_IO: 2428 /* request in IO - terminate IO and release again */ 2429 rc = device->discipline->term_IO(cqr); 2430 if (rc) { 2431 dev_err(&device->cdev->dev, 2432 "Cancelling request %p failed with rc=%d\n", 2433 cqr, rc); 2434 } else { 2435 cqr->stopclk = get_tod_clock(); 2436 } 2437 break; 2438 default: /* already finished or clear pending - do nothing */ 2439 break; 2440 } 2441 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2442 dasd_schedule_device_bh(device); 2443 return rc; 2444 } 2445 EXPORT_SYMBOL(dasd_cancel_req); 2446 2447 /* 2448 * SECTION: Operations of the dasd_block layer. 2449 */ 2450 2451 /* 2452 * Timeout function for dasd_block. 
This is used when the block layer 2453 * is waiting for something that may not come reliably, (e.g. a state 2454 * change interrupt) 2455 */ 2456 static void dasd_block_timeout(unsigned long ptr) 2457 { 2458 unsigned long flags; 2459 struct dasd_block *block; 2460 2461 block = (struct dasd_block *) ptr; 2462 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2463 /* re-activate request queue */ 2464 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2465 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2466 dasd_schedule_block_bh(block); 2467 } 2468 2469 /* 2470 * Setup timeout for a dasd_block in jiffies. 2471 */ 2472 void dasd_block_set_timer(struct dasd_block *block, int expires) 2473 { 2474 if (expires == 0) 2475 del_timer(&block->timer); 2476 else 2477 mod_timer(&block->timer, jiffies + expires); 2478 } 2479 EXPORT_SYMBOL(dasd_block_set_timer); 2480 2481 /* 2482 * Clear timeout for a dasd_block. 2483 */ 2484 void dasd_block_clear_timer(struct dasd_block *block) 2485 { 2486 del_timer(&block->timer); 2487 } 2488 EXPORT_SYMBOL(dasd_block_clear_timer); 2489 2490 /* 2491 * Process finished error recovery ccw. 2492 */ 2493 static void __dasd_process_erp(struct dasd_device *device, 2494 struct dasd_ccw_req *cqr) 2495 { 2496 dasd_erp_fn_t erp_fn; 2497 2498 if (cqr->status == DASD_CQR_DONE) 2499 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2500 else 2501 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2502 erp_fn = device->discipline->erp_postaction(cqr); 2503 erp_fn(cqr); 2504 } 2505 2506 /* 2507 * Fetch requests from the block device queue. 2508 */ 2509 static void __dasd_process_request_queue(struct dasd_block *block) 2510 { 2511 struct request_queue *queue; 2512 struct request *req; 2513 struct dasd_ccw_req *cqr; 2514 struct dasd_device *basedev; 2515 unsigned long flags; 2516 queue = block->request_queue; 2517 basedev = block->base; 2518 /* No queue ? Then there is nothing to do. */ 2519 if (queue == NULL) 2520 return; 2521 2522 /* 2523 * We requeue request from the block device queue to the ccw 2524 * queue only in two states. In state DASD_STATE_READY the 2525 * partition detection is done and we need to requeue requests 2526 * for that. State DASD_STATE_ONLINE is normal block device 2527 * operation. 
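	 * Below DASD_STATE_READY no ccw request can be built, so any
	 * request already waiting on the block queue is simply ended
	 * with -EIO.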
2528 */ 2529 if (basedev->state < DASD_STATE_READY) { 2530 while ((req = blk_fetch_request(block->request_queue))) 2531 __blk_end_request_all(req, -EIO); 2532 return; 2533 } 2534 2535 /* if device ist stopped do not fetch new requests */ 2536 if (basedev->stopped) 2537 return; 2538 2539 /* Now we try to fetch requests from the request queue */ 2540 while ((req = blk_peek_request(queue))) { 2541 if (basedev->features & DASD_FEATURE_READONLY && 2542 rq_data_dir(req) == WRITE) { 2543 DBF_DEV_EVENT(DBF_ERR, basedev, 2544 "Rejecting write request %p", 2545 req); 2546 blk_start_request(req); 2547 __blk_end_request_all(req, -EIO); 2548 continue; 2549 } 2550 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 2551 (basedev->features & DASD_FEATURE_FAILFAST || 2552 blk_noretry_request(req))) { 2553 DBF_DEV_EVENT(DBF_ERR, basedev, 2554 "Rejecting failfast request %p", 2555 req); 2556 blk_start_request(req); 2557 __blk_end_request_all(req, -ETIMEDOUT); 2558 continue; 2559 } 2560 cqr = basedev->discipline->build_cp(basedev, block, req); 2561 if (IS_ERR(cqr)) { 2562 if (PTR_ERR(cqr) == -EBUSY) 2563 break; /* normal end condition */ 2564 if (PTR_ERR(cqr) == -ENOMEM) 2565 break; /* terminate request queue loop */ 2566 if (PTR_ERR(cqr) == -EAGAIN) { 2567 /* 2568 * The current request cannot be build right 2569 * now, we have to try later. If this request 2570 * is the head-of-queue we stop the device 2571 * for 1/2 second. 2572 */ 2573 if (!list_empty(&block->ccw_queue)) 2574 break; 2575 spin_lock_irqsave( 2576 get_ccwdev_lock(basedev->cdev), flags); 2577 dasd_device_set_stop_bits(basedev, 2578 DASD_STOPPED_PENDING); 2579 spin_unlock_irqrestore( 2580 get_ccwdev_lock(basedev->cdev), flags); 2581 dasd_block_set_timer(block, HZ/2); 2582 break; 2583 } 2584 DBF_DEV_EVENT(DBF_ERR, basedev, 2585 "CCW creation failed (rc=%ld) " 2586 "on request %p", 2587 PTR_ERR(cqr), req); 2588 blk_start_request(req); 2589 __blk_end_request_all(req, -EIO); 2590 continue; 2591 } 2592 /* 2593 * Note: callback is set to dasd_return_cqr_cb in 2594 * __dasd_block_start_head to cover erp requests as well 2595 */ 2596 cqr->callback_data = (void *) req; 2597 cqr->status = DASD_CQR_FILLED; 2598 req->completion_data = cqr; 2599 blk_start_request(req); 2600 list_add_tail(&cqr->blocklist, &block->ccw_queue); 2601 INIT_LIST_HEAD(&cqr->devlist); 2602 dasd_profile_start(block, cqr, req); 2603 } 2604 } 2605 2606 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2607 { 2608 struct request *req; 2609 int status; 2610 int error = 0; 2611 2612 req = (struct request *) cqr->callback_data; 2613 dasd_profile_end(cqr->block, cqr, req); 2614 status = cqr->block->base->discipline->free_cp(cqr, req); 2615 if (status < 0) 2616 error = status; 2617 else if (status == 0) { 2618 if (cqr->intrc == -EPERM) 2619 error = -EBADE; 2620 else if (cqr->intrc == -ENOLINK || 2621 cqr->intrc == -ETIMEDOUT) 2622 error = cqr->intrc; 2623 else 2624 error = -EIO; 2625 } 2626 __blk_end_request_all(req, error); 2627 } 2628 2629 /* 2630 * Process ccw request queue. 2631 */ 2632 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2633 struct list_head *final_queue) 2634 { 2635 struct list_head *l, *n; 2636 struct dasd_ccw_req *cqr; 2637 dasd_erp_fn_t erp_fn; 2638 unsigned long flags; 2639 struct dasd_device *base = block->base; 2640 2641 restart: 2642 /* Process request with final status. 
*/ 2643 list_for_each_safe(l, n, &block->ccw_queue) { 2644 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2645 if (cqr->status != DASD_CQR_DONE && 2646 cqr->status != DASD_CQR_FAILED && 2647 cqr->status != DASD_CQR_NEED_ERP && 2648 cqr->status != DASD_CQR_TERMINATED) 2649 continue; 2650 2651 if (cqr->status == DASD_CQR_TERMINATED) { 2652 base->discipline->handle_terminated_request(cqr); 2653 goto restart; 2654 } 2655 2656 /* Process requests that may be recovered */ 2657 if (cqr->status == DASD_CQR_NEED_ERP) { 2658 erp_fn = base->discipline->erp_action(cqr); 2659 if (IS_ERR(erp_fn(cqr))) 2660 continue; 2661 goto restart; 2662 } 2663 2664 /* log sense for fatal error */ 2665 if (cqr->status == DASD_CQR_FAILED) { 2666 dasd_log_sense(cqr, &cqr->irb); 2667 } 2668 2669 /* First of all call extended error reporting. */ 2670 if (dasd_eer_enabled(base) && 2671 cqr->status == DASD_CQR_FAILED) { 2672 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2673 2674 /* restart request */ 2675 cqr->status = DASD_CQR_FILLED; 2676 cqr->retries = 255; 2677 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2678 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2679 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2680 flags); 2681 goto restart; 2682 } 2683 2684 /* Process finished ERP request. */ 2685 if (cqr->refers) { 2686 __dasd_process_erp(base, cqr); 2687 goto restart; 2688 } 2689 2690 /* Rechain finished requests to final queue */ 2691 cqr->endclk = get_tod_clock(); 2692 list_move_tail(&cqr->blocklist, final_queue); 2693 } 2694 } 2695 2696 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2697 { 2698 dasd_schedule_block_bh(cqr->block); 2699 } 2700 2701 static void __dasd_block_start_head(struct dasd_block *block) 2702 { 2703 struct dasd_ccw_req *cqr; 2704 2705 if (list_empty(&block->ccw_queue)) 2706 return; 2707 /* We allways begin with the first requests on the queue, as some 2708 * of previously started requests have to be enqueued on a 2709 * dasd_device again for error recovery. 2710 */ 2711 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2712 if (cqr->status != DASD_CQR_FILLED) 2713 continue; 2714 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2715 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2716 cqr->status = DASD_CQR_FAILED; 2717 cqr->intrc = -EPERM; 2718 dasd_schedule_block_bh(block); 2719 continue; 2720 } 2721 /* Non-temporary stop condition will trigger fail fast */ 2722 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2723 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2724 (!dasd_eer_enabled(block->base))) { 2725 cqr->status = DASD_CQR_FAILED; 2726 cqr->intrc = -ENOLINK; 2727 dasd_schedule_block_bh(block); 2728 continue; 2729 } 2730 /* Don't try to start requests if device is stopped */ 2731 if (block->base->stopped) 2732 return; 2733 2734 /* just a fail safe check, should not happen */ 2735 if (!cqr->startdev) 2736 cqr->startdev = block->base; 2737 2738 /* make sure that the requests we submit find their way back */ 2739 cqr->callback = dasd_return_cqr_cb; 2740 2741 dasd_add_request_tail(cqr); 2742 } 2743 } 2744 2745 /* 2746 * Central dasd_block layer routine. Takes requests from the generic 2747 * block layer request queue, creates ccw requests, enqueues them on 2748 * a dasd_device and processes ccw requests that have been returned. 
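 *
 * Note the lock nesting used below: whenever both locks are needed,
 * block->request_queue_lock is taken first (with interrupts disabled)
 * and block->queue_lock is nested inside it.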
2749 */ 2750 static void dasd_block_tasklet(struct dasd_block *block) 2751 { 2752 struct list_head final_queue; 2753 struct list_head *l, *n; 2754 struct dasd_ccw_req *cqr; 2755 2756 atomic_set(&block->tasklet_scheduled, 0); 2757 INIT_LIST_HEAD(&final_queue); 2758 spin_lock(&block->queue_lock); 2759 /* Finish off requests on ccw queue */ 2760 __dasd_process_block_ccw_queue(block, &final_queue); 2761 spin_unlock(&block->queue_lock); 2762 /* Now call the callback function of requests with final status */ 2763 spin_lock_irq(&block->request_queue_lock); 2764 list_for_each_safe(l, n, &final_queue) { 2765 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2766 list_del_init(&cqr->blocklist); 2767 __dasd_cleanup_cqr(cqr); 2768 } 2769 spin_lock(&block->queue_lock); 2770 /* Get new request from the block device request queue */ 2771 __dasd_process_request_queue(block); 2772 /* Now check if the head of the ccw queue needs to be started. */ 2773 __dasd_block_start_head(block); 2774 spin_unlock(&block->queue_lock); 2775 spin_unlock_irq(&block->request_queue_lock); 2776 if (waitqueue_active(&shutdown_waitq)) 2777 wake_up(&shutdown_waitq); 2778 dasd_put_device(block->base); 2779 } 2780 2781 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2782 { 2783 wake_up(&dasd_flush_wq); 2784 } 2785 2786 /* 2787 * Requeue a request back to the block request queue 2788 * only works for block requests 2789 */ 2790 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2791 { 2792 struct dasd_block *block = cqr->block; 2793 struct request *req; 2794 unsigned long flags; 2795 2796 if (!block) 2797 return -EINVAL; 2798 spin_lock_irqsave(&block->queue_lock, flags); 2799 req = (struct request *) cqr->callback_data; 2800 blk_requeue_request(block->request_queue, req); 2801 spin_unlock_irqrestore(&block->queue_lock, flags); 2802 2803 return 0; 2804 } 2805 2806 /* 2807 * Go through all request on the dasd_block request queue, cancel them 2808 * on the respective dasd_device, and return them to the generic 2809 * block layer. 2810 */ 2811 static int dasd_flush_block_queue(struct dasd_block *block) 2812 { 2813 struct dasd_ccw_req *cqr, *n; 2814 int rc, i; 2815 struct list_head flush_queue; 2816 2817 INIT_LIST_HEAD(&flush_queue); 2818 spin_lock_bh(&block->queue_lock); 2819 rc = 0; 2820 restart: 2821 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2822 /* if this request currently owned by a dasd_device cancel it */ 2823 if (cqr->status >= DASD_CQR_QUEUED) 2824 rc = dasd_cancel_req(cqr); 2825 if (rc < 0) 2826 break; 2827 /* Rechain request (including erp chain) so it won't be 2828 * touched by the dasd_block_tasklet anymore. 2829 * Replace the callback so we notice when the request 2830 * is returned from the dasd_device layer. 2831 */ 2832 cqr->callback = _dasd_wake_block_flush_cb; 2833 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2834 list_move_tail(&cqr->blocklist, &flush_queue); 2835 if (i > 1) 2836 /* moved more than one request - need to restart */ 2837 goto restart; 2838 } 2839 spin_unlock_bh(&block->queue_lock); 2840 /* Now call the callback function of flushed requests */ 2841 restart_cb: 2842 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2843 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2844 /* Process finished ERP request. 
*/ 2845 if (cqr->refers) { 2846 spin_lock_bh(&block->queue_lock); 2847 __dasd_process_erp(block->base, cqr); 2848 spin_unlock_bh(&block->queue_lock); 2849 /* restart list_for_xx loop since dasd_process_erp 2850 * might remove multiple elements */ 2851 goto restart_cb; 2852 } 2853 /* call the callback function */ 2854 spin_lock_irq(&block->request_queue_lock); 2855 cqr->endclk = get_tod_clock(); 2856 list_del_init(&cqr->blocklist); 2857 __dasd_cleanup_cqr(cqr); 2858 spin_unlock_irq(&block->request_queue_lock); 2859 } 2860 return rc; 2861 } 2862 2863 /* 2864 * Schedules a call to dasd_tasklet over the device tasklet. 2865 */ 2866 void dasd_schedule_block_bh(struct dasd_block *block) 2867 { 2868 /* Protect against rescheduling. */ 2869 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2870 return; 2871 /* life cycle of block is bound to it's base device */ 2872 dasd_get_device(block->base); 2873 tasklet_hi_schedule(&block->tasklet); 2874 } 2875 EXPORT_SYMBOL(dasd_schedule_block_bh); 2876 2877 2878 /* 2879 * SECTION: external block device operations 2880 * (request queue handling, open, release, etc.) 2881 */ 2882 2883 /* 2884 * Dasd request queue function. Called from ll_rw_blk.c 2885 */ 2886 static void do_dasd_request(struct request_queue *queue) 2887 { 2888 struct dasd_block *block; 2889 2890 block = queue->queuedata; 2891 spin_lock(&block->queue_lock); 2892 /* Get new request from the block device request queue */ 2893 __dasd_process_request_queue(block); 2894 /* Now check if the head of the ccw queue needs to be started. */ 2895 __dasd_block_start_head(block); 2896 spin_unlock(&block->queue_lock); 2897 } 2898 2899 /* 2900 * Block timeout callback, called from the block layer 2901 * 2902 * request_queue lock is held on entry. 2903 * 2904 * Return values: 2905 * BLK_EH_RESET_TIMER if the request should be left running 2906 * BLK_EH_NOT_HANDLED if the request is handled or terminated 2907 * by the driver. 2908 */ 2909 enum blk_eh_timer_return dasd_times_out(struct request *req) 2910 { 2911 struct dasd_ccw_req *cqr = req->completion_data; 2912 struct dasd_block *block = req->q->queuedata; 2913 struct dasd_device *device; 2914 int rc = 0; 2915 2916 if (!cqr) 2917 return BLK_EH_NOT_HANDLED; 2918 2919 device = cqr->startdev ? 
cqr->startdev : block->base; 2920 if (!device->blk_timeout) 2921 return BLK_EH_RESET_TIMER; 2922 DBF_DEV_EVENT(DBF_WARNING, device, 2923 " dasd_times_out cqr %p status %x", 2924 cqr, cqr->status); 2925 2926 spin_lock(&block->queue_lock); 2927 spin_lock(get_ccwdev_lock(device->cdev)); 2928 cqr->retries = -1; 2929 cqr->intrc = -ETIMEDOUT; 2930 if (cqr->status >= DASD_CQR_QUEUED) { 2931 spin_unlock(get_ccwdev_lock(device->cdev)); 2932 rc = dasd_cancel_req(cqr); 2933 } else if (cqr->status == DASD_CQR_FILLED || 2934 cqr->status == DASD_CQR_NEED_ERP) { 2935 cqr->status = DASD_CQR_TERMINATED; 2936 spin_unlock(get_ccwdev_lock(device->cdev)); 2937 } else if (cqr->status == DASD_CQR_IN_ERP) { 2938 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 2939 2940 list_for_each_entry_safe(searchcqr, nextcqr, 2941 &block->ccw_queue, blocklist) { 2942 tmpcqr = searchcqr; 2943 while (tmpcqr->refers) 2944 tmpcqr = tmpcqr->refers; 2945 if (tmpcqr != cqr) 2946 continue; 2947 /* searchcqr is an ERP request for cqr */ 2948 searchcqr->retries = -1; 2949 searchcqr->intrc = -ETIMEDOUT; 2950 if (searchcqr->status >= DASD_CQR_QUEUED) { 2951 spin_unlock(get_ccwdev_lock(device->cdev)); 2952 rc = dasd_cancel_req(searchcqr); 2953 spin_lock(get_ccwdev_lock(device->cdev)); 2954 } else if ((searchcqr->status == DASD_CQR_FILLED) || 2955 (searchcqr->status == DASD_CQR_NEED_ERP)) { 2956 searchcqr->status = DASD_CQR_TERMINATED; 2957 rc = 0; 2958 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 2959 /* 2960 * Shouldn't happen; most recent ERP 2961 * request is at the front of queue 2962 */ 2963 continue; 2964 } 2965 break; 2966 } 2967 spin_unlock(get_ccwdev_lock(device->cdev)); 2968 } 2969 dasd_schedule_block_bh(block); 2970 spin_unlock(&block->queue_lock); 2971 2972 return rc ? BLK_EH_RESET_TIMER : BLK_EH_NOT_HANDLED; 2973 } 2974 2975 /* 2976 * Allocate and initialize request queue and default I/O scheduler. 2977 */ 2978 static int dasd_alloc_queue(struct dasd_block *block) 2979 { 2980 block->request_queue = blk_init_queue(do_dasd_request, 2981 &block->request_queue_lock); 2982 if (block->request_queue == NULL) 2983 return -ENOMEM; 2984 2985 block->request_queue->queuedata = block; 2986 2987 return 0; 2988 } 2989 2990 /* 2991 * Allocate and initialize request queue. 2992 */ 2993 static void dasd_setup_queue(struct dasd_block *block) 2994 { 2995 int max; 2996 2997 if (block->base->features & DASD_FEATURE_USERAW) { 2998 /* 2999 * the max_blocks value for raw_track access is 256 3000 * it is higher than the native ECKD value because we 3001 * only need one ccw per track 3002 * so the max_hw_sectors are 3003 * 2048 x 512B = 1024kB = 16 tracks 3004 */ 3005 max = 2048; 3006 } else { 3007 max = block->base->discipline->max_blocks << block->s2b_shift; 3008 } 3009 blk_queue_logical_block_size(block->request_queue, 3010 block->bp_block); 3011 blk_queue_max_hw_sectors(block->request_queue, max); 3012 blk_queue_max_segments(block->request_queue, -1L); 3013 /* with page sized segments we can translate each segement into 3014 * one idaw/tidaw 3015 */ 3016 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 3017 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 3018 } 3019 3020 /* 3021 * Deactivate and free request queue. 3022 */ 3023 static void dasd_free_queue(struct dasd_block *block) 3024 { 3025 if (block->request_queue) { 3026 blk_cleanup_queue(block->request_queue); 3027 block->request_queue = NULL; 3028 } 3029 } 3030 3031 /* 3032 * Flush request on the request queue. 
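 *
 * Any request still pending on the generic block layer queue is ended
 * with -EIO under the request_queue_lock; the queue itself stays usable.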
3033 */ 3034 static void dasd_flush_request_queue(struct dasd_block *block) 3035 { 3036 struct request *req; 3037 3038 if (!block->request_queue) 3039 return; 3040 3041 spin_lock_irq(&block->request_queue_lock); 3042 while ((req = blk_fetch_request(block->request_queue))) 3043 __blk_end_request_all(req, -EIO); 3044 spin_unlock_irq(&block->request_queue_lock); 3045 } 3046 3047 static int dasd_open(struct block_device *bdev, fmode_t mode) 3048 { 3049 struct dasd_device *base; 3050 int rc; 3051 3052 base = dasd_device_from_gendisk(bdev->bd_disk); 3053 if (!base) 3054 return -ENODEV; 3055 3056 atomic_inc(&base->block->open_count); 3057 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3058 rc = -ENODEV; 3059 goto unlock; 3060 } 3061 3062 if (!try_module_get(base->discipline->owner)) { 3063 rc = -EINVAL; 3064 goto unlock; 3065 } 3066 3067 if (dasd_probeonly) { 3068 dev_info(&base->cdev->dev, 3069 "Accessing the DASD failed because it is in " 3070 "probeonly mode\n"); 3071 rc = -EPERM; 3072 goto out; 3073 } 3074 3075 if (base->state <= DASD_STATE_BASIC) { 3076 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3077 " Cannot open unrecognized device"); 3078 rc = -ENODEV; 3079 goto out; 3080 } 3081 3082 if ((mode & FMODE_WRITE) && 3083 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3084 (base->features & DASD_FEATURE_READONLY))) { 3085 rc = -EROFS; 3086 goto out; 3087 } 3088 3089 dasd_put_device(base); 3090 return 0; 3091 3092 out: 3093 module_put(base->discipline->owner); 3094 unlock: 3095 atomic_dec(&base->block->open_count); 3096 dasd_put_device(base); 3097 return rc; 3098 } 3099 3100 static void dasd_release(struct gendisk *disk, fmode_t mode) 3101 { 3102 struct dasd_device *base = dasd_device_from_gendisk(disk); 3103 if (base) { 3104 atomic_dec(&base->block->open_count); 3105 module_put(base->discipline->owner); 3106 dasd_put_device(base); 3107 } 3108 } 3109 3110 /* 3111 * Return disk geometry. 3112 */ 3113 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3114 { 3115 struct dasd_device *base; 3116 3117 base = dasd_device_from_gendisk(bdev->bd_disk); 3118 if (!base) 3119 return -ENODEV; 3120 3121 if (!base->discipline || 3122 !base->discipline->fill_geometry) { 3123 dasd_put_device(base); 3124 return -EINVAL; 3125 } 3126 base->discipline->fill_geometry(base->block, geo); 3127 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3128 dasd_put_device(base); 3129 return 0; 3130 } 3131 3132 const struct block_device_operations 3133 dasd_device_operations = { 3134 .owner = THIS_MODULE, 3135 .open = dasd_open, 3136 .release = dasd_release, 3137 .ioctl = dasd_ioctl, 3138 .compat_ioctl = dasd_ioctl, 3139 .getgeo = dasd_getgeo, 3140 }; 3141 3142 /******************************************************************************* 3143 * end of block device operations 3144 */ 3145 3146 static void 3147 dasd_exit(void) 3148 { 3149 #ifdef CONFIG_PROC_FS 3150 dasd_proc_exit(); 3151 #endif 3152 dasd_eer_exit(); 3153 if (dasd_page_cache != NULL) { 3154 kmem_cache_destroy(dasd_page_cache); 3155 dasd_page_cache = NULL; 3156 } 3157 dasd_gendisk_exit(); 3158 dasd_devmap_exit(); 3159 if (dasd_debug_area != NULL) { 3160 debug_unregister(dasd_debug_area); 3161 dasd_debug_area = NULL; 3162 } 3163 dasd_statistics_removeroot(); 3164 } 3165 3166 /* 3167 * SECTION: common functions for ccw_driver use 3168 */ 3169 3170 /* 3171 * Is the device read-only? 3172 * Note that this function does not report the setting of the 3173 * readonly device attribute, but how it is configured in z/VM. 
3174 */ 3175 int dasd_device_is_ro(struct dasd_device *device) 3176 { 3177 struct ccw_dev_id dev_id; 3178 struct diag210 diag_data; 3179 int rc; 3180 3181 if (!MACHINE_IS_VM) 3182 return 0; 3183 ccw_device_get_id(device->cdev, &dev_id); 3184 memset(&diag_data, 0, sizeof(diag_data)); 3185 diag_data.vrdcdvno = dev_id.devno; 3186 diag_data.vrdclen = sizeof(diag_data); 3187 rc = diag210(&diag_data); 3188 if (rc == 0 || rc == 2) { 3189 return diag_data.vrdcvfla & 0x80; 3190 } else { 3191 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3192 dev_id.devno, rc); 3193 return 0; 3194 } 3195 } 3196 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3197 3198 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3199 { 3200 struct ccw_device *cdev = data; 3201 int ret; 3202 3203 ret = ccw_device_set_online(cdev); 3204 if (ret) 3205 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3206 dev_name(&cdev->dev), ret); 3207 } 3208 3209 /* 3210 * Initial attempt at a probe function. this can be simplified once 3211 * the other detection code is gone. 3212 */ 3213 int dasd_generic_probe(struct ccw_device *cdev, 3214 struct dasd_discipline *discipline) 3215 { 3216 int ret; 3217 3218 ret = dasd_add_sysfs_files(cdev); 3219 if (ret) { 3220 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3221 "dasd_generic_probe: could not add " 3222 "sysfs entries"); 3223 return ret; 3224 } 3225 cdev->handler = &dasd_int_handler; 3226 3227 /* 3228 * Automatically online either all dasd devices (dasd_autodetect) 3229 * or all devices specified with dasd= parameters during 3230 * initial probe. 3231 */ 3232 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3233 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3234 async_schedule(dasd_generic_auto_online, cdev); 3235 return 0; 3236 } 3237 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3238 3239 /* 3240 * This will one day be called from a global not_oper handler. 3241 * It is also used by driver_unregister during module unload. 3242 */ 3243 void dasd_generic_remove(struct ccw_device *cdev) 3244 { 3245 struct dasd_device *device; 3246 struct dasd_block *block; 3247 3248 cdev->handler = NULL; 3249 3250 device = dasd_device_from_cdev(cdev); 3251 if (IS_ERR(device)) { 3252 dasd_remove_sysfs_files(cdev); 3253 return; 3254 } 3255 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3256 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3257 /* Already doing offline processing */ 3258 dasd_put_device(device); 3259 dasd_remove_sysfs_files(cdev); 3260 return; 3261 } 3262 /* 3263 * This device is removed unconditionally. Set offline 3264 * flag to prevent dasd_open from opening it while it is 3265 * no quite down yet. 3266 */ 3267 dasd_set_target_state(device, DASD_STATE_NEW); 3268 /* dasd_delete_device destroys the device reference. */ 3269 block = device->block; 3270 dasd_delete_device(device); 3271 /* 3272 * life cycle of block is bound to device, so delete it after 3273 * device was safely removed 3274 */ 3275 if (block) 3276 dasd_free_block(block); 3277 3278 dasd_remove_sysfs_files(cdev); 3279 } 3280 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3281 3282 /* 3283 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3284 * the device is detected for the first time and is supposed to be used 3285 * or the user has started activation through sysfs. 
3286 */ 3287 int dasd_generic_set_online(struct ccw_device *cdev, 3288 struct dasd_discipline *base_discipline) 3289 { 3290 struct dasd_discipline *discipline; 3291 struct dasd_device *device; 3292 int rc; 3293 3294 /* first online clears initial online feature flag */ 3295 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3296 device = dasd_create_device(cdev); 3297 if (IS_ERR(device)) 3298 return PTR_ERR(device); 3299 3300 discipline = base_discipline; 3301 if (device->features & DASD_FEATURE_USEDIAG) { 3302 if (!dasd_diag_discipline_pointer) { 3303 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3304 dev_name(&cdev->dev)); 3305 dasd_delete_device(device); 3306 return -ENODEV; 3307 } 3308 discipline = dasd_diag_discipline_pointer; 3309 } 3310 if (!try_module_get(base_discipline->owner)) { 3311 dasd_delete_device(device); 3312 return -EINVAL; 3313 } 3314 if (!try_module_get(discipline->owner)) { 3315 module_put(base_discipline->owner); 3316 dasd_delete_device(device); 3317 return -EINVAL; 3318 } 3319 device->base_discipline = base_discipline; 3320 device->discipline = discipline; 3321 3322 /* check_device will allocate block device if necessary */ 3323 rc = discipline->check_device(device); 3324 if (rc) { 3325 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3326 dev_name(&cdev->dev), discipline->name, rc); 3327 module_put(discipline->owner); 3328 module_put(base_discipline->owner); 3329 dasd_delete_device(device); 3330 return rc; 3331 } 3332 3333 dasd_set_target_state(device, DASD_STATE_ONLINE); 3334 if (device->state <= DASD_STATE_KNOWN) { 3335 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3336 dev_name(&cdev->dev)); 3337 rc = -ENODEV; 3338 dasd_set_target_state(device, DASD_STATE_NEW); 3339 if (device->block) 3340 dasd_free_block(device->block); 3341 dasd_delete_device(device); 3342 } else 3343 pr_debug("dasd_generic device %s found\n", 3344 dev_name(&cdev->dev)); 3345 3346 wait_event(dasd_init_waitq, _wait_for_device(device)); 3347 3348 dasd_put_device(device); 3349 return rc; 3350 } 3351 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3352 3353 int dasd_generic_set_offline(struct ccw_device *cdev) 3354 { 3355 struct dasd_device *device; 3356 struct dasd_block *block; 3357 int max_count, open_count, rc; 3358 3359 rc = 0; 3360 device = dasd_device_from_cdev(cdev); 3361 if (IS_ERR(device)) 3362 return PTR_ERR(device); 3363 3364 /* 3365 * We must make sure that this device is currently not in use. 3366 * The open_count is increased for every opener, that includes 3367 * the blkdev_get in dasd_scan_partitions. We are only interested 3368 * in the other openers. 3369 */ 3370 if (device->block) { 3371 max_count = device->block->bdev ? 
0 : -1; 3372 open_count = atomic_read(&device->block->open_count); 3373 if (open_count > max_count) { 3374 if (open_count > 0) 3375 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3376 dev_name(&cdev->dev), open_count); 3377 else 3378 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3379 dev_name(&cdev->dev)); 3380 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3381 dasd_put_device(device); 3382 return -EBUSY; 3383 } 3384 } 3385 3386 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3387 /* 3388 * safe offline already running 3389 * could only be called by normal offline so safe_offline flag 3390 * needs to be removed to run normal offline and kill all I/O 3391 */ 3392 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3393 /* Already doing normal offline processing */ 3394 dasd_put_device(device); 3395 return -EBUSY; 3396 } else 3397 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3398 3399 } else 3400 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3401 /* Already doing offline processing */ 3402 dasd_put_device(device); 3403 return -EBUSY; 3404 } 3405 3406 /* 3407 * if safe_offline called set safe_offline_running flag and 3408 * clear safe_offline so that a call to normal offline 3409 * can overrun safe_offline processing 3410 */ 3411 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3412 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3413 /* 3414 * If we want to set the device safe offline all IO operations 3415 * should be finished before continuing the offline process 3416 * so sync bdev first and then wait for our queues to become 3417 * empty 3418 */ 3419 /* sync blockdev and partitions */ 3420 rc = fsync_bdev(device->block->bdev); 3421 if (rc != 0) 3422 goto interrupted; 3423 3424 /* schedule device tasklet and wait for completion */ 3425 dasd_schedule_device_bh(device); 3426 rc = wait_event_interruptible(shutdown_waitq, 3427 _wait_for_empty_queues(device)); 3428 if (rc != 0) 3429 goto interrupted; 3430 } 3431 3432 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3433 dasd_set_target_state(device, DASD_STATE_NEW); 3434 /* dasd_delete_device destroys the device reference. */ 3435 block = device->block; 3436 dasd_delete_device(device); 3437 /* 3438 * life cycle of block is bound to device, so delete it after 3439 * device was safely removed 3440 */ 3441 if (block) 3442 dasd_free_block(block); 3443 return 0; 3444 3445 interrupted: 3446 /* interrupted by signal */ 3447 clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags); 3448 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3449 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3450 dasd_put_device(device); 3451 return rc; 3452 } 3453 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3454 3455 int dasd_generic_last_path_gone(struct dasd_device *device) 3456 { 3457 struct dasd_ccw_req *cqr; 3458 3459 dev_warn(&device->cdev->dev, "No operational channel path is left " 3460 "for the device\n"); 3461 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3462 /* First of all call extended error reporting. */ 3463 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3464 3465 if (device->state < DASD_STATE_BASIC) 3466 return 0; 3467 /* Device is active. We want to keep it. 
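	 * Started requests and requests being cleared are put back to
	 * DASD_CQR_QUEUED with an extra retry, and the device is stopped
	 * with DASD_STOPPED_DC_WAIT until dasd_generic_path_operational
	 * removes that stop bit again.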
*/ 3468 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3469 if ((cqr->status == DASD_CQR_IN_IO) || 3470 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3471 cqr->status = DASD_CQR_QUEUED; 3472 cqr->retries++; 3473 } 3474 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3475 dasd_device_clear_timer(device); 3476 dasd_schedule_device_bh(device); 3477 return 1; 3478 } 3479 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3480 3481 int dasd_generic_path_operational(struct dasd_device *device) 3482 { 3483 dev_info(&device->cdev->dev, "A channel path to the device has become " 3484 "operational\n"); 3485 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3486 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3487 if (device->stopped & DASD_UNRESUMED_PM) { 3488 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3489 dasd_restore_device(device); 3490 return 1; 3491 } 3492 dasd_schedule_device_bh(device); 3493 if (device->block) 3494 dasd_schedule_block_bh(device->block); 3495 3496 if (!device->stopped) 3497 wake_up(&generic_waitq); 3498 3499 return 1; 3500 } 3501 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3502 3503 int dasd_generic_notify(struct ccw_device *cdev, int event) 3504 { 3505 struct dasd_device *device; 3506 int ret; 3507 3508 device = dasd_device_from_cdev_locked(cdev); 3509 if (IS_ERR(device)) 3510 return 0; 3511 ret = 0; 3512 switch (event) { 3513 case CIO_GONE: 3514 case CIO_BOXED: 3515 case CIO_NO_PATH: 3516 device->path_data.opm = 0; 3517 device->path_data.ppm = 0; 3518 device->path_data.npm = 0; 3519 ret = dasd_generic_last_path_gone(device); 3520 break; 3521 case CIO_OPER: 3522 ret = 1; 3523 if (device->path_data.opm) 3524 ret = dasd_generic_path_operational(device); 3525 break; 3526 } 3527 dasd_put_device(device); 3528 return ret; 3529 } 3530 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3531 3532 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3533 { 3534 int chp; 3535 __u8 oldopm, eventlpm; 3536 struct dasd_device *device; 3537 3538 device = dasd_device_from_cdev_locked(cdev); 3539 if (IS_ERR(device)) 3540 return; 3541 for (chp = 0; chp < 8; chp++) { 3542 eventlpm = 0x80 >> chp; 3543 if (path_event[chp] & PE_PATH_GONE) { 3544 oldopm = device->path_data.opm; 3545 device->path_data.opm &= ~eventlpm; 3546 device->path_data.ppm &= ~eventlpm; 3547 device->path_data.npm &= ~eventlpm; 3548 if (oldopm && !device->path_data.opm) { 3549 dev_warn(&device->cdev->dev, 3550 "No verified channel paths remain " 3551 "for the device\n"); 3552 DBF_DEV_EVENT(DBF_WARNING, device, 3553 "%s", "last verified path gone"); 3554 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3555 dasd_device_set_stop_bits(device, 3556 DASD_STOPPED_DC_WAIT); 3557 } 3558 } 3559 if (path_event[chp] & PE_PATH_AVAILABLE) { 3560 device->path_data.opm &= ~eventlpm; 3561 device->path_data.ppm &= ~eventlpm; 3562 device->path_data.npm &= ~eventlpm; 3563 device->path_data.tbvpm |= eventlpm; 3564 dasd_schedule_device_bh(device); 3565 } 3566 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3567 if (!(device->path_data.opm & eventlpm) && 3568 !(device->path_data.tbvpm & eventlpm)) { 3569 /* 3570 * we can not establish a pathgroup on an 3571 * unavailable path, so trigger a path 3572 * verification first 3573 */ 3574 device->path_data.tbvpm |= eventlpm; 3575 dasd_schedule_device_bh(device); 3576 } 3577 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3578 "Pathgroup re-established\n"); 3579 if (device->discipline->kick_validate) 3580 device->discipline->kick_validate(device); 3581 
} 3582 } 3583 dasd_put_device(device); 3584 } 3585 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3586 3587 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3588 { 3589 if (!device->path_data.opm && lpm) { 3590 device->path_data.opm = lpm; 3591 dasd_generic_path_operational(device); 3592 } else 3593 device->path_data.opm |= lpm; 3594 return 0; 3595 } 3596 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3597 3598 3599 int dasd_generic_pm_freeze(struct ccw_device *cdev) 3600 { 3601 struct dasd_device *device = dasd_device_from_cdev(cdev); 3602 struct list_head freeze_queue; 3603 struct dasd_ccw_req *cqr, *n; 3604 struct dasd_ccw_req *refers; 3605 int rc; 3606 3607 if (IS_ERR(device)) 3608 return PTR_ERR(device); 3609 3610 /* mark device as suspended */ 3611 set_bit(DASD_FLAG_SUSPENDED, &device->flags); 3612 3613 if (device->discipline->freeze) 3614 rc = device->discipline->freeze(device); 3615 3616 /* disallow new I/O */ 3617 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 3618 3619 /* clear active requests and requeue them to block layer if possible */ 3620 INIT_LIST_HEAD(&freeze_queue); 3621 spin_lock_irq(get_ccwdev_lock(cdev)); 3622 rc = 0; 3623 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3624 /* Check status and move request to flush_queue */ 3625 if (cqr->status == DASD_CQR_IN_IO) { 3626 rc = device->discipline->term_IO(cqr); 3627 if (rc) { 3628 /* unable to terminate requeust */ 3629 dev_err(&device->cdev->dev, 3630 "Unable to terminate request %p " 3631 "on suspend\n", cqr); 3632 spin_unlock_irq(get_ccwdev_lock(cdev)); 3633 dasd_put_device(device); 3634 return rc; 3635 } 3636 } 3637 list_move_tail(&cqr->devlist, &freeze_queue); 3638 } 3639 spin_unlock_irq(get_ccwdev_lock(cdev)); 3640 3641 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 3642 wait_event(dasd_flush_wq, 3643 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3644 if (cqr->status == DASD_CQR_CLEARED) 3645 cqr->status = DASD_CQR_QUEUED; 3646 3647 /* requeue requests to blocklayer will only work for 3648 block device requests */ 3649 if (_dasd_requeue_request(cqr)) 3650 continue; 3651 3652 /* remove requests from device and block queue */ 3653 list_del_init(&cqr->devlist); 3654 while (cqr->refers != NULL) { 3655 refers = cqr->refers; 3656 /* remove the request from the block queue */ 3657 list_del(&cqr->blocklist); 3658 /* free the finished erp request */ 3659 dasd_free_erp_request(cqr, cqr->memdev); 3660 cqr = refers; 3661 } 3662 if (cqr->block) 3663 list_del_init(&cqr->blocklist); 3664 cqr->block->base->discipline->free_cp( 3665 cqr, (struct request *) cqr->callback_data); 3666 } 3667 3668 /* 3669 * if requests remain then they are internal request 3670 * and go back to the device queue 3671 */ 3672 if (!list_empty(&freeze_queue)) { 3673 /* move freeze_queue to start of the ccw_queue */ 3674 spin_lock_irq(get_ccwdev_lock(cdev)); 3675 list_splice_tail(&freeze_queue, &device->ccw_queue); 3676 spin_unlock_irq(get_ccwdev_lock(cdev)); 3677 } 3678 dasd_put_device(device); 3679 return rc; 3680 } 3681 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 3682 3683 int dasd_generic_restore_device(struct ccw_device *cdev) 3684 { 3685 struct dasd_device *device = dasd_device_from_cdev(cdev); 3686 int rc = 0; 3687 3688 if (IS_ERR(device)) 3689 return PTR_ERR(device); 3690 3691 /* allow new IO again */ 3692 dasd_device_remove_stop_bits(device, 3693 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 3694 3695 dasd_schedule_device_bh(device); 3696 3697 /* 3698 * call discipline restore function 3699 * if device is stopped 
do nothing e.g. for disconnected devices 3700 */ 3701 if (device->discipline->restore && !(device->stopped)) 3702 rc = device->discipline->restore(device); 3703 if (rc || device->stopped) 3704 /* 3705 * if the resume failed for the DASD we put it in 3706 * an UNRESUMED stop state 3707 */ 3708 device->stopped |= DASD_UNRESUMED_PM; 3709 3710 if (device->block) 3711 dasd_schedule_block_bh(device->block); 3712 3713 clear_bit(DASD_FLAG_SUSPENDED, &device->flags); 3714 dasd_put_device(device); 3715 return 0; 3716 } 3717 EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 3718 3719 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 3720 void *rdc_buffer, 3721 int rdc_buffer_size, 3722 int magic) 3723 { 3724 struct dasd_ccw_req *cqr; 3725 struct ccw1 *ccw; 3726 unsigned long *idaw; 3727 3728 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 3729 3730 if (IS_ERR(cqr)) { 3731 /* internal error 13 - Allocating the RDC request failed*/ 3732 dev_err(&device->cdev->dev, 3733 "An error occurred in the DASD device driver, " 3734 "reason=%s\n", "13"); 3735 return cqr; 3736 } 3737 3738 ccw = cqr->cpaddr; 3739 ccw->cmd_code = CCW_CMD_RDC; 3740 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { 3741 idaw = (unsigned long *) (cqr->data); 3742 ccw->cda = (__u32)(addr_t) idaw; 3743 ccw->flags = CCW_FLAG_IDA; 3744 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); 3745 } else { 3746 ccw->cda = (__u32)(addr_t) rdc_buffer; 3747 ccw->flags = 0; 3748 } 3749 3750 ccw->count = rdc_buffer_size; 3751 cqr->startdev = device; 3752 cqr->memdev = device; 3753 cqr->expires = 10*HZ; 3754 cqr->retries = 256; 3755 cqr->buildclk = get_tod_clock(); 3756 cqr->status = DASD_CQR_FILLED; 3757 return cqr; 3758 } 3759 3760 3761 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 3762 void *rdc_buffer, int rdc_buffer_size) 3763 { 3764 int ret; 3765 struct dasd_ccw_req *cqr; 3766 3767 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 3768 magic); 3769 if (IS_ERR(cqr)) 3770 return PTR_ERR(cqr); 3771 3772 ret = dasd_sleep_on(cqr); 3773 dasd_sfree_request(cqr, cqr->memdev); 3774 return ret; 3775 } 3776 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 3777 3778 /* 3779 * In command mode and transport mode we need to look for sense 3780 * data in different places. The sense data itself is allways 3781 * an array of 32 bytes, so we can unify the sense data access 3782 * for both modes. 
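 *
 * A NULL return means that no sense data is available for this irb.
 * For illustration only (hypothetical caller, not part of this file):
 *
 *	sense = dasd_get_sense(irb);
 *	if (sense)
 *		... evaluate the 32 byte sense array ...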
3783 */ 3784 char *dasd_get_sense(struct irb *irb) 3785 { 3786 struct tsb *tsb = NULL; 3787 char *sense = NULL; 3788 3789 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 3790 if (irb->scsw.tm.tcw) 3791 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 3792 irb->scsw.tm.tcw); 3793 if (tsb && tsb->length == 64 && tsb->flags) 3794 switch (tsb->flags & 0x07) { 3795 case 1: /* tsa_iostat */ 3796 sense = tsb->tsa.iostat.sense; 3797 break; 3798 case 2: /* tsa_ddpc */ 3799 sense = tsb->tsa.ddpc.sense; 3800 break; 3801 default: 3802 /* currently we don't use interrogate data */ 3803 break; 3804 } 3805 } else if (irb->esw.esw0.erw.cons) { 3806 sense = irb->ecw; 3807 } 3808 return sense; 3809 } 3810 EXPORT_SYMBOL_GPL(dasd_get_sense); 3811 3812 void dasd_generic_shutdown(struct ccw_device *cdev) 3813 { 3814 struct dasd_device *device; 3815 3816 device = dasd_device_from_cdev(cdev); 3817 if (IS_ERR(device)) 3818 return; 3819 3820 if (device->block) 3821 dasd_schedule_block_bh(device->block); 3822 3823 dasd_schedule_device_bh(device); 3824 3825 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); 3826 } 3827 EXPORT_SYMBOL_GPL(dasd_generic_shutdown); 3828 3829 static int __init dasd_init(void) 3830 { 3831 int rc; 3832 3833 init_waitqueue_head(&dasd_init_waitq); 3834 init_waitqueue_head(&dasd_flush_wq); 3835 init_waitqueue_head(&generic_waitq); 3836 init_waitqueue_head(&shutdown_waitq); 3837 3838 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 3839 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 3840 if (dasd_debug_area == NULL) { 3841 rc = -ENOMEM; 3842 goto failed; 3843 } 3844 debug_register_view(dasd_debug_area, &debug_sprintf_view); 3845 debug_set_level(dasd_debug_area, DBF_WARNING); 3846 3847 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 3848 3849 dasd_diag_discipline_pointer = NULL; 3850 3851 dasd_statistics_createroot(); 3852 3853 rc = dasd_devmap_init(); 3854 if (rc) 3855 goto failed; 3856 rc = dasd_gendisk_init(); 3857 if (rc) 3858 goto failed; 3859 rc = dasd_parse(); 3860 if (rc) 3861 goto failed; 3862 rc = dasd_eer_init(); 3863 if (rc) 3864 goto failed; 3865 #ifdef CONFIG_PROC_FS 3866 rc = dasd_proc_init(); 3867 if (rc) 3868 goto failed; 3869 #endif 3870 3871 return 0; 3872 failed: 3873 pr_info("The DASD device driver could not be initialized\n"); 3874 dasd_exit(); 3875 return rc; 3876 } 3877 3878 module_init(dasd_init); 3879 module_exit(dasd_exit); 3880