1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * Copyright IBM Corp. 1999, 2009 9 */ 10 11 #define KMSG_COMPONENT "dasd" 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/kmod.h> 15 #include <linux/init.h> 16 #include <linux/interrupt.h> 17 #include <linux/ctype.h> 18 #include <linux/major.h> 19 #include <linux/slab.h> 20 #include <linux/hdreg.h> 21 #include <linux/async.h> 22 #include <linux/mutex.h> 23 #include <linux/debugfs.h> 24 #include <linux/seq_file.h> 25 #include <linux/vmalloc.h> 26 27 #include <asm/ccwdev.h> 28 #include <asm/ebcdic.h> 29 #include <asm/idals.h> 30 #include <asm/itcw.h> 31 #include <asm/diag.h> 32 33 /* This is ugly... */ 34 #define PRINTK_HEADER "dasd:" 35 36 #include "dasd_int.h" 37 /* 38 * SECTION: Constant definitions to be used within this file 39 */ 40 #define DASD_CHANQ_MAX_SIZE 4 41 42 #define DASD_DIAG_MOD "dasd_diag_mod" 43 44 static unsigned int queue_depth = 32; 45 static unsigned int nr_hw_queues = 4; 46 47 module_param(queue_depth, uint, 0444); 48 MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); 49 50 module_param(nr_hw_queues, uint, 0444); 51 MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); 52 53 /* 54 * SECTION: exported variables of dasd.c 55 */ 56 debug_info_t *dasd_debug_area; 57 EXPORT_SYMBOL(dasd_debug_area); 58 static struct dentry *dasd_debugfs_root_entry; 59 struct dasd_discipline *dasd_diag_discipline_pointer; 60 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 61 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 62 63 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 64 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 65 " Copyright IBM Corp. 2000"); 66 MODULE_SUPPORTED_DEVICE("dasd"); 67 MODULE_LICENSE("GPL"); 68 69 /* 70 * SECTION: prototypes for static functions of dasd.c 71 */ 72 static int dasd_alloc_queue(struct dasd_block *); 73 static void dasd_free_queue(struct dasd_block *); 74 static int dasd_flush_block_queue(struct dasd_block *); 75 static void dasd_device_tasklet(unsigned long); 76 static void dasd_block_tasklet(unsigned long); 77 static void do_kick_device(struct work_struct *); 78 static void do_restore_device(struct work_struct *); 79 static void do_reload_device(struct work_struct *); 80 static void do_requeue_requests(struct work_struct *); 81 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 82 static void dasd_device_timeout(struct timer_list *); 83 static void dasd_block_timeout(struct timer_list *); 84 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 85 static void dasd_profile_init(struct dasd_profile *, struct dentry *); 86 static void dasd_profile_exit(struct dasd_profile *); 87 static void dasd_hosts_init(struct dentry *, struct dasd_device *); 88 static void dasd_hosts_exit(struct dasd_device *); 89 90 /* 91 * SECTION: Operations on the device structure. 92 */ 93 static wait_queue_head_t dasd_init_waitq; 94 static wait_queue_head_t dasd_flush_wq; 95 static wait_queue_head_t generic_waitq; 96 static wait_queue_head_t shutdown_waitq; 97 98 /* 99 * Allocate memory for a new device structure. 
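 *
 * Illustrative usage sketch (not part of the original source): callers
 * are expected to check the ERR_PTR-encoded return value and release
 * the structure with dasd_free_device() again, e.g.
 *
 *	device = dasd_alloc_device();
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);
 *	...
 *	dasd_free_device(device);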
100 */ 101 struct dasd_device *dasd_alloc_device(void) 102 { 103 struct dasd_device *device; 104 105 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 106 if (!device) 107 return ERR_PTR(-ENOMEM); 108 109 /* Get two pages for normal block device operations. */ 110 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 111 if (!device->ccw_mem) { 112 kfree(device); 113 return ERR_PTR(-ENOMEM); 114 } 115 /* Get one page for error recovery. */ 116 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 117 if (!device->erp_mem) { 118 free_pages((unsigned long) device->ccw_mem, 1); 119 kfree(device); 120 return ERR_PTR(-ENOMEM); 121 } 122 /* Get two pages for ese format. */ 123 device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 124 if (!device->ese_mem) { 125 free_page((unsigned long) device->erp_mem); 126 free_pages((unsigned long) device->ccw_mem, 1); 127 kfree(device); 128 return ERR_PTR(-ENOMEM); 129 } 130 131 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 132 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 133 dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); 134 spin_lock_init(&device->mem_lock); 135 atomic_set(&device->tasklet_scheduled, 0); 136 tasklet_init(&device->tasklet, dasd_device_tasklet, 137 (unsigned long) device); 138 INIT_LIST_HEAD(&device->ccw_queue); 139 timer_setup(&device->timer, dasd_device_timeout, 0); 140 INIT_WORK(&device->kick_work, do_kick_device); 141 INIT_WORK(&device->restore_device, do_restore_device); 142 INIT_WORK(&device->reload_device, do_reload_device); 143 INIT_WORK(&device->requeue_requests, do_requeue_requests); 144 device->state = DASD_STATE_NEW; 145 device->target = DASD_STATE_NEW; 146 mutex_init(&device->state_mutex); 147 spin_lock_init(&device->profile.lock); 148 return device; 149 } 150 151 /* 152 * Free memory of a device structure. 153 */ 154 void dasd_free_device(struct dasd_device *device) 155 { 156 kfree(device->private); 157 free_pages((unsigned long) device->ese_mem, 1); 158 free_page((unsigned long) device->erp_mem); 159 free_pages((unsigned long) device->ccw_mem, 1); 160 kfree(device); 161 } 162 163 /* 164 * Allocate memory for a new device structure. 165 */ 166 struct dasd_block *dasd_alloc_block(void) 167 { 168 struct dasd_block *block; 169 170 block = kzalloc(sizeof(*block), GFP_ATOMIC); 171 if (!block) 172 return ERR_PTR(-ENOMEM); 173 /* open_count = 0 means device online but not in use */ 174 atomic_set(&block->open_count, -1); 175 176 atomic_set(&block->tasklet_scheduled, 0); 177 tasklet_init(&block->tasklet, dasd_block_tasklet, 178 (unsigned long) block); 179 INIT_LIST_HEAD(&block->ccw_queue); 180 spin_lock_init(&block->queue_lock); 181 INIT_LIST_HEAD(&block->format_list); 182 spin_lock_init(&block->format_lock); 183 timer_setup(&block->timer, dasd_block_timeout, 0); 184 spin_lock_init(&block->profile.lock); 185 186 return block; 187 } 188 EXPORT_SYMBOL_GPL(dasd_alloc_block); 189 190 /* 191 * Free memory of a device structure. 192 */ 193 void dasd_free_block(struct dasd_block *block) 194 { 195 kfree(block); 196 } 197 EXPORT_SYMBOL_GPL(dasd_free_block); 198 199 /* 200 * Make a new device known to the system. 201 */ 202 static int dasd_state_new_to_known(struct dasd_device *device) 203 { 204 int rc; 205 206 /* 207 * As long as the device is not in state DASD_STATE_NEW we want to 208 * keep the reference count > 0. 
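	 *
	 * Rough pairing sketch (summary, not in the original comment):
	 *
	 *	dasd_state_new_to_known():  dasd_get_device(device);
	 *	... device stays referenced while state > DASD_STATE_NEW ...
	 *	dasd_state_known_to_new():  dasd_put_device(device);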
209 */ 210 dasd_get_device(device); 211 212 if (device->block) { 213 rc = dasd_alloc_queue(device->block); 214 if (rc) { 215 dasd_put_device(device); 216 return rc; 217 } 218 } 219 device->state = DASD_STATE_KNOWN; 220 return 0; 221 } 222 223 /* 224 * Let the system forget about a device. 225 */ 226 static int dasd_state_known_to_new(struct dasd_device *device) 227 { 228 /* Disable extended error reporting for this device. */ 229 dasd_eer_disable(device); 230 device->state = DASD_STATE_NEW; 231 232 if (device->block) 233 dasd_free_queue(device->block); 234 235 /* Give up reference we took in dasd_state_new_to_known. */ 236 dasd_put_device(device); 237 return 0; 238 } 239 240 static struct dentry *dasd_debugfs_setup(const char *name, 241 struct dentry *base_dentry) 242 { 243 struct dentry *pde; 244 245 if (!base_dentry) 246 return NULL; 247 pde = debugfs_create_dir(name, base_dentry); 248 if (!pde || IS_ERR(pde)) 249 return NULL; 250 return pde; 251 } 252 253 /* 254 * Request the irq line for the device. 255 */ 256 static int dasd_state_known_to_basic(struct dasd_device *device) 257 { 258 struct dasd_block *block = device->block; 259 int rc = 0; 260 261 /* Allocate and register gendisk structure. */ 262 if (block) { 263 rc = dasd_gendisk_alloc(block); 264 if (rc) 265 return rc; 266 block->debugfs_dentry = 267 dasd_debugfs_setup(block->gdp->disk_name, 268 dasd_debugfs_root_entry); 269 dasd_profile_init(&block->profile, block->debugfs_dentry); 270 if (dasd_global_profile_level == DASD_PROFILE_ON) 271 dasd_profile_on(&device->block->profile); 272 } 273 device->debugfs_dentry = 274 dasd_debugfs_setup(dev_name(&device->cdev->dev), 275 dasd_debugfs_root_entry); 276 dasd_profile_init(&device->profile, device->debugfs_dentry); 277 dasd_hosts_init(device->debugfs_dentry, device); 278 279 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 280 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 281 8 * sizeof(long)); 282 debug_register_view(device->debug_area, &debug_sprintf_view); 283 debug_set_level(device->debug_area, DBF_WARNING); 284 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 285 286 device->state = DASD_STATE_BASIC; 287 288 return rc; 289 } 290 291 /* 292 * Release the irq line for the device. Terminate any running i/o. 293 */ 294 static int dasd_state_basic_to_known(struct dasd_device *device) 295 { 296 int rc; 297 298 if (device->discipline->basic_to_known) { 299 rc = device->discipline->basic_to_known(device); 300 if (rc) 301 return rc; 302 } 303 304 if (device->block) { 305 dasd_profile_exit(&device->block->profile); 306 debugfs_remove(device->block->debugfs_dentry); 307 dasd_gendisk_free(device->block); 308 dasd_block_clear_timer(device->block); 309 } 310 rc = dasd_flush_device_queue(device); 311 if (rc) 312 return rc; 313 dasd_device_clear_timer(device); 314 dasd_profile_exit(&device->profile); 315 dasd_hosts_exit(device); 316 debugfs_remove(device->debugfs_dentry); 317 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 318 if (device->debug_area != NULL) { 319 debug_unregister(device->debug_area); 320 device->debug_area = NULL; 321 } 322 device->state = DASD_STATE_KNOWN; 323 return 0; 324 } 325 326 /* 327 * Do the initial analysis. The do_analysis function may return 328 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 329 * until the discipline decides to continue the startup sequence 330 * by calling the function dasd_change_state. 
The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
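 *
 * For orientation, the state ladder walked by dasd_increase_state() and
 * dasd_decrease_state() below is (summary, not in the original comment):
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *
 * with DASD_STATE_UNFMT entered from BASIC when do_analysis() reports an
 * error other than -EAGAIN.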
429 */ 430 static int 431 dasd_state_ready_to_online(struct dasd_device * device) 432 { 433 struct gendisk *disk; 434 struct disk_part_iter piter; 435 struct hd_struct *part; 436 437 device->state = DASD_STATE_ONLINE; 438 if (device->block) { 439 dasd_schedule_block_bh(device->block); 440 if ((device->features & DASD_FEATURE_USERAW)) { 441 disk = device->block->gdp; 442 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); 443 return 0; 444 } 445 disk = device->block->bdev->bd_disk; 446 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 447 while ((part = disk_part_iter_next(&piter))) 448 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 449 disk_part_iter_exit(&piter); 450 } 451 return 0; 452 } 453 454 /* 455 * Stop the requeueing of requests again. 456 */ 457 static int dasd_state_online_to_ready(struct dasd_device *device) 458 { 459 int rc; 460 struct gendisk *disk; 461 struct disk_part_iter piter; 462 struct hd_struct *part; 463 464 if (device->discipline->online_to_ready) { 465 rc = device->discipline->online_to_ready(device); 466 if (rc) 467 return rc; 468 } 469 470 device->state = DASD_STATE_READY; 471 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 472 disk = device->block->bdev->bd_disk; 473 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 474 while ((part = disk_part_iter_next(&piter))) 475 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 476 disk_part_iter_exit(&piter); 477 } 478 return 0; 479 } 480 481 /* 482 * Device startup state changes. 483 */ 484 static int dasd_increase_state(struct dasd_device *device) 485 { 486 int rc; 487 488 rc = 0; 489 if (device->state == DASD_STATE_NEW && 490 device->target >= DASD_STATE_KNOWN) 491 rc = dasd_state_new_to_known(device); 492 493 if (!rc && 494 device->state == DASD_STATE_KNOWN && 495 device->target >= DASD_STATE_BASIC) 496 rc = dasd_state_known_to_basic(device); 497 498 if (!rc && 499 device->state == DASD_STATE_BASIC && 500 device->target >= DASD_STATE_READY) 501 rc = dasd_state_basic_to_ready(device); 502 503 if (!rc && 504 device->state == DASD_STATE_UNFMT && 505 device->target > DASD_STATE_UNFMT) 506 rc = -EPERM; 507 508 if (!rc && 509 device->state == DASD_STATE_READY && 510 device->target >= DASD_STATE_ONLINE) 511 rc = dasd_state_ready_to_online(device); 512 513 return rc; 514 } 515 516 /* 517 * Device shutdown state changes. 518 */ 519 static int dasd_decrease_state(struct dasd_device *device) 520 { 521 int rc; 522 523 rc = 0; 524 if (device->state == DASD_STATE_ONLINE && 525 device->target <= DASD_STATE_READY) 526 rc = dasd_state_online_to_ready(device); 527 528 if (!rc && 529 device->state == DASD_STATE_READY && 530 device->target <= DASD_STATE_BASIC) 531 rc = dasd_state_ready_to_basic(device); 532 533 if (!rc && 534 device->state == DASD_STATE_UNFMT && 535 device->target <= DASD_STATE_BASIC) 536 rc = dasd_state_unfmt_to_basic(device); 537 538 if (!rc && 539 device->state == DASD_STATE_BASIC && 540 device->target <= DASD_STATE_KNOWN) 541 rc = dasd_state_basic_to_known(device); 542 543 if (!rc && 544 device->state == DASD_STATE_KNOWN && 545 device->target <= DASD_STATE_NEW) 546 rc = dasd_state_known_to_new(device); 547 548 return rc; 549 } 550 551 /* 552 * This is the main startup/shutdown routine. 553 */ 554 static void dasd_change_state(struct dasd_device *device) 555 { 556 int rc; 557 558 if (device->state == device->target) 559 /* Already where we want to go today... 
 */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable devices with device numbers in [from..to].
667 */ 668 static inline int _wait_for_device(struct dasd_device *device) 669 { 670 return (device->state == device->target); 671 } 672 673 void dasd_enable_device(struct dasd_device *device) 674 { 675 dasd_set_target_state(device, DASD_STATE_ONLINE); 676 if (device->state <= DASD_STATE_KNOWN) 677 /* No discipline for device found. */ 678 dasd_set_target_state(device, DASD_STATE_NEW); 679 /* Now wait for the devices to come up. */ 680 wait_event(dasd_init_waitq, _wait_for_device(device)); 681 682 dasd_reload_device(device); 683 if (device->discipline->kick_validate) 684 device->discipline->kick_validate(device); 685 } 686 EXPORT_SYMBOL(dasd_enable_device); 687 688 /* 689 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 690 */ 691 692 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 693 694 #ifdef CONFIG_DASD_PROFILE 695 struct dasd_profile dasd_global_profile = { 696 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 697 }; 698 static struct dentry *dasd_debugfs_global_entry; 699 700 /* 701 * Add profiling information for cqr before execution. 702 */ 703 static void dasd_profile_start(struct dasd_block *block, 704 struct dasd_ccw_req *cqr, 705 struct request *req) 706 { 707 struct list_head *l; 708 unsigned int counter; 709 struct dasd_device *device; 710 711 /* count the length of the chanq for statistics */ 712 counter = 0; 713 if (dasd_global_profile_level || block->profile.data) 714 list_for_each(l, &block->ccw_queue) 715 if (++counter >= 31) 716 break; 717 718 spin_lock(&dasd_global_profile.lock); 719 if (dasd_global_profile.data) { 720 dasd_global_profile.data->dasd_io_nr_req[counter]++; 721 if (rq_data_dir(req) == READ) 722 dasd_global_profile.data->dasd_read_nr_req[counter]++; 723 } 724 spin_unlock(&dasd_global_profile.lock); 725 726 spin_lock(&block->profile.lock); 727 if (block->profile.data) { 728 block->profile.data->dasd_io_nr_req[counter]++; 729 if (rq_data_dir(req) == READ) 730 block->profile.data->dasd_read_nr_req[counter]++; 731 } 732 spin_unlock(&block->profile.lock); 733 734 /* 735 * We count the request for the start device, even though it may run on 736 * some other device due to error recovery. This way we make sure that 737 * we count each request only once. 738 */ 739 device = cqr->startdev; 740 if (device->profile.data) { 741 counter = 1; /* request is not yet queued on the start device */ 742 list_for_each(l, &device->ccw_queue) 743 if (++counter >= 31) 744 break; 745 } 746 spin_lock(&device->profile.lock); 747 if (device->profile.data) { 748 device->profile.data->dasd_io_nr_req[counter]++; 749 if (rq_data_dir(req) == READ) 750 device->profile.data->dasd_read_nr_req[counter]++; 751 } 752 spin_unlock(&device->profile.lock); 753 } 754 755 /* 756 * Add profiling information for cqr after execution. 
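 *
 * The dasd_profile_counter() helper below maps a value into one of the
 * 32 logarithmic histogram buckets; roughly (worked example derived from
 * the macro, not in the original comment): values 0-3 land in bucket 0,
 * 4-7 in bucket 1, 8-15 in bucket 2, and so on, capped at bucket 31.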
757 */ 758 759 #define dasd_profile_counter(value, index) \ 760 { \ 761 for (index = 0; index < 31 && value >> (2+index); index++) \ 762 ; \ 763 } 764 765 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 766 int is_alias, 767 int is_tpm, 768 int is_read, 769 long sectors, 770 int sectors_ind, 771 int tottime_ind, 772 int tottimeps_ind, 773 int strtime_ind, 774 int irqtime_ind, 775 int irqtimeps_ind, 776 int endtime_ind) 777 { 778 /* in case of an overflow, reset the whole profile */ 779 if (data->dasd_io_reqs == UINT_MAX) { 780 memset(data, 0, sizeof(*data)); 781 ktime_get_real_ts64(&data->starttod); 782 } 783 data->dasd_io_reqs++; 784 data->dasd_io_sects += sectors; 785 if (is_alias) 786 data->dasd_io_alias++; 787 if (is_tpm) 788 data->dasd_io_tpm++; 789 790 data->dasd_io_secs[sectors_ind]++; 791 data->dasd_io_times[tottime_ind]++; 792 data->dasd_io_timps[tottimeps_ind]++; 793 data->dasd_io_time1[strtime_ind]++; 794 data->dasd_io_time2[irqtime_ind]++; 795 data->dasd_io_time2ps[irqtimeps_ind]++; 796 data->dasd_io_time3[endtime_ind]++; 797 798 if (is_read) { 799 data->dasd_read_reqs++; 800 data->dasd_read_sects += sectors; 801 if (is_alias) 802 data->dasd_read_alias++; 803 if (is_tpm) 804 data->dasd_read_tpm++; 805 data->dasd_read_secs[sectors_ind]++; 806 data->dasd_read_times[tottime_ind]++; 807 data->dasd_read_time1[strtime_ind]++; 808 data->dasd_read_time2[irqtime_ind]++; 809 data->dasd_read_time3[endtime_ind]++; 810 } 811 } 812 813 static void dasd_profile_end(struct dasd_block *block, 814 struct dasd_ccw_req *cqr, 815 struct request *req) 816 { 817 unsigned long strtime, irqtime, endtime, tottime; 818 unsigned long tottimeps, sectors; 819 struct dasd_device *device; 820 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 821 int irqtime_ind, irqtimeps_ind, endtime_ind; 822 struct dasd_profile_info *data; 823 824 device = cqr->startdev; 825 if (!(dasd_global_profile_level || 826 block->profile.data || 827 device->profile.data)) 828 return; 829 830 sectors = blk_rq_sectors(req); 831 if (!cqr->buildclk || !cqr->startclk || 832 !cqr->stopclk || !cqr->endclk || 833 !sectors) 834 return; 835 836 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 837 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 838 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 839 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 840 tottimeps = tottime / sectors; 841 842 dasd_profile_counter(sectors, sectors_ind); 843 dasd_profile_counter(tottime, tottime_ind); 844 dasd_profile_counter(tottimeps, tottimeps_ind); 845 dasd_profile_counter(strtime, strtime_ind); 846 dasd_profile_counter(irqtime, irqtime_ind); 847 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 848 dasd_profile_counter(endtime, endtime_ind); 849 850 spin_lock(&dasd_global_profile.lock); 851 if (dasd_global_profile.data) { 852 data = dasd_global_profile.data; 853 data->dasd_sum_times += tottime; 854 data->dasd_sum_time_str += strtime; 855 data->dasd_sum_time_irq += irqtime; 856 data->dasd_sum_time_end += endtime; 857 dasd_profile_end_add_data(dasd_global_profile.data, 858 cqr->startdev != block->base, 859 cqr->cpmode == 1, 860 rq_data_dir(req) == READ, 861 sectors, sectors_ind, tottime_ind, 862 tottimeps_ind, strtime_ind, 863 irqtime_ind, irqtimeps_ind, 864 endtime_ind); 865 } 866 spin_unlock(&dasd_global_profile.lock); 867 868 spin_lock(&block->profile.lock); 869 if (block->profile.data) { 870 data = block->profile.data; 871 data->dasd_sum_times += tottime; 872 data->dasd_sum_time_str += strtime; 873 
data->dasd_sum_time_irq += irqtime; 874 data->dasd_sum_time_end += endtime; 875 dasd_profile_end_add_data(block->profile.data, 876 cqr->startdev != block->base, 877 cqr->cpmode == 1, 878 rq_data_dir(req) == READ, 879 sectors, sectors_ind, tottime_ind, 880 tottimeps_ind, strtime_ind, 881 irqtime_ind, irqtimeps_ind, 882 endtime_ind); 883 } 884 spin_unlock(&block->profile.lock); 885 886 spin_lock(&device->profile.lock); 887 if (device->profile.data) { 888 data = device->profile.data; 889 data->dasd_sum_times += tottime; 890 data->dasd_sum_time_str += strtime; 891 data->dasd_sum_time_irq += irqtime; 892 data->dasd_sum_time_end += endtime; 893 dasd_profile_end_add_data(device->profile.data, 894 cqr->startdev != block->base, 895 cqr->cpmode == 1, 896 rq_data_dir(req) == READ, 897 sectors, sectors_ind, tottime_ind, 898 tottimeps_ind, strtime_ind, 899 irqtime_ind, irqtimeps_ind, 900 endtime_ind); 901 } 902 spin_unlock(&device->profile.lock); 903 } 904 905 void dasd_profile_reset(struct dasd_profile *profile) 906 { 907 struct dasd_profile_info *data; 908 909 spin_lock_bh(&profile->lock); 910 data = profile->data; 911 if (!data) { 912 spin_unlock_bh(&profile->lock); 913 return; 914 } 915 memset(data, 0, sizeof(*data)); 916 ktime_get_real_ts64(&data->starttod); 917 spin_unlock_bh(&profile->lock); 918 } 919 920 int dasd_profile_on(struct dasd_profile *profile) 921 { 922 struct dasd_profile_info *data; 923 924 data = kzalloc(sizeof(*data), GFP_KERNEL); 925 if (!data) 926 return -ENOMEM; 927 spin_lock_bh(&profile->lock); 928 if (profile->data) { 929 spin_unlock_bh(&profile->lock); 930 kfree(data); 931 return 0; 932 } 933 ktime_get_real_ts64(&data->starttod); 934 profile->data = data; 935 spin_unlock_bh(&profile->lock); 936 return 0; 937 } 938 939 void dasd_profile_off(struct dasd_profile *profile) 940 { 941 spin_lock_bh(&profile->lock); 942 kfree(profile->data); 943 profile->data = NULL; 944 spin_unlock_bh(&profile->lock); 945 } 946 947 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 948 { 949 char *buffer; 950 951 buffer = vmalloc(user_len + 1); 952 if (buffer == NULL) 953 return ERR_PTR(-ENOMEM); 954 if (copy_from_user(buffer, user_buf, user_len) != 0) { 955 vfree(buffer); 956 return ERR_PTR(-EFAULT); 957 } 958 /* got the string, now strip linefeed. 
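	 * (the buffer was allocated with user_len + 1 bytes above, so
	 * writing the terminator at buffer[user_len] stays in bounds)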
*/ 959 if (buffer[user_len - 1] == '\n') 960 buffer[user_len - 1] = 0; 961 else 962 buffer[user_len] = 0; 963 return buffer; 964 } 965 966 static ssize_t dasd_stats_write(struct file *file, 967 const char __user *user_buf, 968 size_t user_len, loff_t *pos) 969 { 970 char *buffer, *str; 971 int rc; 972 struct seq_file *m = (struct seq_file *)file->private_data; 973 struct dasd_profile *prof = m->private; 974 975 if (user_len > 65536) 976 user_len = 65536; 977 buffer = dasd_get_user_string(user_buf, user_len); 978 if (IS_ERR(buffer)) 979 return PTR_ERR(buffer); 980 981 str = skip_spaces(buffer); 982 rc = user_len; 983 if (strncmp(str, "reset", 5) == 0) { 984 dasd_profile_reset(prof); 985 } else if (strncmp(str, "on", 2) == 0) { 986 rc = dasd_profile_on(prof); 987 if (rc) 988 goto out; 989 rc = user_len; 990 if (prof == &dasd_global_profile) { 991 dasd_profile_reset(prof); 992 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 993 } 994 } else if (strncmp(str, "off", 3) == 0) { 995 if (prof == &dasd_global_profile) 996 dasd_global_profile_level = DASD_PROFILE_OFF; 997 dasd_profile_off(prof); 998 } else 999 rc = -EINVAL; 1000 out: 1001 vfree(buffer); 1002 return rc; 1003 } 1004 1005 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 1006 { 1007 int i; 1008 1009 for (i = 0; i < 32; i++) 1010 seq_printf(m, "%u ", array[i]); 1011 seq_putc(m, '\n'); 1012 } 1013 1014 static void dasd_stats_seq_print(struct seq_file *m, 1015 struct dasd_profile_info *data) 1016 { 1017 seq_printf(m, "start_time %lld.%09ld\n", 1018 (s64)data->starttod.tv_sec, data->starttod.tv_nsec); 1019 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 1020 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 1021 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 1022 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 1023 seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ? 1024 data->dasd_sum_times / data->dasd_io_reqs : 0UL); 1025 seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ? 1026 data->dasd_sum_time_str / data->dasd_io_reqs : 0UL); 1027 seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ? 1028 data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL); 1029 seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ? 
1030 data->dasd_sum_time_end / data->dasd_io_reqs : 0UL); 1031 seq_puts(m, "histogram_sectors "); 1032 dasd_stats_array(m, data->dasd_io_secs); 1033 seq_puts(m, "histogram_io_times "); 1034 dasd_stats_array(m, data->dasd_io_times); 1035 seq_puts(m, "histogram_io_times_weighted "); 1036 dasd_stats_array(m, data->dasd_io_timps); 1037 seq_puts(m, "histogram_time_build_to_ssch "); 1038 dasd_stats_array(m, data->dasd_io_time1); 1039 seq_puts(m, "histogram_time_ssch_to_irq "); 1040 dasd_stats_array(m, data->dasd_io_time2); 1041 seq_puts(m, "histogram_time_ssch_to_irq_weighted "); 1042 dasd_stats_array(m, data->dasd_io_time2ps); 1043 seq_puts(m, "histogram_time_irq_to_end "); 1044 dasd_stats_array(m, data->dasd_io_time3); 1045 seq_puts(m, "histogram_ccw_queue_length "); 1046 dasd_stats_array(m, data->dasd_io_nr_req); 1047 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 1048 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 1049 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 1050 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 1051 seq_puts(m, "histogram_read_sectors "); 1052 dasd_stats_array(m, data->dasd_read_secs); 1053 seq_puts(m, "histogram_read_times "); 1054 dasd_stats_array(m, data->dasd_read_times); 1055 seq_puts(m, "histogram_read_time_build_to_ssch "); 1056 dasd_stats_array(m, data->dasd_read_time1); 1057 seq_puts(m, "histogram_read_time_ssch_to_irq "); 1058 dasd_stats_array(m, data->dasd_read_time2); 1059 seq_puts(m, "histogram_read_time_irq_to_end "); 1060 dasd_stats_array(m, data->dasd_read_time3); 1061 seq_puts(m, "histogram_read_ccw_queue_length "); 1062 dasd_stats_array(m, data->dasd_read_nr_req); 1063 } 1064 1065 static int dasd_stats_show(struct seq_file *m, void *v) 1066 { 1067 struct dasd_profile *profile; 1068 struct dasd_profile_info *data; 1069 1070 profile = m->private; 1071 spin_lock_bh(&profile->lock); 1072 data = profile->data; 1073 if (!data) { 1074 spin_unlock_bh(&profile->lock); 1075 seq_puts(m, "disabled\n"); 1076 return 0; 1077 } 1078 dasd_stats_seq_print(m, data); 1079 spin_unlock_bh(&profile->lock); 1080 return 0; 1081 } 1082 1083 static int dasd_stats_open(struct inode *inode, struct file *file) 1084 { 1085 struct dasd_profile *profile = inode->i_private; 1086 return single_open(file, dasd_stats_show, profile); 1087 } 1088 1089 static const struct file_operations dasd_stats_raw_fops = { 1090 .owner = THIS_MODULE, 1091 .open = dasd_stats_open, 1092 .read = seq_read, 1093 .llseek = seq_lseek, 1094 .release = single_release, 1095 .write = dasd_stats_write, 1096 }; 1097 1098 static void dasd_profile_init(struct dasd_profile *profile, 1099 struct dentry *base_dentry) 1100 { 1101 umode_t mode; 1102 struct dentry *pde; 1103 1104 if (!base_dentry) 1105 return; 1106 profile->dentry = NULL; 1107 profile->data = NULL; 1108 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1109 pde = debugfs_create_file("statistics", mode, base_dentry, 1110 profile, &dasd_stats_raw_fops); 1111 if (pde && !IS_ERR(pde)) 1112 profile->dentry = pde; 1113 return; 1114 } 1115 1116 static void dasd_profile_exit(struct dasd_profile *profile) 1117 { 1118 dasd_profile_off(profile); 1119 debugfs_remove(profile->dentry); 1120 profile->dentry = NULL; 1121 } 1122 1123 static void dasd_statistics_removeroot(void) 1124 { 1125 dasd_global_profile_level = DASD_PROFILE_OFF; 1126 dasd_profile_exit(&dasd_global_profile); 1127 debugfs_remove(dasd_debugfs_global_entry); 1128 debugfs_remove(dasd_debugfs_root_entry); 1129 } 1130 1131 static void 
dasd_statistics_createroot(void) 1132 { 1133 struct dentry *pde; 1134 1135 dasd_debugfs_root_entry = NULL; 1136 pde = debugfs_create_dir("dasd", NULL); 1137 if (!pde || IS_ERR(pde)) 1138 goto error; 1139 dasd_debugfs_root_entry = pde; 1140 pde = debugfs_create_dir("global", dasd_debugfs_root_entry); 1141 if (!pde || IS_ERR(pde)) 1142 goto error; 1143 dasd_debugfs_global_entry = pde; 1144 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); 1145 return; 1146 1147 error: 1148 DBF_EVENT(DBF_ERR, "%s", 1149 "Creation of the dasd debugfs interface failed"); 1150 dasd_statistics_removeroot(); 1151 return; 1152 } 1153 1154 #else 1155 #define dasd_profile_start(block, cqr, req) do {} while (0) 1156 #define dasd_profile_end(block, cqr, req) do {} while (0) 1157 1158 static void dasd_statistics_createroot(void) 1159 { 1160 return; 1161 } 1162 1163 static void dasd_statistics_removeroot(void) 1164 { 1165 return; 1166 } 1167 1168 int dasd_stats_generic_show(struct seq_file *m, void *v) 1169 { 1170 seq_puts(m, "Statistics are not activated in this kernel\n"); 1171 return 0; 1172 } 1173 1174 static void dasd_profile_init(struct dasd_profile *profile, 1175 struct dentry *base_dentry) 1176 { 1177 return; 1178 } 1179 1180 static void dasd_profile_exit(struct dasd_profile *profile) 1181 { 1182 return; 1183 } 1184 1185 int dasd_profile_on(struct dasd_profile *profile) 1186 { 1187 return 0; 1188 } 1189 1190 #endif /* CONFIG_DASD_PROFILE */ 1191 1192 static int dasd_hosts_show(struct seq_file *m, void *v) 1193 { 1194 struct dasd_device *device; 1195 int rc = -EOPNOTSUPP; 1196 1197 device = m->private; 1198 dasd_get_device(device); 1199 1200 if (device->discipline->hosts_print) 1201 rc = device->discipline->hosts_print(device, m); 1202 1203 dasd_put_device(device); 1204 return rc; 1205 } 1206 1207 DEFINE_SHOW_ATTRIBUTE(dasd_hosts); 1208 1209 static void dasd_hosts_exit(struct dasd_device *device) 1210 { 1211 debugfs_remove(device->hosts_dentry); 1212 device->hosts_dentry = NULL; 1213 } 1214 1215 static void dasd_hosts_init(struct dentry *base_dentry, 1216 struct dasd_device *device) 1217 { 1218 struct dentry *pde; 1219 umode_t mode; 1220 1221 if (!base_dentry) 1222 return; 1223 1224 mode = S_IRUSR | S_IFREG; 1225 pde = debugfs_create_file("host_access_list", mode, base_dentry, 1226 device, &dasd_hosts_fops); 1227 if (pde && !IS_ERR(pde)) 1228 device->hosts_dentry = pde; 1229 } 1230 1231 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, 1232 struct dasd_device *device, 1233 struct dasd_ccw_req *cqr) 1234 { 1235 unsigned long flags; 1236 char *data, *chunk; 1237 int size = 0; 1238 1239 if (cplength > 0) 1240 size += cplength * sizeof(struct ccw1); 1241 if (datasize > 0) 1242 size += datasize; 1243 if (!cqr) 1244 size += (sizeof(*cqr) + 7L) & -8L; 1245 1246 spin_lock_irqsave(&device->mem_lock, flags); 1247 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); 1248 spin_unlock_irqrestore(&device->mem_lock, flags); 1249 if (!chunk) 1250 return ERR_PTR(-ENOMEM); 1251 if (!cqr) { 1252 cqr = (void *) data; 1253 data += (sizeof(*cqr) + 7L) & -8L; 1254 } 1255 memset(cqr, 0, sizeof(*cqr)); 1256 cqr->mem_chunk = chunk; 1257 if (cplength > 0) { 1258 cqr->cpaddr = data; 1259 data += cplength * sizeof(struct ccw1); 1260 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); 1261 } 1262 if (datasize > 0) { 1263 cqr->data = data; 1264 memset(cqr->data, 0, datasize); 1265 } 1266 cqr->magic = magic; 1267 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1268 
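	/*
	 * Hold a reference on the device for the lifetime of the request;
	 * it is dropped again in dasd_sfree_request() (descriptive note,
	 * not in the original source).
	 */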
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
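 *
 * Behaviour sketch (summary, not in the original comment): on a
 * successful ccw_device_clear() the request is left in
 * DASD_CQR_CLEAR_PENDING, and the subsequent interrupt with the clear
 * function bit set moves it to DASD_CQR_CLEARED in dasd_int_handler().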
1364 */ 1365 int dasd_term_IO(struct dasd_ccw_req *cqr) 1366 { 1367 struct dasd_device *device; 1368 int retries, rc; 1369 char errorstring[ERRORLENGTH]; 1370 1371 /* Check the cqr */ 1372 rc = dasd_check_cqr(cqr); 1373 if (rc) 1374 return rc; 1375 retries = 0; 1376 device = (struct dasd_device *) cqr->startdev; 1377 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1378 rc = ccw_device_clear(device->cdev, (long) cqr); 1379 switch (rc) { 1380 case 0: /* termination successful */ 1381 cqr->status = DASD_CQR_CLEAR_PENDING; 1382 cqr->stopclk = get_tod_clock(); 1383 cqr->starttime = 0; 1384 DBF_DEV_EVENT(DBF_DEBUG, device, 1385 "terminate cqr %p successful", 1386 cqr); 1387 break; 1388 case -ENODEV: 1389 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1390 "device gone, retry"); 1391 break; 1392 case -EINVAL: 1393 /* 1394 * device not valid so no I/O could be running 1395 * handle CQR as termination successful 1396 */ 1397 cqr->status = DASD_CQR_CLEARED; 1398 cqr->stopclk = get_tod_clock(); 1399 cqr->starttime = 0; 1400 /* no retries for invalid devices */ 1401 cqr->retries = -1; 1402 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1403 "EINVAL, handle as terminated"); 1404 /* fake rc to success */ 1405 rc = 0; 1406 break; 1407 default: 1408 /* internal error 10 - unknown rc*/ 1409 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1410 dev_err(&device->cdev->dev, "An error occurred in the " 1411 "DASD device driver, reason=%s\n", errorstring); 1412 BUG(); 1413 break; 1414 } 1415 retries++; 1416 } 1417 dasd_schedule_device_bh(device); 1418 return rc; 1419 } 1420 EXPORT_SYMBOL(dasd_term_IO); 1421 1422 /* 1423 * Start the i/o. This start_IO can fail if the channel is really busy. 1424 * In that case set up a timer to start the request later. 1425 */ 1426 int dasd_start_IO(struct dasd_ccw_req *cqr) 1427 { 1428 struct dasd_device *device; 1429 int rc; 1430 char errorstring[ERRORLENGTH]; 1431 1432 /* Check the cqr */ 1433 rc = dasd_check_cqr(cqr); 1434 if (rc) { 1435 cqr->intrc = rc; 1436 return rc; 1437 } 1438 device = (struct dasd_device *) cqr->startdev; 1439 if (((cqr->block && 1440 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1441 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1442 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1443 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1444 "because of stolen lock", cqr); 1445 cqr->status = DASD_CQR_ERROR; 1446 cqr->intrc = -EPERM; 1447 return -EPERM; 1448 } 1449 if (cqr->retries < 0) { 1450 /* internal error 14 - start_IO run out of retries */ 1451 sprintf(errorstring, "14 %p", cqr); 1452 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1453 "device driver, reason=%s\n", errorstring); 1454 cqr->status = DASD_CQR_ERROR; 1455 return -EIO; 1456 } 1457 cqr->startclk = get_tod_clock(); 1458 cqr->starttime = jiffies; 1459 cqr->retries--; 1460 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1461 cqr->lpm &= dasd_path_get_opm(device); 1462 if (!cqr->lpm) 1463 cqr->lpm = dasd_path_get_opm(device); 1464 } 1465 if (cqr->cpmode == 1) { 1466 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1467 (long) cqr, cqr->lpm); 1468 } else { 1469 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1470 (long) cqr, cqr->lpm, 0); 1471 } 1472 switch (rc) { 1473 case 0: 1474 cqr->status = DASD_CQR_IN_IO; 1475 break; 1476 case -EBUSY: 1477 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1478 "start_IO: device busy, retry later"); 1479 break; 1480 case -EACCES: 1481 /* -EACCES indicates that the request used only a subset of the 1482 * available 
paths and all these paths are gone. If the lpm of 1483 * this request was only a subset of the opm (e.g. the ppm) then 1484 * we just do a retry with all available paths. 1485 * If we already use the full opm, something is amiss, and we 1486 * need a full path verification. 1487 */ 1488 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1489 DBF_DEV_EVENT(DBF_WARNING, device, 1490 "start_IO: selected paths gone (%x)", 1491 cqr->lpm); 1492 } else if (cqr->lpm != dasd_path_get_opm(device)) { 1493 cqr->lpm = dasd_path_get_opm(device); 1494 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1495 "start_IO: selected paths gone," 1496 " retry on all paths"); 1497 } else { 1498 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1499 "start_IO: all paths in opm gone," 1500 " do path verification"); 1501 dasd_generic_last_path_gone(device); 1502 dasd_path_no_path(device); 1503 dasd_path_set_tbvpm(device, 1504 ccw_device_get_path_mask( 1505 device->cdev)); 1506 } 1507 break; 1508 case -ENODEV: 1509 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1510 "start_IO: -ENODEV device gone, retry"); 1511 break; 1512 case -EIO: 1513 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1514 "start_IO: -EIO device gone, retry"); 1515 break; 1516 case -EINVAL: 1517 /* most likely caused in power management context */ 1518 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1519 "start_IO: -EINVAL device currently " 1520 "not accessible"); 1521 break; 1522 default: 1523 /* internal error 11 - unknown rc */ 1524 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1525 dev_err(&device->cdev->dev, 1526 "An error occurred in the DASD device driver, " 1527 "reason=%s\n", errorstring); 1528 BUG(); 1529 break; 1530 } 1531 cqr->intrc = rc; 1532 return rc; 1533 } 1534 EXPORT_SYMBOL(dasd_start_IO); 1535 1536 /* 1537 * Timeout function for dasd devices. This is used for different purposes 1538 * 1) missing interrupt handler for normal operation 1539 * 2) delayed start of request where start_IO failed with -EBUSY 1540 * 3) timeout for missing state change interrupts 1541 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1542 * DASD_CQR_QUEUED for 2) and 3). 1543 */ 1544 static void dasd_device_timeout(struct timer_list *t) 1545 { 1546 unsigned long flags; 1547 struct dasd_device *device; 1548 1549 device = from_timer(device, t, timer); 1550 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1551 /* re-activate request queue */ 1552 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1553 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1554 dasd_schedule_device_bh(device); 1555 } 1556 1557 /* 1558 * Setup timeout for a device in jiffies. 1559 */ 1560 void dasd_device_set_timer(struct dasd_device *device, int expires) 1561 { 1562 if (expires == 0) 1563 del_timer(&device->timer); 1564 else 1565 mod_timer(&device->timer, jiffies + expires); 1566 } 1567 EXPORT_SYMBOL(dasd_device_set_timer); 1568 1569 /* 1570 * Clear timeout for a device. 
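 *
 * Usage sketch (illustrative, not from the original source): callers
 * either disarm the timer explicitly,
 *
 *	dasd_device_clear_timer(device);
 *
 * or pass an expiry of 0 to dasd_device_set_timer(), which has the
 * same effect.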
1571 */ 1572 void dasd_device_clear_timer(struct dasd_device *device) 1573 { 1574 del_timer(&device->timer); 1575 } 1576 EXPORT_SYMBOL(dasd_device_clear_timer); 1577 1578 static void dasd_handle_killed_request(struct ccw_device *cdev, 1579 unsigned long intparm) 1580 { 1581 struct dasd_ccw_req *cqr; 1582 struct dasd_device *device; 1583 1584 if (!intparm) 1585 return; 1586 cqr = (struct dasd_ccw_req *) intparm; 1587 if (cqr->status != DASD_CQR_IN_IO) { 1588 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1589 "invalid status in handle_killed_request: " 1590 "%02x", cqr->status); 1591 return; 1592 } 1593 1594 device = dasd_device_from_cdev_locked(cdev); 1595 if (IS_ERR(device)) { 1596 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1597 "unable to get device from cdev"); 1598 return; 1599 } 1600 1601 if (!cqr->startdev || 1602 device != cqr->startdev || 1603 strncmp(cqr->startdev->discipline->ebcname, 1604 (char *) &cqr->magic, 4)) { 1605 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1606 "invalid device in request"); 1607 dasd_put_device(device); 1608 return; 1609 } 1610 1611 /* Schedule request to be retried. */ 1612 cqr->status = DASD_CQR_QUEUED; 1613 1614 dasd_device_clear_timer(device); 1615 dasd_schedule_device_bh(device); 1616 dasd_put_device(device); 1617 } 1618 1619 void dasd_generic_handle_state_change(struct dasd_device *device) 1620 { 1621 /* First of all start sense subsystem status request. */ 1622 dasd_eer_snss(device); 1623 1624 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1625 dasd_schedule_device_bh(device); 1626 if (device->block) { 1627 dasd_schedule_block_bh(device->block); 1628 if (device->block->request_queue) 1629 blk_mq_run_hw_queues(device->block->request_queue, 1630 true); 1631 } 1632 } 1633 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1634 1635 static int dasd_check_hpf_error(struct irb *irb) 1636 { 1637 return (scsw_tm_is_valid_schxs(&irb->scsw) && 1638 (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || 1639 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); 1640 } 1641 1642 static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) 1643 { 1644 struct dasd_device *device = NULL; 1645 u8 *sense = NULL; 1646 1647 if (!block) 1648 return 0; 1649 device = block->base; 1650 if (!device || !device->discipline->is_ese) 1651 return 0; 1652 if (!device->discipline->is_ese(device)) 1653 return 0; 1654 1655 sense = dasd_get_sense(irb); 1656 if (!sense) 1657 return 0; 1658 1659 return !!(sense[1] & SNS1_NO_REC_FOUND) || 1660 !!(sense[1] & SNS1_FILE_PROTECTED) || 1661 scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; 1662 } 1663 1664 static int dasd_ese_oos_cond(u8 *sense) 1665 { 1666 return sense[0] & SNS0_EQUIPMENT_CHECK && 1667 sense[1] & SNS1_PERM_ERR && 1668 sense[1] & SNS1_WRITE_INHIBITED && 1669 sense[25] == 0x01; 1670 } 1671 1672 /* 1673 * Interrupt handler for "normal" ssch-io based dasd devices. 
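 *
 * Rough flow (summary, not in the original comment): IRB error pointers
 * are handled first, then abnormal status with sense data, attention
 * interrupts, clear-pending completion, and finally normal completion,
 * which may start the next queued request directly (fast path).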
1674 */ 1675 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1676 struct irb *irb) 1677 { 1678 struct dasd_ccw_req *cqr, *next, *fcqr; 1679 struct dasd_device *device; 1680 unsigned long now; 1681 int nrf_suppressed = 0; 1682 int fp_suppressed = 0; 1683 u8 *sense = NULL; 1684 int expires; 1685 1686 cqr = (struct dasd_ccw_req *) intparm; 1687 if (IS_ERR(irb)) { 1688 switch (PTR_ERR(irb)) { 1689 case -EIO: 1690 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { 1691 device = cqr->startdev; 1692 cqr->status = DASD_CQR_CLEARED; 1693 dasd_device_clear_timer(device); 1694 wake_up(&dasd_flush_wq); 1695 dasd_schedule_device_bh(device); 1696 return; 1697 } 1698 break; 1699 case -ETIMEDOUT: 1700 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1701 "request timed out\n", __func__); 1702 break; 1703 default: 1704 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1705 "unknown error %ld\n", __func__, 1706 PTR_ERR(irb)); 1707 } 1708 dasd_handle_killed_request(cdev, intparm); 1709 return; 1710 } 1711 1712 now = get_tod_clock(); 1713 /* check for conditions that should be handled immediately */ 1714 if (!cqr || 1715 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1716 scsw_cstat(&irb->scsw) == 0)) { 1717 if (cqr) 1718 memcpy(&cqr->irb, irb, sizeof(*irb)); 1719 device = dasd_device_from_cdev_locked(cdev); 1720 if (IS_ERR(device)) 1721 return; 1722 /* ignore unsolicited interrupts for DIAG discipline */ 1723 if (device->discipline == dasd_diag_discipline_pointer) { 1724 dasd_put_device(device); 1725 return; 1726 } 1727 1728 /* 1729 * In some cases 'File Protected' or 'No Record Found' errors 1730 * might be expected and debug log messages for the 1731 * corresponding interrupts shouldn't be written then. 1732 * Check if either of the according suppress bits is set. 1733 */ 1734 sense = dasd_get_sense(irb); 1735 if (sense) { 1736 fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) && 1737 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 1738 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && 1739 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 1740 1741 /* 1742 * Extent pool probably out-of-space. 1743 * Stop device and check exhaust level. 
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io.
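		 * That is, if another request is already queued behind the
		 * one that just completed, it is started directly from
		 * interrupt context below instead of waiting for the bottom
		 * half (explanatory note, not in the original comment).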
*/ 1838 if (cqr->devlist.next != &device->ccw_queue) { 1839 next = list_entry(cqr->devlist.next, 1840 struct dasd_ccw_req, devlist); 1841 } 1842 } else { /* error */ 1843 /* check for HPF error 1844 * call discipline function to requeue all requests 1845 * and disable HPF accordingly 1846 */ 1847 if (cqr->cpmode && dasd_check_hpf_error(irb) && 1848 device->discipline->handle_hpf_error) 1849 device->discipline->handle_hpf_error(device, irb); 1850 /* 1851 * If we don't want complex ERP for this request, then just 1852 * reset this and retry it in the fastpath 1853 */ 1854 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1855 cqr->retries > 0) { 1856 if (cqr->lpm == dasd_path_get_opm(device)) 1857 DBF_DEV_EVENT(DBF_DEBUG, device, 1858 "default ERP in fastpath " 1859 "(%i retries left)", 1860 cqr->retries); 1861 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1862 cqr->lpm = dasd_path_get_opm(device); 1863 cqr->status = DASD_CQR_QUEUED; 1864 next = cqr; 1865 } else 1866 cqr->status = DASD_CQR_ERROR; 1867 } 1868 if (next && (next->status == DASD_CQR_QUEUED) && 1869 (!device->stopped)) { 1870 if (device->discipline->start_IO(next) == 0) 1871 expires = next->expires; 1872 } 1873 if (expires != 0) 1874 dasd_device_set_timer(device, expires); 1875 else 1876 dasd_device_clear_timer(device); 1877 dasd_schedule_device_bh(device); 1878 } 1879 EXPORT_SYMBOL(dasd_int_handler); 1880 1881 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1882 { 1883 struct dasd_device *device; 1884 1885 device = dasd_device_from_cdev_locked(cdev); 1886 1887 if (IS_ERR(device)) 1888 goto out; 1889 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1890 device->state != device->target || 1891 !device->discipline->check_for_device_change){ 1892 dasd_put_device(device); 1893 goto out; 1894 } 1895 if (device->discipline->dump_sense_dbf) 1896 device->discipline->dump_sense_dbf(device, irb, "uc"); 1897 device->discipline->check_for_device_change(device, NULL, irb); 1898 dasd_put_device(device); 1899 out: 1900 return UC_TODO_RETRY; 1901 } 1902 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1903 1904 /* 1905 * If we have an error on a dasd_block layer request then we cancel 1906 * and return all further requests from the same dasd_block as well. 1907 */ 1908 static void __dasd_device_recovery(struct dasd_device *device, 1909 struct dasd_ccw_req *ref_cqr) 1910 { 1911 struct list_head *l, *n; 1912 struct dasd_ccw_req *cqr; 1913 1914 /* 1915 * only requeue request that came from the dasd_block layer 1916 */ 1917 if (!ref_cqr->block) 1918 return; 1919 1920 list_for_each_safe(l, n, &device->ccw_queue) { 1921 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1922 if (cqr->status == DASD_CQR_QUEUED && 1923 ref_cqr->block == cqr->block) { 1924 cqr->status = DASD_CQR_CLEARED; 1925 } 1926 } 1927 }; 1928 1929 /* 1930 * Remove those ccw requests from the queue that need to be returned 1931 * to the upper layer. 1932 */ 1933 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1934 struct list_head *final_queue) 1935 { 1936 struct list_head *l, *n; 1937 struct dasd_ccw_req *cqr; 1938 1939 /* Process request with final status. */ 1940 list_for_each_safe(l, n, &device->ccw_queue) { 1941 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1942 1943 /* Skip any non-final request. 
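		 * (QUEUED, IN_IO and CLEAR_PENDING requests are still owned
		 * by the device/interrupt layer and must stay on the queue;
		 * descriptive note, not in the original comment)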
*/ 1944 if (cqr->status == DASD_CQR_QUEUED || 1945 cqr->status == DASD_CQR_IN_IO || 1946 cqr->status == DASD_CQR_CLEAR_PENDING) 1947 continue; 1948 if (cqr->status == DASD_CQR_ERROR) { 1949 __dasd_device_recovery(device, cqr); 1950 } 1951 /* Rechain finished requests to final queue */ 1952 list_move_tail(&cqr->devlist, final_queue); 1953 } 1954 } 1955 1956 static void __dasd_process_cqr(struct dasd_device *device, 1957 struct dasd_ccw_req *cqr) 1958 { 1959 char errorstring[ERRORLENGTH]; 1960 1961 switch (cqr->status) { 1962 case DASD_CQR_SUCCESS: 1963 cqr->status = DASD_CQR_DONE; 1964 break; 1965 case DASD_CQR_ERROR: 1966 cqr->status = DASD_CQR_NEED_ERP; 1967 break; 1968 case DASD_CQR_CLEARED: 1969 cqr->status = DASD_CQR_TERMINATED; 1970 break; 1971 default: 1972 /* internal error 12 - wrong cqr status */ 1973 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status); 1974 dev_err(&device->cdev->dev, 1975 "An error occurred in the DASD device driver, " 1976 "reason=%s\n", errorstring); 1977 BUG(); 1978 } 1979 if (cqr->callback) 1980 cqr->callback(cqr, cqr->callback_data); 1981 } 1982 1983 /* 1984 * the cqrs from the final queue are returned to the upper layer 1985 * by setting a dasd_block state and calling the callback function 1986 */ 1987 static void __dasd_device_process_final_queue(struct dasd_device *device, 1988 struct list_head *final_queue) 1989 { 1990 struct list_head *l, *n; 1991 struct dasd_ccw_req *cqr; 1992 struct dasd_block *block; 1993 1994 list_for_each_safe(l, n, final_queue) { 1995 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1996 list_del_init(&cqr->devlist); 1997 block = cqr->block; 1998 if (!block) { 1999 __dasd_process_cqr(device, cqr); 2000 } else { 2001 spin_lock_bh(&block->queue_lock); 2002 __dasd_process_cqr(device, cqr); 2003 spin_unlock_bh(&block->queue_lock); 2004 } 2005 } 2006 } 2007 2008 /* 2009 * Take a look at the first request on the ccw queue and check 2010 * if it reached its expire time. If so, terminate the IO.
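 * A request counts as expired once jiffies has reached
 * cqr->starttime + cqr->expires; if term_IO() fails, the termination is
 * retried in another 5 seconds.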
2011 */ 2012 static void __dasd_device_check_expire(struct dasd_device *device) 2013 { 2014 struct dasd_ccw_req *cqr; 2015 2016 if (list_empty(&device->ccw_queue)) 2017 return; 2018 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2019 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 2020 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 2021 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2022 /* 2023 * IO in safe offline processing should not 2024 * run out of retries 2025 */ 2026 cqr->retries++; 2027 } 2028 if (device->discipline->term_IO(cqr) != 0) { 2029 /* Hmpf, try again in 5 sec */ 2030 dev_err(&device->cdev->dev, 2031 "cqr %p timed out (%lus) but cannot be " 2032 "ended, retrying in 5 s\n", 2033 cqr, (cqr->expires/HZ)); 2034 cqr->expires += 5*HZ; 2035 dasd_device_set_timer(device, 5*HZ); 2036 } else { 2037 dev_err(&device->cdev->dev, 2038 "cqr %p timed out (%lus), %i retries " 2039 "remaining\n", cqr, (cqr->expires/HZ), 2040 cqr->retries); 2041 } 2042 } 2043 } 2044 2045 /* 2046 * return 1 when device is not eligible for IO 2047 */ 2048 static int __dasd_device_is_unusable(struct dasd_device *device, 2049 struct dasd_ccw_req *cqr) 2050 { 2051 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); 2052 2053 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2054 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2055 /* 2056 * dasd is being set offline 2057 * but it is no safe offline where we have to allow I/O 2058 */ 2059 return 1; 2060 } 2061 if (device->stopped) { 2062 if (device->stopped & mask) { 2063 /* stopped and CQR will not change that. */ 2064 return 1; 2065 } 2066 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2067 /* CQR is not able to change device to 2068 * operational. */ 2069 return 1; 2070 } 2071 /* CQR required to get device operational. */ 2072 } 2073 return 0; 2074 } 2075 2076 /* 2077 * Take a look at the first request on the ccw queue and check 2078 * if it needs to be started. 2079 */ 2080 static void __dasd_device_start_head(struct dasd_device *device) 2081 { 2082 struct dasd_ccw_req *cqr; 2083 int rc; 2084 2085 if (list_empty(&device->ccw_queue)) 2086 return; 2087 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2088 if (cqr->status != DASD_CQR_QUEUED) 2089 return; 2090 /* if device is not usable return request to upper layer */ 2091 if (__dasd_device_is_unusable(device, cqr)) { 2092 cqr->intrc = -EAGAIN; 2093 cqr->status = DASD_CQR_CLEARED; 2094 dasd_schedule_device_bh(device); 2095 return; 2096 } 2097 2098 rc = device->discipline->start_IO(cqr); 2099 if (rc == 0) 2100 dasd_device_set_timer(device, cqr->expires); 2101 else if (rc == -EACCES) { 2102 dasd_schedule_device_bh(device); 2103 } else 2104 /* Hmpf, try again in 1/2 sec */ 2105 dasd_device_set_timer(device, 50); 2106 } 2107 2108 static void __dasd_device_check_path_events(struct dasd_device *device) 2109 { 2110 int rc; 2111 2112 if (!dasd_path_get_tbvpm(device)) 2113 return; 2114 2115 if (device->stopped & 2116 ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 2117 return; 2118 rc = device->discipline->verify_path(device, 2119 dasd_path_get_tbvpm(device)); 2120 if (rc) 2121 dasd_device_set_timer(device, 50); 2122 else 2123 dasd_path_clear_all_verify(device); 2124 }; 2125 2126 /* 2127 * Go through all request on the dasd_device request queue, 2128 * terminate them on the cdev if necessary, and return them to the 2129 * submitting layer via callback. 
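 * Requests are first collected on a local flush queue under the ccwdev
 * lock; requests still in I/O are terminated via term_IO(), and the
 * function waits until no request is left in CLEAR_PENDING state before
 * the callbacks are invoked.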
2130 * Note: 2131 * Make sure that all 'submitting layers' still exist when 2132 * this function is called! In other words, when 'device' is a base 2133 * device then all block layer requests must have been removed before 2134 * via dasd_flush_block_queue. 2135 */ 2136 int dasd_flush_device_queue(struct dasd_device *device) 2137 { 2138 struct dasd_ccw_req *cqr, *n; 2139 int rc; 2140 struct list_head flush_queue; 2141 2142 INIT_LIST_HEAD(&flush_queue); 2143 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2144 rc = 0; 2145 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2146 /* Check status and move request to flush_queue */ 2147 switch (cqr->status) { 2148 case DASD_CQR_IN_IO: 2149 rc = device->discipline->term_IO(cqr); 2150 if (rc) { 2151 /* unable to terminate request */ 2152 dev_err(&device->cdev->dev, 2153 "Flushing the DASD request queue " 2154 "failed for request %p\n", cqr); 2155 /* stop flush processing */ 2156 goto finished; 2157 } 2158 break; 2159 case DASD_CQR_QUEUED: 2160 cqr->stopclk = get_tod_clock(); 2161 cqr->status = DASD_CQR_CLEARED; 2162 break; 2163 default: /* no need to modify the others */ 2164 break; 2165 } 2166 list_move_tail(&cqr->devlist, &flush_queue); 2167 } 2168 finished: 2169 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2170 /* 2171 * After this point all requests must be in state CLEAR_PENDING, 2172 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2173 * one of the others. 2174 */ 2175 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2176 wait_event(dasd_flush_wq, 2177 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2178 /* 2179 * Now set each request back to TERMINATED, DONE or NEED_ERP 2180 * and call the callback function of flushed requests 2181 */ 2182 __dasd_device_process_final_queue(device, &flush_queue); 2183 return rc; 2184 } 2185 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2186 2187 /* 2188 * Acquire the device lock and process queues for the device. 2189 */ 2190 static void dasd_device_tasklet(unsigned long data) 2191 { 2192 struct dasd_device *device = (struct dasd_device *) data; 2193 struct list_head final_queue; 2194 2195 atomic_set (&device->tasklet_scheduled, 0); 2196 INIT_LIST_HEAD(&final_queue); 2197 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2198 /* Check expire time of first request on the ccw queue. */ 2199 __dasd_device_check_expire(device); 2200 /* find final requests on ccw queue */ 2201 __dasd_device_process_ccw_queue(device, &final_queue); 2202 __dasd_device_check_path_events(device); 2203 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2204 /* Now call the callback function of requests with final status */ 2205 __dasd_device_process_final_queue(device, &final_queue); 2206 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2207 /* Now check if the head of the ccw queue needs to be started. */ 2208 __dasd_device_start_head(device); 2209 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2210 if (waitqueue_active(&shutdown_waitq)) 2211 wake_up(&shutdown_waitq); 2212 dasd_put_device(device); 2213 } 2214 2215 /* 2216 * Schedules a call to dasd_device_tasklet over the device tasklet. 2217 */ 2218 void dasd_schedule_device_bh(struct dasd_device *device) 2219 { 2220 /* Protect against rescheduling.
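 * The tasklet_scheduled flag is claimed with atomic_cmpxchg() so that the
 * tasklet is scheduled at most once; the device reference taken here is
 * dropped again at the end of dasd_device_tasklet().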
*/ 2221 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2222 return; 2223 dasd_get_device(device); 2224 tasklet_hi_schedule(&device->tasklet); 2225 } 2226 EXPORT_SYMBOL(dasd_schedule_device_bh); 2227 2228 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2229 { 2230 device->stopped |= bits; 2231 } 2232 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2233 2234 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2235 { 2236 device->stopped &= ~bits; 2237 if (!device->stopped) 2238 wake_up(&generic_waitq); 2239 } 2240 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2241 2242 /* 2243 * Queue a request to the head of the device ccw_queue. 2244 * Start the I/O if possible. 2245 */ 2246 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2247 { 2248 struct dasd_device *device; 2249 unsigned long flags; 2250 2251 device = cqr->startdev; 2252 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2253 cqr->status = DASD_CQR_QUEUED; 2254 list_add(&cqr->devlist, &device->ccw_queue); 2255 /* let the bh start the request to keep them in order */ 2256 dasd_schedule_device_bh(device); 2257 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2258 } 2259 EXPORT_SYMBOL(dasd_add_request_head); 2260 2261 /* 2262 * Queue a request to the tail of the device ccw_queue. 2263 * Start the I/O if possible. 2264 */ 2265 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2266 { 2267 struct dasd_device *device; 2268 unsigned long flags; 2269 2270 device = cqr->startdev; 2271 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2272 cqr->status = DASD_CQR_QUEUED; 2273 list_add_tail(&cqr->devlist, &device->ccw_queue); 2274 /* let the bh start the request to keep them in order */ 2275 dasd_schedule_device_bh(device); 2276 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2277 } 2278 EXPORT_SYMBOL(dasd_add_request_tail); 2279 2280 /* 2281 * Wakeup helper for the 'sleep_on' functions. 2282 */ 2283 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2284 { 2285 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2286 cqr->callback_data = DASD_SLEEPON_END_TAG; 2287 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2288 wake_up(&generic_waitq); 2289 } 2290 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2291 2292 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2293 { 2294 struct dasd_device *device; 2295 int rc; 2296 2297 device = cqr->startdev; 2298 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2299 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2300 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2301 return rc; 2302 } 2303 2304 /* 2305 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
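 * Depending on the request status this lets the discipline handle a
 * terminated request, starts the next ERP action, or post-processes a
 * finished ERP request via __dasd_process_erp().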
2306 */ 2307 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2308 { 2309 struct dasd_device *device; 2310 dasd_erp_fn_t erp_fn; 2311 2312 if (cqr->status == DASD_CQR_FILLED) 2313 return 0; 2314 device = cqr->startdev; 2315 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2316 if (cqr->status == DASD_CQR_TERMINATED) { 2317 device->discipline->handle_terminated_request(cqr); 2318 return 1; 2319 } 2320 if (cqr->status == DASD_CQR_NEED_ERP) { 2321 erp_fn = device->discipline->erp_action(cqr); 2322 erp_fn(cqr); 2323 return 1; 2324 } 2325 if (cqr->status == DASD_CQR_FAILED) 2326 dasd_log_sense(cqr, &cqr->irb); 2327 if (cqr->refers) { 2328 __dasd_process_erp(device, cqr); 2329 return 1; 2330 } 2331 } 2332 return 0; 2333 } 2334 2335 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2336 { 2337 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2338 if (cqr->refers) /* erp is not done yet */ 2339 return 1; 2340 return ((cqr->status != DASD_CQR_DONE) && 2341 (cqr->status != DASD_CQR_FAILED)); 2342 } else 2343 return (cqr->status == DASD_CQR_FILLED); 2344 } 2345 2346 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2347 { 2348 struct dasd_device *device; 2349 int rc; 2350 struct list_head ccw_queue; 2351 struct dasd_ccw_req *cqr; 2352 2353 INIT_LIST_HEAD(&ccw_queue); 2354 maincqr->status = DASD_CQR_FILLED; 2355 device = maincqr->startdev; 2356 list_add(&maincqr->blocklist, &ccw_queue); 2357 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2358 cqr = list_first_entry(&ccw_queue, 2359 struct dasd_ccw_req, blocklist)) { 2360 2361 if (__dasd_sleep_on_erp(cqr)) 2362 continue; 2363 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2364 continue; 2365 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2366 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2367 cqr->status = DASD_CQR_FAILED; 2368 cqr->intrc = -EPERM; 2369 continue; 2370 } 2371 /* Non-temporary stop condition will trigger fail fast */ 2372 if (device->stopped & ~DASD_STOPPED_PENDING && 2373 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2374 (!dasd_eer_enabled(device))) { 2375 cqr->status = DASD_CQR_FAILED; 2376 cqr->intrc = -ENOLINK; 2377 continue; 2378 } 2379 /* 2380 * Don't try to start requests if device is in 2381 * offline processing, it might wait forever 2382 */ 2383 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2384 cqr->status = DASD_CQR_FAILED; 2385 cqr->intrc = -ENODEV; 2386 continue; 2387 } 2388 /* 2389 * Don't try to start requests if device is stopped 2390 * except path verification requests 2391 */ 2392 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2393 if (interruptible) { 2394 rc = wait_event_interruptible( 2395 generic_waitq, !(device->stopped)); 2396 if (rc == -ERESTARTSYS) { 2397 cqr->status = DASD_CQR_FAILED; 2398 maincqr->intrc = rc; 2399 continue; 2400 } 2401 } else 2402 wait_event(generic_waitq, !(device->stopped)); 2403 } 2404 if (!cqr->callback) 2405 cqr->callback = dasd_wakeup_cb; 2406 2407 cqr->callback_data = DASD_SLEEPON_START_TAG; 2408 dasd_add_request_tail(cqr); 2409 if (interruptible) { 2410 rc = wait_event_interruptible( 2411 generic_waitq, _wait_for_wakeup(cqr)); 2412 if (rc == -ERESTARTSYS) { 2413 dasd_cancel_req(cqr); 2414 /* wait (non-interruptible) for final status */ 2415 wait_event(generic_waitq, 2416 _wait_for_wakeup(cqr)); 2417 cqr->status = DASD_CQR_FAILED; 2418 maincqr->intrc = rc; 2419 continue; 2420 } 2421 } else 2422 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2423 } 2424 2425 maincqr->endclk = 
get_tod_clock(); 2426 if ((maincqr->status != DASD_CQR_DONE) && 2427 (maincqr->intrc != -ERESTARTSYS)) 2428 dasd_log_sense(maincqr, &maincqr->irb); 2429 if (maincqr->status == DASD_CQR_DONE) 2430 rc = 0; 2431 else if (maincqr->intrc) 2432 rc = maincqr->intrc; 2433 else 2434 rc = -EIO; 2435 return rc; 2436 } 2437 2438 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2439 { 2440 struct dasd_ccw_req *cqr; 2441 2442 list_for_each_entry(cqr, ccw_queue, blocklist) { 2443 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2444 return 0; 2445 } 2446 2447 return 1; 2448 } 2449 2450 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2451 { 2452 struct dasd_device *device; 2453 struct dasd_ccw_req *cqr, *n; 2454 u8 *sense = NULL; 2455 int rc; 2456 2457 retry: 2458 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2459 device = cqr->startdev; 2460 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2461 continue; 2462 2463 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2464 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2465 cqr->status = DASD_CQR_FAILED; 2466 cqr->intrc = -EPERM; 2467 continue; 2468 } 2469 /*Non-temporary stop condition will trigger fail fast*/ 2470 if (device->stopped & ~DASD_STOPPED_PENDING && 2471 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2472 !dasd_eer_enabled(device)) { 2473 cqr->status = DASD_CQR_FAILED; 2474 cqr->intrc = -EAGAIN; 2475 continue; 2476 } 2477 2478 /*Don't try to start requests if device is stopped*/ 2479 if (interruptible) { 2480 rc = wait_event_interruptible( 2481 generic_waitq, !device->stopped); 2482 if (rc == -ERESTARTSYS) { 2483 cqr->status = DASD_CQR_FAILED; 2484 cqr->intrc = rc; 2485 continue; 2486 } 2487 } else 2488 wait_event(generic_waitq, !(device->stopped)); 2489 2490 if (!cqr->callback) 2491 cqr->callback = dasd_wakeup_cb; 2492 cqr->callback_data = DASD_SLEEPON_START_TAG; 2493 dasd_add_request_tail(cqr); 2494 } 2495 2496 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2497 2498 rc = 0; 2499 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2500 /* 2501 * In some cases the 'File Protected' or 'Incorrect Length' 2502 * error might be expected and error recovery would be 2503 * unnecessary in these cases. Check if the according suppress 2504 * bit is set. 2505 */ 2506 sense = dasd_get_sense(&cqr->irb); 2507 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2508 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2509 continue; 2510 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2511 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2512 continue; 2513 2514 /* 2515 * for alias devices simplify error recovery and 2516 * return to upper layer 2517 * do not skip ERP requests 2518 */ 2519 if (cqr->startdev != cqr->basedev && !cqr->refers && 2520 (cqr->status == DASD_CQR_TERMINATED || 2521 cqr->status == DASD_CQR_NEED_ERP)) 2522 return -EAGAIN; 2523 2524 /* normal recovery for basedev IO */ 2525 if (__dasd_sleep_on_erp(cqr)) 2526 /* handle erp first */ 2527 goto retry; 2528 } 2529 2530 return 0; 2531 } 2532 2533 /* 2534 * Queue a request to the tail of the device ccw_queue and wait for 2535 * it's completion. 2536 */ 2537 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2538 { 2539 return _dasd_sleep_on(cqr, 0); 2540 } 2541 EXPORT_SYMBOL(dasd_sleep_on); 2542 2543 /* 2544 * Start requests from a ccw_queue and wait for their completion. 
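 * Callers chain the requests via cqr->blocklist, roughly (sketch, assuming
 * each cqr has been built and is in DASD_CQR_FILLED state):
 *   LIST_HEAD(ccw_queue);
 *   list_add_tail(&cqr->blocklist, &ccw_queue);
 *   rc = dasd_sleep_on_queue(&ccw_queue);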
2545 */ 2546 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2547 { 2548 return _dasd_sleep_on_queue(ccw_queue, 0); 2549 } 2550 EXPORT_SYMBOL(dasd_sleep_on_queue); 2551 2552 /* 2553 * Start requests from a ccw_queue and wait interruptible for their completion. 2554 */ 2555 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2556 { 2557 return _dasd_sleep_on_queue(ccw_queue, 1); 2558 } 2559 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2560 2561 /* 2562 * Queue a request to the tail of the device ccw_queue and wait 2563 * interruptible for it's completion. 2564 */ 2565 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2566 { 2567 return _dasd_sleep_on(cqr, 1); 2568 } 2569 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2570 2571 /* 2572 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2573 * for eckd devices) the currently running request has to be terminated 2574 * and be put back to status queued, before the special request is added 2575 * to the head of the queue. Then the special request is waited on normally. 2576 */ 2577 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2578 { 2579 struct dasd_ccw_req *cqr; 2580 int rc; 2581 2582 if (list_empty(&device->ccw_queue)) 2583 return 0; 2584 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2585 rc = device->discipline->term_IO(cqr); 2586 if (!rc) 2587 /* 2588 * CQR terminated because a more important request is pending. 2589 * Undo decreasing of retry counter because this is 2590 * not an error case. 2591 */ 2592 cqr->retries++; 2593 return rc; 2594 } 2595 2596 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2597 { 2598 struct dasd_device *device; 2599 int rc; 2600 2601 device = cqr->startdev; 2602 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2603 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2604 cqr->status = DASD_CQR_FAILED; 2605 cqr->intrc = -EPERM; 2606 return -EIO; 2607 } 2608 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2609 rc = _dasd_term_running_cqr(device); 2610 if (rc) { 2611 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2612 return rc; 2613 } 2614 cqr->callback = dasd_wakeup_cb; 2615 cqr->callback_data = DASD_SLEEPON_START_TAG; 2616 cqr->status = DASD_CQR_QUEUED; 2617 /* 2618 * add new request as second 2619 * first the terminated cqr needs to be finished 2620 */ 2621 list_add(&cqr->devlist, device->ccw_queue.next); 2622 2623 /* let the bh start the request to keep them in order */ 2624 dasd_schedule_device_bh(device); 2625 2626 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2627 2628 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2629 2630 if (cqr->status == DASD_CQR_DONE) 2631 rc = 0; 2632 else if (cqr->intrc) 2633 rc = cqr->intrc; 2634 else 2635 rc = -EIO; 2636 2637 /* kick tasklets */ 2638 dasd_schedule_device_bh(device); 2639 if (device->block) 2640 dasd_schedule_block_bh(device->block); 2641 2642 return rc; 2643 } 2644 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2645 2646 /* 2647 * Cancels a request that was started with dasd_sleep_on_req. 2648 * This is useful to timeout requests. The request will be 2649 * terminated if it is currently in i/o. 2650 * Returns 0 if request termination was successful 2651 * negative error code if termination failed 2652 * Cancellation of a request is an asynchronous operation! The calling 2653 * function has to wait until the request is properly returned via callback. 
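 * For example, _dasd_sleep_on() above cancels a request whose interruptible
 * wait was broken by a signal and then waits for the callback:
 *   dasd_cancel_req(cqr);
 *   wait_event(generic_waitq, _wait_for_wakeup(cqr));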
2654 */ 2655 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2656 { 2657 struct dasd_device *device = cqr->startdev; 2658 int rc = 0; 2659 2660 switch (cqr->status) { 2661 case DASD_CQR_QUEUED: 2662 /* request was not started - just set to cleared */ 2663 cqr->status = DASD_CQR_CLEARED; 2664 break; 2665 case DASD_CQR_IN_IO: 2666 /* request in IO - terminate IO and release again */ 2667 rc = device->discipline->term_IO(cqr); 2668 if (rc) { 2669 dev_err(&device->cdev->dev, 2670 "Cancelling request %p failed with rc=%d\n", 2671 cqr, rc); 2672 } else { 2673 cqr->stopclk = get_tod_clock(); 2674 } 2675 break; 2676 default: /* already finished or clear pending - do nothing */ 2677 break; 2678 } 2679 dasd_schedule_device_bh(device); 2680 return rc; 2681 } 2682 2683 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2684 { 2685 struct dasd_device *device = cqr->startdev; 2686 unsigned long flags; 2687 int rc; 2688 2689 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2690 rc = __dasd_cancel_req(cqr); 2691 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2692 return rc; 2693 } 2694 2695 /* 2696 * SECTION: Operations of the dasd_block layer. 2697 */ 2698 2699 /* 2700 * Timeout function for dasd_block. This is used when the block layer 2701 * is waiting for something that may not come reliably, (e.g. a state 2702 * change interrupt) 2703 */ 2704 static void dasd_block_timeout(struct timer_list *t) 2705 { 2706 unsigned long flags; 2707 struct dasd_block *block; 2708 2709 block = from_timer(block, t, timer); 2710 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2711 /* re-activate request queue */ 2712 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2713 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2714 dasd_schedule_block_bh(block); 2715 blk_mq_run_hw_queues(block->request_queue, true); 2716 } 2717 2718 /* 2719 * Setup timeout for a dasd_block in jiffies. 2720 */ 2721 void dasd_block_set_timer(struct dasd_block *block, int expires) 2722 { 2723 if (expires == 0) 2724 del_timer(&block->timer); 2725 else 2726 mod_timer(&block->timer, jiffies + expires); 2727 } 2728 EXPORT_SYMBOL(dasd_block_set_timer); 2729 2730 /* 2731 * Clear timeout for a dasd_block. 2732 */ 2733 void dasd_block_clear_timer(struct dasd_block *block) 2734 { 2735 del_timer(&block->timer); 2736 } 2737 EXPORT_SYMBOL(dasd_block_clear_timer); 2738 2739 /* 2740 * Process finished error recovery ccw. 
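 * Logs whether ERP succeeded and calls the discipline's erp_postaction()
 * callback on the finished ERP request.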
2741 */ 2742 static void __dasd_process_erp(struct dasd_device *device, 2743 struct dasd_ccw_req *cqr) 2744 { 2745 dasd_erp_fn_t erp_fn; 2746 2747 if (cqr->status == DASD_CQR_DONE) 2748 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2749 else 2750 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2751 erp_fn = device->discipline->erp_postaction(cqr); 2752 erp_fn(cqr); 2753 } 2754 2755 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2756 { 2757 struct request *req; 2758 blk_status_t error = BLK_STS_OK; 2759 unsigned int proc_bytes; 2760 int status; 2761 2762 req = (struct request *) cqr->callback_data; 2763 dasd_profile_end(cqr->block, cqr, req); 2764 2765 proc_bytes = cqr->proc_bytes; 2766 status = cqr->block->base->discipline->free_cp(cqr, req); 2767 if (status < 0) 2768 error = errno_to_blk_status(status); 2769 else if (status == 0) { 2770 switch (cqr->intrc) { 2771 case -EPERM: 2772 error = BLK_STS_NEXUS; 2773 break; 2774 case -ENOLINK: 2775 error = BLK_STS_TRANSPORT; 2776 break; 2777 case -ETIMEDOUT: 2778 error = BLK_STS_TIMEOUT; 2779 break; 2780 default: 2781 error = BLK_STS_IOERR; 2782 break; 2783 } 2784 } 2785 2786 /* 2787 * We need to take care for ETIMEDOUT errors here since the 2788 * complete callback does not get called in this case. 2789 * Take care of all errors here and avoid additional code to 2790 * transfer the error value to the complete callback. 2791 */ 2792 if (error) { 2793 blk_mq_end_request(req, error); 2794 blk_mq_run_hw_queues(req->q, true); 2795 } else { 2796 /* 2797 * Partial completed requests can happen with ESE devices. 2798 * During read we might have gotten a NRF error and have to 2799 * complete a request partially. 2800 */ 2801 if (proc_bytes) { 2802 blk_update_request(req, BLK_STS_OK, 2803 blk_rq_bytes(req) - proc_bytes); 2804 blk_mq_requeue_request(req, true); 2805 } else { 2806 blk_mq_complete_request(req); 2807 } 2808 } 2809 } 2810 2811 /* 2812 * Process ccw request queue. 2813 */ 2814 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2815 struct list_head *final_queue) 2816 { 2817 struct list_head *l, *n; 2818 struct dasd_ccw_req *cqr; 2819 dasd_erp_fn_t erp_fn; 2820 unsigned long flags; 2821 struct dasd_device *base = block->base; 2822 2823 restart: 2824 /* Process request with final status. */ 2825 list_for_each_safe(l, n, &block->ccw_queue) { 2826 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2827 if (cqr->status != DASD_CQR_DONE && 2828 cqr->status != DASD_CQR_FAILED && 2829 cqr->status != DASD_CQR_NEED_ERP && 2830 cqr->status != DASD_CQR_TERMINATED) 2831 continue; 2832 2833 if (cqr->status == DASD_CQR_TERMINATED) { 2834 base->discipline->handle_terminated_request(cqr); 2835 goto restart; 2836 } 2837 2838 /* Process requests that may be recovered */ 2839 if (cqr->status == DASD_CQR_NEED_ERP) { 2840 erp_fn = base->discipline->erp_action(cqr); 2841 if (IS_ERR(erp_fn(cqr))) 2842 continue; 2843 goto restart; 2844 } 2845 2846 /* log sense for fatal error */ 2847 if (cqr->status == DASD_CQR_FAILED) { 2848 dasd_log_sense(cqr, &cqr->irb); 2849 } 2850 2851 /* First of all call extended error reporting. 
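 * If EER is enabled, a fatal request is written to the EER buffer, reset
 * to DASD_CQR_FILLED with 255 retries, and the device is stopped with
 * DASD_STOPPED_QUIESCE before processing restarts.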
*/ 2852 if (dasd_eer_enabled(base) && 2853 cqr->status == DASD_CQR_FAILED) { 2854 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2855 2856 /* restart request */ 2857 cqr->status = DASD_CQR_FILLED; 2858 cqr->retries = 255; 2859 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2860 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2861 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2862 flags); 2863 goto restart; 2864 } 2865 2866 /* Process finished ERP request. */ 2867 if (cqr->refers) { 2868 __dasd_process_erp(base, cqr); 2869 goto restart; 2870 } 2871 2872 /* Rechain finished requests to final queue */ 2873 cqr->endclk = get_tod_clock(); 2874 list_move_tail(&cqr->blocklist, final_queue); 2875 } 2876 } 2877 2878 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2879 { 2880 dasd_schedule_block_bh(cqr->block); 2881 } 2882 2883 static void __dasd_block_start_head(struct dasd_block *block) 2884 { 2885 struct dasd_ccw_req *cqr; 2886 2887 if (list_empty(&block->ccw_queue)) 2888 return; 2889 /* We allways begin with the first requests on the queue, as some 2890 * of previously started requests have to be enqueued on a 2891 * dasd_device again for error recovery. 2892 */ 2893 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2894 if (cqr->status != DASD_CQR_FILLED) 2895 continue; 2896 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2897 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2898 cqr->status = DASD_CQR_FAILED; 2899 cqr->intrc = -EPERM; 2900 dasd_schedule_block_bh(block); 2901 continue; 2902 } 2903 /* Non-temporary stop condition will trigger fail fast */ 2904 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2905 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2906 (!dasd_eer_enabled(block->base))) { 2907 cqr->status = DASD_CQR_FAILED; 2908 cqr->intrc = -ENOLINK; 2909 dasd_schedule_block_bh(block); 2910 continue; 2911 } 2912 /* Don't try to start requests if device is stopped */ 2913 if (block->base->stopped) 2914 return; 2915 2916 /* just a fail safe check, should not happen */ 2917 if (!cqr->startdev) 2918 cqr->startdev = block->base; 2919 2920 /* make sure that the requests we submit find their way back */ 2921 cqr->callback = dasd_return_cqr_cb; 2922 2923 dasd_add_request_tail(cqr); 2924 } 2925 } 2926 2927 /* 2928 * Central dasd_block layer routine. Takes requests from the generic 2929 * block layer request queue, creates ccw requests, enqueues them on 2930 * a dasd_device and processes ccw requests that have been returned. 2931 */ 2932 static void dasd_block_tasklet(unsigned long data) 2933 { 2934 struct dasd_block *block = (struct dasd_block *) data; 2935 struct list_head final_queue; 2936 struct list_head *l, *n; 2937 struct dasd_ccw_req *cqr; 2938 struct dasd_queue *dq; 2939 2940 atomic_set(&block->tasklet_scheduled, 0); 2941 INIT_LIST_HEAD(&final_queue); 2942 spin_lock_irq(&block->queue_lock); 2943 /* Finish off requests on ccw queue */ 2944 __dasd_process_block_ccw_queue(block, &final_queue); 2945 spin_unlock_irq(&block->queue_lock); 2946 2947 /* Now call the callback function of requests with final status */ 2948 list_for_each_safe(l, n, &final_queue) { 2949 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2950 dq = cqr->dq; 2951 spin_lock_irq(&dq->lock); 2952 list_del_init(&cqr->blocklist); 2953 __dasd_cleanup_cqr(cqr); 2954 spin_unlock_irq(&dq->lock); 2955 } 2956 2957 spin_lock_irq(&block->queue_lock); 2958 /* Now check if the head of the ccw queue needs to be started. 
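 * (requests in DASD_CQR_FILLED state are handed over to a dasd_device via
 * dasd_add_request_tail).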
*/ 2959 __dasd_block_start_head(block); 2960 spin_unlock_irq(&block->queue_lock); 2961 2962 if (waitqueue_active(&shutdown_waitq)) 2963 wake_up(&shutdown_waitq); 2964 dasd_put_device(block->base); 2965 } 2966 2967 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2968 { 2969 wake_up(&dasd_flush_wq); 2970 } 2971 2972 /* 2973 * Requeue a request back to the block request queue 2974 * only works for block requests 2975 */ 2976 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2977 { 2978 struct dasd_block *block = cqr->block; 2979 struct request *req; 2980 2981 if (!block) 2982 return -EINVAL; 2983 spin_lock_irq(&cqr->dq->lock); 2984 req = (struct request *) cqr->callback_data; 2985 blk_mq_requeue_request(req, false); 2986 spin_unlock_irq(&cqr->dq->lock); 2987 2988 return 0; 2989 } 2990 2991 /* 2992 * Go through all request on the dasd_block request queue, cancel them 2993 * on the respective dasd_device, and return them to the generic 2994 * block layer. 2995 */ 2996 static int dasd_flush_block_queue(struct dasd_block *block) 2997 { 2998 struct dasd_ccw_req *cqr, *n; 2999 int rc, i; 3000 struct list_head flush_queue; 3001 unsigned long flags; 3002 3003 INIT_LIST_HEAD(&flush_queue); 3004 spin_lock_bh(&block->queue_lock); 3005 rc = 0; 3006 restart: 3007 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 3008 /* if this request currently owned by a dasd_device cancel it */ 3009 if (cqr->status >= DASD_CQR_QUEUED) 3010 rc = dasd_cancel_req(cqr); 3011 if (rc < 0) 3012 break; 3013 /* Rechain request (including erp chain) so it won't be 3014 * touched by the dasd_block_tasklet anymore. 3015 * Replace the callback so we notice when the request 3016 * is returned from the dasd_device layer. 3017 */ 3018 cqr->callback = _dasd_wake_block_flush_cb; 3019 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 3020 list_move_tail(&cqr->blocklist, &flush_queue); 3021 if (i > 1) 3022 /* moved more than one request - need to restart */ 3023 goto restart; 3024 } 3025 spin_unlock_bh(&block->queue_lock); 3026 /* Now call the callback function of flushed requests */ 3027 restart_cb: 3028 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3029 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3030 /* Process finished ERP request. */ 3031 if (cqr->refers) { 3032 spin_lock_bh(&block->queue_lock); 3033 __dasd_process_erp(block->base, cqr); 3034 spin_unlock_bh(&block->queue_lock); 3035 /* restart list_for_xx loop since dasd_process_erp 3036 * might remove multiple elements */ 3037 goto restart_cb; 3038 } 3039 /* call the callback function */ 3040 spin_lock_irqsave(&cqr->dq->lock, flags); 3041 cqr->endclk = get_tod_clock(); 3042 list_del_init(&cqr->blocklist); 3043 __dasd_cleanup_cqr(cqr); 3044 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3045 } 3046 return rc; 3047 } 3048 3049 /* 3050 * Schedules a call to dasd_tasklet over the device tasklet. 3051 */ 3052 void dasd_schedule_block_bh(struct dasd_block *block) 3053 { 3054 /* Protect against rescheduling. */ 3055 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3056 return; 3057 /* life cycle of block is bound to it's base device */ 3058 dasd_get_device(block->base); 3059 tasklet_hi_schedule(&block->tasklet); 3060 } 3061 EXPORT_SYMBOL(dasd_schedule_block_bh); 3062 3063 3064 /* 3065 * SECTION: external block device operations 3066 * (request queue handling, open, release, etc.) 3067 */ 3068 3069 /* 3070 * Dasd request queue function. 
Called from ll_rw_blk.c 3071 */ 3072 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3073 const struct blk_mq_queue_data *qd) 3074 { 3075 struct dasd_block *block = hctx->queue->queuedata; 3076 struct dasd_queue *dq = hctx->driver_data; 3077 struct request *req = qd->rq; 3078 struct dasd_device *basedev; 3079 struct dasd_ccw_req *cqr; 3080 blk_status_t rc = BLK_STS_OK; 3081 3082 basedev = block->base; 3083 spin_lock_irq(&dq->lock); 3084 if (basedev->state < DASD_STATE_READY) { 3085 DBF_DEV_EVENT(DBF_ERR, basedev, 3086 "device not ready for request %p", req); 3087 rc = BLK_STS_IOERR; 3088 goto out; 3089 } 3090 3091 /* 3092 * if device is stopped do not fetch new requests 3093 * except failfast is active which will let requests fail 3094 * immediately in __dasd_block_start_head() 3095 */ 3096 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3097 DBF_DEV_EVENT(DBF_ERR, basedev, 3098 "device stopped request %p", req); 3099 rc = BLK_STS_RESOURCE; 3100 goto out; 3101 } 3102 3103 if (basedev->features & DASD_FEATURE_READONLY && 3104 rq_data_dir(req) == WRITE) { 3105 DBF_DEV_EVENT(DBF_ERR, basedev, 3106 "Rejecting write request %p", req); 3107 rc = BLK_STS_IOERR; 3108 goto out; 3109 } 3110 3111 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3112 (basedev->features & DASD_FEATURE_FAILFAST || 3113 blk_noretry_request(req))) { 3114 DBF_DEV_EVENT(DBF_ERR, basedev, 3115 "Rejecting failfast request %p", req); 3116 rc = BLK_STS_IOERR; 3117 goto out; 3118 } 3119 3120 cqr = basedev->discipline->build_cp(basedev, block, req); 3121 if (IS_ERR(cqr)) { 3122 if (PTR_ERR(cqr) == -EBUSY || 3123 PTR_ERR(cqr) == -ENOMEM || 3124 PTR_ERR(cqr) == -EAGAIN) { 3125 rc = BLK_STS_RESOURCE; 3126 goto out; 3127 } 3128 DBF_DEV_EVENT(DBF_ERR, basedev, 3129 "CCW creation failed (rc=%ld) on request %p", 3130 PTR_ERR(cqr), req); 3131 rc = BLK_STS_IOERR; 3132 goto out; 3133 } 3134 /* 3135 * Note: callback is set to dasd_return_cqr_cb in 3136 * __dasd_block_start_head to cover erp requests as well 3137 */ 3138 cqr->callback_data = req; 3139 cqr->status = DASD_CQR_FILLED; 3140 cqr->dq = dq; 3141 3142 blk_mq_start_request(req); 3143 spin_lock(&block->queue_lock); 3144 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3145 INIT_LIST_HEAD(&cqr->devlist); 3146 dasd_profile_start(block, cqr, req); 3147 dasd_schedule_block_bh(block); 3148 spin_unlock(&block->queue_lock); 3149 3150 out: 3151 spin_unlock_irq(&dq->lock); 3152 return rc; 3153 } 3154 3155 /* 3156 * Block timeout callback, called from the block layer 3157 * 3158 * Return values: 3159 * BLK_EH_RESET_TIMER if the request should be left running 3160 * BLK_EH_DONE if the request is handled or terminated 3161 * by the driver. 3162 */ 3163 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3164 { 3165 struct dasd_block *block = req->q->queuedata; 3166 struct dasd_device *device; 3167 struct dasd_ccw_req *cqr; 3168 unsigned long flags; 3169 int rc = 0; 3170 3171 cqr = blk_mq_rq_to_pdu(req); 3172 if (!cqr) 3173 return BLK_EH_DONE; 3174 3175 spin_lock_irqsave(&cqr->dq->lock, flags); 3176 device = cqr->startdev ? 
cqr->startdev : block->base; 3177 if (!device->blk_timeout) { 3178 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3179 return BLK_EH_RESET_TIMER; 3180 } 3181 DBF_DEV_EVENT(DBF_WARNING, device, 3182 " dasd_times_out cqr %p status %x", 3183 cqr, cqr->status); 3184 3185 spin_lock(&block->queue_lock); 3186 spin_lock(get_ccwdev_lock(device->cdev)); 3187 cqr->retries = -1; 3188 cqr->intrc = -ETIMEDOUT; 3189 if (cqr->status >= DASD_CQR_QUEUED) { 3190 rc = __dasd_cancel_req(cqr); 3191 } else if (cqr->status == DASD_CQR_FILLED || 3192 cqr->status == DASD_CQR_NEED_ERP) { 3193 cqr->status = DASD_CQR_TERMINATED; 3194 } else if (cqr->status == DASD_CQR_IN_ERP) { 3195 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3196 3197 list_for_each_entry_safe(searchcqr, nextcqr, 3198 &block->ccw_queue, blocklist) { 3199 tmpcqr = searchcqr; 3200 while (tmpcqr->refers) 3201 tmpcqr = tmpcqr->refers; 3202 if (tmpcqr != cqr) 3203 continue; 3204 /* searchcqr is an ERP request for cqr */ 3205 searchcqr->retries = -1; 3206 searchcqr->intrc = -ETIMEDOUT; 3207 if (searchcqr->status >= DASD_CQR_QUEUED) { 3208 rc = __dasd_cancel_req(searchcqr); 3209 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3210 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3211 searchcqr->status = DASD_CQR_TERMINATED; 3212 rc = 0; 3213 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3214 /* 3215 * Shouldn't happen; most recent ERP 3216 * request is at the front of queue 3217 */ 3218 continue; 3219 } 3220 break; 3221 } 3222 } 3223 spin_unlock(get_ccwdev_lock(device->cdev)); 3224 dasd_schedule_block_bh(block); 3225 spin_unlock(&block->queue_lock); 3226 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3227 3228 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3229 } 3230 3231 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3232 unsigned int idx) 3233 { 3234 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3235 3236 if (!dq) 3237 return -ENOMEM; 3238 3239 spin_lock_init(&dq->lock); 3240 hctx->driver_data = dq; 3241 3242 return 0; 3243 } 3244 3245 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3246 { 3247 kfree(hctx->driver_data); 3248 hctx->driver_data = NULL; 3249 } 3250 3251 static void dasd_request_done(struct request *req) 3252 { 3253 blk_mq_end_request(req, 0); 3254 blk_mq_run_hw_queues(req->q, true); 3255 } 3256 3257 static struct blk_mq_ops dasd_mq_ops = { 3258 .queue_rq = do_dasd_request, 3259 .complete = dasd_request_done, 3260 .timeout = dasd_times_out, 3261 .init_hctx = dasd_init_hctx, 3262 .exit_hctx = dasd_exit_hctx, 3263 }; 3264 3265 /* 3266 * Allocate and initialize request queue and default I/O scheduler. 3267 */ 3268 static int dasd_alloc_queue(struct dasd_block *block) 3269 { 3270 int rc; 3271 3272 block->tag_set.ops = &dasd_mq_ops; 3273 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3274 block->tag_set.nr_hw_queues = nr_hw_queues; 3275 block->tag_set.queue_depth = queue_depth; 3276 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3277 block->tag_set.numa_node = NUMA_NO_NODE; 3278 3279 rc = blk_mq_alloc_tag_set(&block->tag_set); 3280 if (rc) 3281 return rc; 3282 3283 block->request_queue = blk_mq_init_queue(&block->tag_set); 3284 if (IS_ERR(block->request_queue)) 3285 return PTR_ERR(block->request_queue); 3286 3287 block->request_queue->queuedata = block; 3288 3289 return 0; 3290 } 3291 3292 /* 3293 * Deactivate and free request queue. 
3294 */ 3295 static void dasd_free_queue(struct dasd_block *block) 3296 { 3297 if (block->request_queue) { 3298 blk_cleanup_queue(block->request_queue); 3299 blk_mq_free_tag_set(&block->tag_set); 3300 block->request_queue = NULL; 3301 } 3302 } 3303 3304 static int dasd_open(struct block_device *bdev, fmode_t mode) 3305 { 3306 struct dasd_device *base; 3307 int rc; 3308 3309 base = dasd_device_from_gendisk(bdev->bd_disk); 3310 if (!base) 3311 return -ENODEV; 3312 3313 atomic_inc(&base->block->open_count); 3314 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3315 rc = -ENODEV; 3316 goto unlock; 3317 } 3318 3319 if (!try_module_get(base->discipline->owner)) { 3320 rc = -EINVAL; 3321 goto unlock; 3322 } 3323 3324 if (dasd_probeonly) { 3325 dev_info(&base->cdev->dev, 3326 "Accessing the DASD failed because it is in " 3327 "probeonly mode\n"); 3328 rc = -EPERM; 3329 goto out; 3330 } 3331 3332 if (base->state <= DASD_STATE_BASIC) { 3333 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3334 " Cannot open unrecognized device"); 3335 rc = -ENODEV; 3336 goto out; 3337 } 3338 3339 if ((mode & FMODE_WRITE) && 3340 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3341 (base->features & DASD_FEATURE_READONLY))) { 3342 rc = -EROFS; 3343 goto out; 3344 } 3345 3346 dasd_put_device(base); 3347 return 0; 3348 3349 out: 3350 module_put(base->discipline->owner); 3351 unlock: 3352 atomic_dec(&base->block->open_count); 3353 dasd_put_device(base); 3354 return rc; 3355 } 3356 3357 static void dasd_release(struct gendisk *disk, fmode_t mode) 3358 { 3359 struct dasd_device *base = dasd_device_from_gendisk(disk); 3360 if (base) { 3361 atomic_dec(&base->block->open_count); 3362 module_put(base->discipline->owner); 3363 dasd_put_device(base); 3364 } 3365 } 3366 3367 /* 3368 * Return disk geometry. 3369 */ 3370 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3371 { 3372 struct dasd_device *base; 3373 3374 base = dasd_device_from_gendisk(bdev->bd_disk); 3375 if (!base) 3376 return -ENODEV; 3377 3378 if (!base->discipline || 3379 !base->discipline->fill_geometry) { 3380 dasd_put_device(base); 3381 return -EINVAL; 3382 } 3383 base->discipline->fill_geometry(base->block, geo); 3384 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3385 dasd_put_device(base); 3386 return 0; 3387 } 3388 3389 const struct block_device_operations 3390 dasd_device_operations = { 3391 .owner = THIS_MODULE, 3392 .open = dasd_open, 3393 .release = dasd_release, 3394 .ioctl = dasd_ioctl, 3395 .compat_ioctl = dasd_ioctl, 3396 .getgeo = dasd_getgeo, 3397 }; 3398 3399 /******************************************************************************* 3400 * end of block device operations 3401 */ 3402 3403 static void 3404 dasd_exit(void) 3405 { 3406 #ifdef CONFIG_PROC_FS 3407 dasd_proc_exit(); 3408 #endif 3409 dasd_eer_exit(); 3410 kmem_cache_destroy(dasd_page_cache); 3411 dasd_page_cache = NULL; 3412 dasd_gendisk_exit(); 3413 dasd_devmap_exit(); 3414 if (dasd_debug_area != NULL) { 3415 debug_unregister(dasd_debug_area); 3416 dasd_debug_area = NULL; 3417 } 3418 dasd_statistics_removeroot(); 3419 } 3420 3421 /* 3422 * SECTION: common functions for ccw_driver use 3423 */ 3424 3425 /* 3426 * Is the device read-only? 3427 * Note that this function does not report the setting of the 3428 * readonly device attribute, but how it is configured in z/VM. 
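 * The check issues DIAGNOSE 0x210 for the device number and tests the
 * read-only bit (0x80) in the vrdcvfla flag byte.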
3429 */ 3430 int dasd_device_is_ro(struct dasd_device *device) 3431 { 3432 struct ccw_dev_id dev_id; 3433 struct diag210 diag_data; 3434 int rc; 3435 3436 if (!MACHINE_IS_VM) 3437 return 0; 3438 ccw_device_get_id(device->cdev, &dev_id); 3439 memset(&diag_data, 0, sizeof(diag_data)); 3440 diag_data.vrdcdvno = dev_id.devno; 3441 diag_data.vrdclen = sizeof(diag_data); 3442 rc = diag210(&diag_data); 3443 if (rc == 0 || rc == 2) { 3444 return diag_data.vrdcvfla & 0x80; 3445 } else { 3446 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3447 dev_id.devno, rc); 3448 return 0; 3449 } 3450 } 3451 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3452 3453 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3454 { 3455 struct ccw_device *cdev = data; 3456 int ret; 3457 3458 ret = ccw_device_set_online(cdev); 3459 if (ret) 3460 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3461 dev_name(&cdev->dev), ret); 3462 } 3463 3464 /* 3465 * Initial attempt at a probe function. this can be simplified once 3466 * the other detection code is gone. 3467 */ 3468 int dasd_generic_probe(struct ccw_device *cdev, 3469 struct dasd_discipline *discipline) 3470 { 3471 int ret; 3472 3473 ret = dasd_add_sysfs_files(cdev); 3474 if (ret) { 3475 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3476 "dasd_generic_probe: could not add " 3477 "sysfs entries"); 3478 return ret; 3479 } 3480 cdev->handler = &dasd_int_handler; 3481 3482 /* 3483 * Automatically online either all dasd devices (dasd_autodetect) 3484 * or all devices specified with dasd= parameters during 3485 * initial probe. 3486 */ 3487 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3488 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3489 async_schedule(dasd_generic_auto_online, cdev); 3490 return 0; 3491 } 3492 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3493 3494 void dasd_generic_free_discipline(struct dasd_device *device) 3495 { 3496 /* Forget the discipline information. */ 3497 if (device->discipline) { 3498 if (device->discipline->uncheck_device) 3499 device->discipline->uncheck_device(device); 3500 module_put(device->discipline->owner); 3501 device->discipline = NULL; 3502 } 3503 if (device->base_discipline) { 3504 module_put(device->base_discipline->owner); 3505 device->base_discipline = NULL; 3506 } 3507 } 3508 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3509 3510 /* 3511 * This will one day be called from a global not_oper handler. 3512 * It is also used by driver_unregister during module unload. 3513 */ 3514 void dasd_generic_remove(struct ccw_device *cdev) 3515 { 3516 struct dasd_device *device; 3517 struct dasd_block *block; 3518 3519 cdev->handler = NULL; 3520 3521 device = dasd_device_from_cdev(cdev); 3522 if (IS_ERR(device)) { 3523 dasd_remove_sysfs_files(cdev); 3524 return; 3525 } 3526 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3527 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3528 /* Already doing offline processing */ 3529 dasd_put_device(device); 3530 dasd_remove_sysfs_files(cdev); 3531 return; 3532 } 3533 /* 3534 * This device is removed unconditionally. Set offline 3535 * flag to prevent dasd_open from opening it while it is 3536 * no quite down yet. 3537 */ 3538 dasd_set_target_state(device, DASD_STATE_NEW); 3539 /* dasd_delete_device destroys the device reference. 
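 * The block pointer is therefore saved first; the block itself is freed
 * only after the device has been deleted, since its life cycle is bound to
 * the device.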
*/ 3540 block = device->block; 3541 dasd_delete_device(device); 3542 /* 3543 * life cycle of block is bound to device, so delete it after 3544 * device was safely removed 3545 */ 3546 if (block) 3547 dasd_free_block(block); 3548 3549 dasd_remove_sysfs_files(cdev); 3550 } 3551 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3552 3553 /* 3554 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3555 * the device is detected for the first time and is supposed to be used 3556 * or the user has started activation through sysfs. 3557 */ 3558 int dasd_generic_set_online(struct ccw_device *cdev, 3559 struct dasd_discipline *base_discipline) 3560 { 3561 struct dasd_discipline *discipline; 3562 struct dasd_device *device; 3563 int rc; 3564 3565 /* first online clears initial online feature flag */ 3566 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3567 device = dasd_create_device(cdev); 3568 if (IS_ERR(device)) 3569 return PTR_ERR(device); 3570 3571 discipline = base_discipline; 3572 if (device->features & DASD_FEATURE_USEDIAG) { 3573 if (!dasd_diag_discipline_pointer) { 3574 /* Try to load the required module. */ 3575 rc = request_module(DASD_DIAG_MOD); 3576 if (rc) { 3577 pr_warn("%s Setting the DASD online failed " 3578 "because the required module %s " 3579 "could not be loaded (rc=%d)\n", 3580 dev_name(&cdev->dev), DASD_DIAG_MOD, 3581 rc); 3582 dasd_delete_device(device); 3583 return -ENODEV; 3584 } 3585 } 3586 /* Module init could have failed, so check again here after 3587 * request_module(). */ 3588 if (!dasd_diag_discipline_pointer) { 3589 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3590 dev_name(&cdev->dev)); 3591 dasd_delete_device(device); 3592 return -ENODEV; 3593 } 3594 discipline = dasd_diag_discipline_pointer; 3595 } 3596 if (!try_module_get(base_discipline->owner)) { 3597 dasd_delete_device(device); 3598 return -EINVAL; 3599 } 3600 if (!try_module_get(discipline->owner)) { 3601 module_put(base_discipline->owner); 3602 dasd_delete_device(device); 3603 return -EINVAL; 3604 } 3605 device->base_discipline = base_discipline; 3606 device->discipline = discipline; 3607 3608 /* check_device will allocate block device if necessary */ 3609 rc = discipline->check_device(device); 3610 if (rc) { 3611 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3612 dev_name(&cdev->dev), discipline->name, rc); 3613 module_put(discipline->owner); 3614 module_put(base_discipline->owner); 3615 dasd_delete_device(device); 3616 return rc; 3617 } 3618 3619 dasd_set_target_state(device, DASD_STATE_ONLINE); 3620 if (device->state <= DASD_STATE_KNOWN) { 3621 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3622 dev_name(&cdev->dev)); 3623 rc = -ENODEV; 3624 dasd_set_target_state(device, DASD_STATE_NEW); 3625 if (device->block) 3626 dasd_free_block(device->block); 3627 dasd_delete_device(device); 3628 } else 3629 pr_debug("dasd_generic device %s found\n", 3630 dev_name(&cdev->dev)); 3631 3632 wait_event(dasd_init_waitq, _wait_for_device(device)); 3633 3634 dasd_put_device(device); 3635 return rc; 3636 } 3637 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3638 3639 int dasd_generic_set_offline(struct ccw_device *cdev) 3640 { 3641 struct dasd_device *device; 3642 struct dasd_block *block; 3643 int max_count, open_count, rc; 3644 unsigned long flags; 3645 3646 rc = 0; 3647 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3648 device = dasd_device_from_cdev_locked(cdev); 3649 if (IS_ERR(device)) { 3650 
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3651 return PTR_ERR(device); 3652 } 3653 3654 /* 3655 * We must make sure that this device is currently not in use. 3656 * The open_count is increased for every opener, that includes 3657 * the blkdev_get in dasd_scan_partitions. We are only interested 3658 * in the other openers. 3659 */ 3660 if (device->block) { 3661 max_count = device->block->bdev ? 0 : -1; 3662 open_count = atomic_read(&device->block->open_count); 3663 if (open_count > max_count) { 3664 if (open_count > 0) 3665 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3666 dev_name(&cdev->dev), open_count); 3667 else 3668 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3669 dev_name(&cdev->dev)); 3670 rc = -EBUSY; 3671 goto out_err; 3672 } 3673 } 3674 3675 /* 3676 * Test if the offline processing is already running and exit if so. 3677 * If a safe offline is being processed this could only be a normal 3678 * offline that should be able to overtake the safe offline and 3679 * cancel any I/O we do not want to wait for any longer 3680 */ 3681 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3682 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3683 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3684 &device->flags); 3685 } else { 3686 rc = -EBUSY; 3687 goto out_err; 3688 } 3689 } 3690 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3691 3692 /* 3693 * if safe_offline is called set safe_offline_running flag and 3694 * clear safe_offline so that a call to normal offline 3695 * can overrun safe_offline processing 3696 */ 3697 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3698 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3699 /* need to unlock here to wait for outstanding I/O */ 3700 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3701 /* 3702 * If we want to set the device safe offline all IO operations 3703 * should be finished before continuing the offline process 3704 * so sync bdev first and then wait for our queues to become 3705 * empty 3706 */ 3707 if (device->block) { 3708 rc = fsync_bdev(device->block->bdev); 3709 if (rc != 0) 3710 goto interrupted; 3711 } 3712 dasd_schedule_device_bh(device); 3713 rc = wait_event_interruptible(shutdown_waitq, 3714 _wait_for_empty_queues(device)); 3715 if (rc != 0) 3716 goto interrupted; 3717 3718 /* 3719 * check if a normal offline process overtook the offline 3720 * processing in this case simply do nothing beside returning 3721 * that we got interrupted 3722 * otherwise mark safe offline as not running any longer and 3723 * continue with normal offline 3724 */ 3725 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3726 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3727 rc = -ERESTARTSYS; 3728 goto out_err; 3729 } 3730 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3731 } 3732 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3733 3734 dasd_set_target_state(device, DASD_STATE_NEW); 3735 /* dasd_delete_device destroys the device reference. 
*/ 3736 block = device->block; 3737 dasd_delete_device(device); 3738 /* 3739 * life cycle of block is bound to device, so delete it after 3740 * device was safely removed 3741 */ 3742 if (block) 3743 dasd_free_block(block); 3744 3745 return 0; 3746 3747 interrupted: 3748 /* interrupted by signal */ 3749 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3750 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3751 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3752 out_err: 3753 dasd_put_device(device); 3754 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3755 return rc; 3756 } 3757 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3758 3759 int dasd_generic_last_path_gone(struct dasd_device *device) 3760 { 3761 struct dasd_ccw_req *cqr; 3762 3763 dev_warn(&device->cdev->dev, "No operational channel path is left " 3764 "for the device\n"); 3765 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3766 /* First of all call extended error reporting. */ 3767 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3768 3769 if (device->state < DASD_STATE_BASIC) 3770 return 0; 3771 /* Device is active. We want to keep it. */ 3772 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3773 if ((cqr->status == DASD_CQR_IN_IO) || 3774 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3775 cqr->status = DASD_CQR_QUEUED; 3776 cqr->retries++; 3777 } 3778 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3779 dasd_device_clear_timer(device); 3780 dasd_schedule_device_bh(device); 3781 return 1; 3782 } 3783 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3784 3785 int dasd_generic_path_operational(struct dasd_device *device) 3786 { 3787 dev_info(&device->cdev->dev, "A channel path to the device has become " 3788 "operational\n"); 3789 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3790 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3791 if (device->stopped & DASD_UNRESUMED_PM) { 3792 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3793 dasd_restore_device(device); 3794 return 1; 3795 } 3796 dasd_schedule_device_bh(device); 3797 if (device->block) { 3798 dasd_schedule_block_bh(device->block); 3799 if (device->block->request_queue) 3800 blk_mq_run_hw_queues(device->block->request_queue, 3801 true); 3802 } 3803 3804 if (!device->stopped) 3805 wake_up(&generic_waitq); 3806 3807 return 1; 3808 } 3809 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3810 3811 int dasd_generic_notify(struct ccw_device *cdev, int event) 3812 { 3813 struct dasd_device *device; 3814 int ret; 3815 3816 device = dasd_device_from_cdev_locked(cdev); 3817 if (IS_ERR(device)) 3818 return 0; 3819 ret = 0; 3820 switch (event) { 3821 case CIO_GONE: 3822 case CIO_BOXED: 3823 case CIO_NO_PATH: 3824 dasd_path_no_path(device); 3825 ret = dasd_generic_last_path_gone(device); 3826 break; 3827 case CIO_OPER: 3828 ret = 1; 3829 if (dasd_path_get_opm(device)) 3830 ret = dasd_generic_path_operational(device); 3831 break; 3832 } 3833 dasd_put_device(device); 3834 return ret; 3835 } 3836 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3837 3838 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3839 { 3840 struct dasd_device *device; 3841 int chp, oldopm, hpfpm, ifccpm; 3842 3843 device = dasd_device_from_cdev_locked(cdev); 3844 if (IS_ERR(device)) 3845 return; 3846 3847 oldopm = dasd_path_get_opm(device); 3848 for (chp = 0; chp < 8; chp++) { 3849 if (path_event[chp] & PE_PATH_GONE) { 3850 dasd_path_notoper(device, chp); 3851 } 3852 if (path_event[chp] & PE_PATH_AVAILABLE) { 3853 
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to HPF errors.  Disable HPF altogether and
		 * use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors.  Trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

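/*
 * dasd_generic_space_exhaust() and dasd_generic_space_avail() form the
 * generic part of the out-of-space handling (typically hit on
 * thin-provisioned volumes whose extent pool ran full): while
 * DASD_STOPPED_NOSPC is set, new I/O is held back; once the discipline
 * reports that extent pool space is available again, the stop bit is
 * removed and the queues are restarted.
 */
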
/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing requests to the block layer will only work
		 * for block device requests.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again.  All ERP requests
		 * (cqr->refers) have a cqr->block pointer copied from the
		 * original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, then they are internal requests and go back
	 * to the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* move the requeue_queue entries back to the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

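/*
 * dasd_schedule_requeue() takes a device reference before scheduling the
 * work and drops it again if the work was already pending; for a
 * successfully scheduled work item the matching dasd_put_device() is done
 * at the end of do_requeue_requests().
 */
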
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * Call the discipline restore function; if the device is stopped
	 * do nothing, e.g. for disconnected devices.
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * If the resume failed for the DASD we put it in
		 * an UNRESUMED stop state.
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

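/*
 * For illustration: a discipline typically calls
 * dasd_generic_read_dev_chars() while the device is brought online to fill
 * its private RDC data.  A minimal sketch with placeholder names (MY_MAGIC
 * and struct my_rdc_data are assumptions, not defined in this file):
 *
 *	struct my_rdc_data rdc;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, MY_MAGIC, &rdc, sizeof(rdc));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Reading device characteristics failed, rc=%d\n", rc);
 */
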
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
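/*
 * The "dasd" debug area registered in dasd_init() is exposed through the
 * s390 debug feature; with debugfs mounted it can usually be read via
 * /sys/kernel/debug/s390dbf/dasd/sprintf, and the log level can be raised
 * by writing to the "level" file in the same directory (the exact path
 * depends on where debugfs is mounted).
 */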