// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new dasd_block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a dasd_block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

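/*
 * Note on the pools set up in dasd_alloc_device() above: the two ccw pages,
 * the single erp page and the two ese pages are turned into chunk lists
 * (dasd_init_chunklist) from which dasd_smalloc_request(), the ERP code and
 * dasd_fmalloc_request() later carve their requests under device->mem_lock.
 * GFP_ATOMIC allows the allocation from non-sleeping context, and GFP_DMA
 * keeps the channel programs 31-bit addressable for the channel subsystem.
 */
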
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

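/*
 * For reference, the state ladder walked by the two helpers above is
 * roughly:
 *
 *	DASD_STATE_NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                               ^
 *	                               +-- UNFMT (analysis failed; only the
 *	                                   way back down to BASIC is allowed)
 *
 * dasd_increase_state() walks one or more steps to the right towards
 * device->target, dasd_decrease_state() walks to the left; a non-zero
 * return code from any single transition stops the walk.
 */
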
/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

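/*
 * Illustrative use of the state machine interface (a sketch only; the real
 * callers are dasd_enable_device() below and the online/offline paths):
 *
 *	dasd_set_target_state(device, DASD_STATE_ONLINE);
 *	wait_event(dasd_init_waitq, device->state == device->target);
 *
 * A caller only ever sets the target state; the actual transitions are
 * performed here or, for deferred steps, by do_kick_device() on the kernel
 * event daemon.
 */
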
/*
 * Enable devices with device numbers in [from..to].
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

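/*
 * The counter computed above is simply the current ccw queue length,
 * clamped to 31, and is used as an index into the dasd_io_nr_req /
 * dasd_read_nr_req histograms: e.g. starting a request while three others
 * are already queued on the block increments dasd_io_nr_req[3].
 */
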
/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index) \
{ \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		; \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

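/*
 * dasd_profile_counter() above places "value" into one of 32 roughly
 * logarithmic buckets: the resulting index is the smallest i (capped at 31)
 * for which value >> (2 + i) becomes zero.  A worked example: value = 40
 * gives index 4 (40 >> 6 == 0, while 40 >> 5 == 1), so a 40-sector request
 * ends up in dasd_io_secs[4].
 */
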
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

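/*
 * dasd_profile_on()/off()/reset() run in process context (debugfs writes),
 * while dasd_profile_start()/end() take the same locks from the request
 * processing tasklets (softirq context), hence the _bh spinlock variants
 * here.  Allocating the profile data with GFP_KERNEL outside the lock in
 * dasd_profile_on() keeps the critical section free of sleeping calls.
 */
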
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

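/*
 * The statistics files created below live under the "dasd" debugfs
 * directory, typically mounted at /sys/kernel/debug (the mount point is an
 * assumption, not enforced here).  Illustrative shell usage:
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	cat          /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * "reset", "on" and "off" are the only keywords accepted by
 * dasd_stats_write() above; anything else returns -EINVAL.
 */
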
static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

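/*
 * The two request allocators that follow carve a dasd_ccw_req out of the
 * per-device memory pools set up in dasd_alloc_device():
 * dasd_smalloc_request() uses the ccw_chunks pool and optionally embeds the
 * cqr itself (rounded up to an 8 byte boundary) in front of the channel
 * program and data area, while dasd_fmalloc_request() uses the separate
 * ese_chunks pool, so on-the-fly format requests do not compete with normal
 * I/O for the ccw pool.
 */
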
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0: /* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

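/*
 * The expires argument is a relative time in jiffies; callers in this file
 * use e.g. dasd_device_set_timer(device, 50) for a retry in roughly half a
 * second (with HZ == 100) and dasd_device_set_timer(device, 5 * HZ) for a
 * five second retry, while passing 0 simply cancels a pending timer.
 */
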
/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else { /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
*/ 1944 if (cqr->status == DASD_CQR_QUEUED || 1945 cqr->status == DASD_CQR_IN_IO || 1946 cqr->status == DASD_CQR_CLEAR_PENDING) 1947 continue; 1948 if (cqr->status == DASD_CQR_ERROR) { 1949 __dasd_device_recovery(device, cqr); 1950 } 1951 /* Rechain finished requests to final queue */ 1952 list_move_tail(&cqr->devlist, final_queue); 1953 } 1954 } 1955 1956 static void __dasd_process_cqr(struct dasd_device *device, 1957 struct dasd_ccw_req *cqr) 1958 { 1959 char errorstring[ERRORLENGTH]; 1960 1961 switch (cqr->status) { 1962 case DASD_CQR_SUCCESS: 1963 cqr->status = DASD_CQR_DONE; 1964 break; 1965 case DASD_CQR_ERROR: 1966 cqr->status = DASD_CQR_NEED_ERP; 1967 break; 1968 case DASD_CQR_CLEARED: 1969 cqr->status = DASD_CQR_TERMINATED; 1970 break; 1971 default: 1972 /* internal error 12 - wrong cqr status */ 1973 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status); 1974 dev_err(&device->cdev->dev, 1975 "An error occurred in the DASD device driver, " 1976 "reason=%s\n", errorstring); 1977 BUG(); 1978 } 1979 if (cqr->callback) 1980 cqr->callback(cqr, cqr->callback_data); 1981 } 1982 1983 /* 1984 * The cqrs from the final queue are returned to the upper layer 1985 * by setting a dasd_block state and calling the callback function. 1986 */ 1987 static void __dasd_device_process_final_queue(struct dasd_device *device, 1988 struct list_head *final_queue) 1989 { 1990 struct list_head *l, *n; 1991 struct dasd_ccw_req *cqr; 1992 struct dasd_block *block; 1993 1994 list_for_each_safe(l, n, final_queue) { 1995 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1996 list_del_init(&cqr->devlist); 1997 block = cqr->block; 1998 if (!block) { 1999 __dasd_process_cqr(device, cqr); 2000 } else { 2001 spin_lock_bh(&block->queue_lock); 2002 __dasd_process_cqr(device, cqr); 2003 spin_unlock_bh(&block->queue_lock); 2004 } 2005 } 2006 } 2007 2008 /* 2009 * Take a look at the first request on the ccw queue and check 2010 * if it reached its expire time. If so, terminate the IO.
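 * Only the head of the queue is examined; in sketch form the expiry test
 * implemented below is (hypothetical helper, for illustration only):
 *
 *	expired = cqr->status == DASD_CQR_IN_IO && cqr->expires != 0 &&
 *		  time_after_eq(jiffies, cqr->expires + cqr->starttime);
 *
 * i.e. cqr->expires is a relative timeout in jiffies, counted from the
 * moment the request was started, not an absolute point in time.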
2011 */ 2012 static void __dasd_device_check_expire(struct dasd_device *device) 2013 { 2014 struct dasd_ccw_req *cqr; 2015 2016 if (list_empty(&device->ccw_queue)) 2017 return; 2018 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2019 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 2020 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 2021 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2022 /* 2023 * IO in safe offline processing should not 2024 * run out of retries 2025 */ 2026 cqr->retries++; 2027 } 2028 if (device->discipline->term_IO(cqr) != 0) { 2029 /* Hmpf, try again in 5 sec */ 2030 dev_err(&device->cdev->dev, 2031 "cqr %p timed out (%lus) but cannot be " 2032 "ended, retrying in 5 s\n", 2033 cqr, (cqr->expires/HZ)); 2034 cqr->expires += 5*HZ; 2035 dasd_device_set_timer(device, 5*HZ); 2036 } else { 2037 dev_err(&device->cdev->dev, 2038 "cqr %p timed out (%lus), %i retries " 2039 "remaining\n", cqr, (cqr->expires/HZ), 2040 cqr->retries); 2041 } 2042 } 2043 } 2044 2045 /* 2046 * return 1 when device is not eligible for IO 2047 */ 2048 static int __dasd_device_is_unusable(struct dasd_device *device, 2049 struct dasd_ccw_req *cqr) 2050 { 2051 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); 2052 2053 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2054 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2055 /* 2056 * dasd is being set offline 2057 * but it is no safe offline where we have to allow I/O 2058 */ 2059 return 1; 2060 } 2061 if (device->stopped) { 2062 if (device->stopped & mask) { 2063 /* stopped and CQR will not change that. */ 2064 return 1; 2065 } 2066 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2067 /* CQR is not able to change device to 2068 * operational. */ 2069 return 1; 2070 } 2071 /* CQR required to get device operational. */ 2072 } 2073 return 0; 2074 } 2075 2076 /* 2077 * Take a look at the first request on the ccw queue and check 2078 * if it needs to be started. 2079 */ 2080 static void __dasd_device_start_head(struct dasd_device *device) 2081 { 2082 struct dasd_ccw_req *cqr; 2083 int rc; 2084 2085 if (list_empty(&device->ccw_queue)) 2086 return; 2087 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2088 if (cqr->status != DASD_CQR_QUEUED) 2089 return; 2090 /* if device is not usable return request to upper layer */ 2091 if (__dasd_device_is_unusable(device, cqr)) { 2092 cqr->intrc = -EAGAIN; 2093 cqr->status = DASD_CQR_CLEARED; 2094 dasd_schedule_device_bh(device); 2095 return; 2096 } 2097 2098 rc = device->discipline->start_IO(cqr); 2099 if (rc == 0) 2100 dasd_device_set_timer(device, cqr->expires); 2101 else if (rc == -EACCES) { 2102 dasd_schedule_device_bh(device); 2103 } else 2104 /* Hmpf, try again in 1/2 sec */ 2105 dasd_device_set_timer(device, 50); 2106 } 2107 2108 static void __dasd_device_check_path_events(struct dasd_device *device) 2109 { 2110 int rc; 2111 2112 if (!dasd_path_get_tbvpm(device)) 2113 return; 2114 2115 if (device->stopped & 2116 ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 2117 return; 2118 rc = device->discipline->verify_path(device, 2119 dasd_path_get_tbvpm(device)); 2120 if (rc) 2121 dasd_device_set_timer(device, 50); 2122 else 2123 dasd_path_clear_all_verify(device); 2124 }; 2125 2126 /* 2127 * Go through all request on the dasd_device request queue, 2128 * terminate them on the cdev if necessary, and return them to the 2129 * submitting layer via callback. 
2130 * Note: 2131 * Make sure that all 'submitting layers' still exist when 2132 * this function is called! In other words, when 'device' is a base 2133 * device then all block layer requests must have been removed before 2134 * via dasd_flush_block_queue. 2135 */ 2136 int dasd_flush_device_queue(struct dasd_device *device) 2137 { 2138 struct dasd_ccw_req *cqr, *n; 2139 int rc; 2140 struct list_head flush_queue; 2141 2142 INIT_LIST_HEAD(&flush_queue); 2143 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2144 rc = 0; 2145 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2146 /* Check status and move request to flush_queue */ 2147 switch (cqr->status) { 2148 case DASD_CQR_IN_IO: 2149 rc = device->discipline->term_IO(cqr); 2150 if (rc) { 2151 /* unable to terminate request */ 2152 dev_err(&device->cdev->dev, 2153 "Flushing the DASD request queue " 2154 "failed for request %p\n", cqr); 2155 /* stop flush processing */ 2156 goto finished; 2157 } 2158 break; 2159 case DASD_CQR_QUEUED: 2160 cqr->stopclk = get_tod_clock(); 2161 cqr->status = DASD_CQR_CLEARED; 2162 break; 2163 default: /* no need to modify the others */ 2164 break; 2165 } 2166 list_move_tail(&cqr->devlist, &flush_queue); 2167 } 2168 finished: 2169 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2170 /* 2171 * After this point all requests must be in state CLEAR_PENDING, 2172 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2173 * one of the others. 2174 */ 2175 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2176 wait_event(dasd_flush_wq, 2177 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2178 /* 2179 * Now set each request back to TERMINATED, DONE or NEED_ERP 2180 * and call the callback function of flushed requests 2181 */ 2182 __dasd_device_process_final_queue(device, &flush_queue); 2183 return rc; 2184 } 2185 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2186 2187 /* 2188 * Acquire the device lock and process queues for the device. 2189 */ 2190 static void dasd_device_tasklet(unsigned long data) 2191 { 2192 struct dasd_device *device = (struct dasd_device *) data; 2193 struct list_head final_queue; 2194 2195 atomic_set(&device->tasklet_scheduled, 0); 2196 INIT_LIST_HEAD(&final_queue); 2197 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2198 /* Check expire time of first request on the ccw queue. */ 2199 __dasd_device_check_expire(device); 2200 /* find final requests on ccw queue */ 2201 __dasd_device_process_ccw_queue(device, &final_queue); 2202 __dasd_device_check_path_events(device); 2203 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2204 /* Now call the callback function of requests with final status */ 2205 __dasd_device_process_final_queue(device, &final_queue); 2206 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2207 /* Now check if the head of the ccw queue needs to be started. */ 2208 __dasd_device_start_head(device); 2209 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2210 if (waitqueue_active(&shutdown_waitq)) 2211 wake_up(&shutdown_waitq); 2212 dasd_put_device(device); 2213 } 2214 2215 /* 2216 * Schedules a call to dasd_device_tasklet over the device tasklet. 2217 */ 2218 void dasd_schedule_device_bh(struct dasd_device *device) 2219 { 2220 /* Protect against rescheduling.
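 * tasklet_scheduled acts as a schedule-once latch: the atomic_cmpxchg()
 * below lets only the first caller through until dasd_device_tasklet()
 * resets the flag to 0 again.  The winner also takes a device reference
 * with dasd_get_device(), which the tasklet drops via dasd_put_device()
 * when it is done, so the device cannot disappear while a tasklet run
 * is pending.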
*/ 2221 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2222 return; 2223 dasd_get_device(device); 2224 tasklet_hi_schedule(&device->tasklet); 2225 } 2226 EXPORT_SYMBOL(dasd_schedule_device_bh); 2227 2228 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2229 { 2230 device->stopped |= bits; 2231 } 2232 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2233 2234 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2235 { 2236 device->stopped &= ~bits; 2237 if (!device->stopped) 2238 wake_up(&generic_waitq); 2239 } 2240 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2241 2242 /* 2243 * Queue a request to the head of the device ccw_queue. 2244 * Start the I/O if possible. 2245 */ 2246 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2247 { 2248 struct dasd_device *device; 2249 unsigned long flags; 2250 2251 device = cqr->startdev; 2252 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2253 cqr->status = DASD_CQR_QUEUED; 2254 list_add(&cqr->devlist, &device->ccw_queue); 2255 /* let the bh start the request to keep them in order */ 2256 dasd_schedule_device_bh(device); 2257 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2258 } 2259 EXPORT_SYMBOL(dasd_add_request_head); 2260 2261 /* 2262 * Queue a request to the tail of the device ccw_queue. 2263 * Start the I/O if possible. 2264 */ 2265 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2266 { 2267 struct dasd_device *device; 2268 unsigned long flags; 2269 2270 device = cqr->startdev; 2271 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2272 cqr->status = DASD_CQR_QUEUED; 2273 list_add_tail(&cqr->devlist, &device->ccw_queue); 2274 /* let the bh start the request to keep them in order */ 2275 dasd_schedule_device_bh(device); 2276 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2277 } 2278 EXPORT_SYMBOL(dasd_add_request_tail); 2279 2280 /* 2281 * Wakeup helper for the 'sleep_on' functions. 2282 */ 2283 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2284 { 2285 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2286 cqr->callback_data = DASD_SLEEPON_END_TAG; 2287 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2288 wake_up(&generic_waitq); 2289 } 2290 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2291 2292 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2293 { 2294 struct dasd_device *device; 2295 int rc; 2296 2297 device = cqr->startdev; 2298 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2299 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2300 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2301 return rc; 2302 } 2303 2304 /* 2305 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
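 * For requests with DASD_CQR_FLAGS_USE_ERP set, the ladder implemented
 * below is, in sketch form:
 *
 *	DASD_CQR_FILLED      -> not started yet, nothing to recover, return 0
 *	DASD_CQR_TERMINATED  -> discipline->handle_terminated_request()
 *	DASD_CQR_NEED_ERP    -> run discipline->erp_action()
 *	DASD_CQR_FAILED      -> log the sense data
 *	cqr->refers set      -> ERP finished, run __dasd_process_erp()
 *
 * Every branch that (re)queues work returns 1 so that the sleep_on loop
 * iterates once more; everything else falls through and returns 0.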
2306 */ 2307 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2308 { 2309 struct dasd_device *device; 2310 dasd_erp_fn_t erp_fn; 2311 2312 if (cqr->status == DASD_CQR_FILLED) 2313 return 0; 2314 device = cqr->startdev; 2315 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2316 if (cqr->status == DASD_CQR_TERMINATED) { 2317 device->discipline->handle_terminated_request(cqr); 2318 return 1; 2319 } 2320 if (cqr->status == DASD_CQR_NEED_ERP) { 2321 erp_fn = device->discipline->erp_action(cqr); 2322 erp_fn(cqr); 2323 return 1; 2324 } 2325 if (cqr->status == DASD_CQR_FAILED) 2326 dasd_log_sense(cqr, &cqr->irb); 2327 if (cqr->refers) { 2328 __dasd_process_erp(device, cqr); 2329 return 1; 2330 } 2331 } 2332 return 0; 2333 } 2334 2335 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2336 { 2337 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2338 if (cqr->refers) /* erp is not done yet */ 2339 return 1; 2340 return ((cqr->status != DASD_CQR_DONE) && 2341 (cqr->status != DASD_CQR_FAILED)); 2342 } else 2343 return (cqr->status == DASD_CQR_FILLED); 2344 } 2345 2346 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2347 { 2348 struct dasd_device *device; 2349 int rc; 2350 struct list_head ccw_queue; 2351 struct dasd_ccw_req *cqr; 2352 2353 INIT_LIST_HEAD(&ccw_queue); 2354 maincqr->status = DASD_CQR_FILLED; 2355 device = maincqr->startdev; 2356 list_add(&maincqr->blocklist, &ccw_queue); 2357 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2358 cqr = list_first_entry(&ccw_queue, 2359 struct dasd_ccw_req, blocklist)) { 2360 2361 if (__dasd_sleep_on_erp(cqr)) 2362 continue; 2363 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2364 continue; 2365 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2366 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2367 cqr->status = DASD_CQR_FAILED; 2368 cqr->intrc = -EPERM; 2369 continue; 2370 } 2371 /* Non-temporary stop condition will trigger fail fast */ 2372 if (device->stopped & ~DASD_STOPPED_PENDING && 2373 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2374 (!dasd_eer_enabled(device))) { 2375 cqr->status = DASD_CQR_FAILED; 2376 cqr->intrc = -ENOLINK; 2377 continue; 2378 } 2379 /* 2380 * Don't try to start requests if device is in 2381 * offline processing, it might wait forever 2382 */ 2383 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2384 cqr->status = DASD_CQR_FAILED; 2385 cqr->intrc = -ENODEV; 2386 continue; 2387 } 2388 /* 2389 * Don't try to start requests if device is stopped 2390 * except path verification requests 2391 */ 2392 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2393 if (interruptible) { 2394 rc = wait_event_interruptible( 2395 generic_waitq, !(device->stopped)); 2396 if (rc == -ERESTARTSYS) { 2397 cqr->status = DASD_CQR_FAILED; 2398 maincqr->intrc = rc; 2399 continue; 2400 } 2401 } else 2402 wait_event(generic_waitq, !(device->stopped)); 2403 } 2404 if (!cqr->callback) 2405 cqr->callback = dasd_wakeup_cb; 2406 2407 cqr->callback_data = DASD_SLEEPON_START_TAG; 2408 dasd_add_request_tail(cqr); 2409 if (interruptible) { 2410 rc = wait_event_interruptible( 2411 generic_waitq, _wait_for_wakeup(cqr)); 2412 if (rc == -ERESTARTSYS) { 2413 dasd_cancel_req(cqr); 2414 /* wait (non-interruptible) for final status */ 2415 wait_event(generic_waitq, 2416 _wait_for_wakeup(cqr)); 2417 cqr->status = DASD_CQR_FAILED; 2418 maincqr->intrc = rc; 2419 continue; 2420 } 2421 } else 2422 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2423 } 2424 2425 maincqr->endclk = 
get_tod_clock(); 2426 if ((maincqr->status != DASD_CQR_DONE) && 2427 (maincqr->intrc != -ERESTARTSYS)) 2428 dasd_log_sense(maincqr, &maincqr->irb); 2429 if (maincqr->status == DASD_CQR_DONE) 2430 rc = 0; 2431 else if (maincqr->intrc) 2432 rc = maincqr->intrc; 2433 else 2434 rc = -EIO; 2435 return rc; 2436 } 2437 2438 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2439 { 2440 struct dasd_ccw_req *cqr; 2441 2442 list_for_each_entry(cqr, ccw_queue, blocklist) { 2443 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2444 return 0; 2445 } 2446 2447 return 1; 2448 } 2449 2450 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2451 { 2452 struct dasd_device *device; 2453 struct dasd_ccw_req *cqr, *n; 2454 u8 *sense = NULL; 2455 int rc; 2456 2457 retry: 2458 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2459 device = cqr->startdev; 2460 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2461 continue; 2462 2463 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2464 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2465 cqr->status = DASD_CQR_FAILED; 2466 cqr->intrc = -EPERM; 2467 continue; 2468 } 2469 /*Non-temporary stop condition will trigger fail fast*/ 2470 if (device->stopped & ~DASD_STOPPED_PENDING && 2471 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2472 !dasd_eer_enabled(device)) { 2473 cqr->status = DASD_CQR_FAILED; 2474 cqr->intrc = -EAGAIN; 2475 continue; 2476 } 2477 2478 /*Don't try to start requests if device is stopped*/ 2479 if (interruptible) { 2480 rc = wait_event_interruptible( 2481 generic_waitq, !device->stopped); 2482 if (rc == -ERESTARTSYS) { 2483 cqr->status = DASD_CQR_FAILED; 2484 cqr->intrc = rc; 2485 continue; 2486 } 2487 } else 2488 wait_event(generic_waitq, !(device->stopped)); 2489 2490 if (!cqr->callback) 2491 cqr->callback = dasd_wakeup_cb; 2492 cqr->callback_data = DASD_SLEEPON_START_TAG; 2493 dasd_add_request_tail(cqr); 2494 } 2495 2496 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2497 2498 rc = 0; 2499 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2500 /* 2501 * In some cases the 'File Protected' or 'Incorrect Length' 2502 * error might be expected and error recovery would be 2503 * unnecessary in these cases. Check if the according suppress 2504 * bit is set. 2505 */ 2506 sense = dasd_get_sense(&cqr->irb); 2507 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2508 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2509 continue; 2510 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2511 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2512 continue; 2513 2514 /* 2515 * for alias devices simplify error recovery and 2516 * return to upper layer 2517 * do not skip ERP requests 2518 */ 2519 if (cqr->startdev != cqr->basedev && !cqr->refers && 2520 (cqr->status == DASD_CQR_TERMINATED || 2521 cqr->status == DASD_CQR_NEED_ERP)) 2522 return -EAGAIN; 2523 2524 /* normal recovery for basedev IO */ 2525 if (__dasd_sleep_on_erp(cqr)) 2526 /* handle erp first */ 2527 goto retry; 2528 } 2529 2530 return 0; 2531 } 2532 2533 /* 2534 * Queue a request to the tail of the device ccw_queue and wait for 2535 * it's completion. 2536 */ 2537 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2538 { 2539 return _dasd_sleep_on(cqr, 0); 2540 } 2541 EXPORT_SYMBOL(dasd_sleep_on); 2542 2543 /* 2544 * Start requests from a ccw_queue and wait for their completion. 
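 * Typical use by a discipline looks like this (sketch only; the list and
 * request variables are the caller's own):
 *
 *	LIST_HEAD(ccw_queue);
 *
 *	list_add_tail(&cqr->blocklist, &ccw_queue);
 *	...
 *	rc = dasd_sleep_on_queue(&ccw_queue);
 *
 * All requests on the list are started via dasd_add_request_tail() and the
 * call normally returns only after every request has reached a final
 * status (alias device errors may cause an early -EAGAIN).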
2545 */ 2546 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2547 { 2548 return _dasd_sleep_on_queue(ccw_queue, 0); 2549 } 2550 EXPORT_SYMBOL(dasd_sleep_on_queue); 2551 2552 /* 2553 * Start requests from a ccw_queue and wait interruptible for their completion. 2554 */ 2555 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2556 { 2557 return _dasd_sleep_on_queue(ccw_queue, 1); 2558 } 2559 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2560 2561 /* 2562 * Queue a request to the tail of the device ccw_queue and wait 2563 * interruptible for it's completion. 2564 */ 2565 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2566 { 2567 return _dasd_sleep_on(cqr, 1); 2568 } 2569 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2570 2571 /* 2572 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2573 * for eckd devices) the currently running request has to be terminated 2574 * and be put back to status queued, before the special request is added 2575 * to the head of the queue. Then the special request is waited on normally. 2576 */ 2577 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2578 { 2579 struct dasd_ccw_req *cqr; 2580 int rc; 2581 2582 if (list_empty(&device->ccw_queue)) 2583 return 0; 2584 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2585 rc = device->discipline->term_IO(cqr); 2586 if (!rc) 2587 /* 2588 * CQR terminated because a more important request is pending. 2589 * Undo decreasing of retry counter because this is 2590 * not an error case. 2591 */ 2592 cqr->retries++; 2593 return rc; 2594 } 2595 2596 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2597 { 2598 struct dasd_device *device; 2599 int rc; 2600 2601 device = cqr->startdev; 2602 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2603 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2604 cqr->status = DASD_CQR_FAILED; 2605 cqr->intrc = -EPERM; 2606 return -EIO; 2607 } 2608 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2609 rc = _dasd_term_running_cqr(device); 2610 if (rc) { 2611 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2612 return rc; 2613 } 2614 cqr->callback = dasd_wakeup_cb; 2615 cqr->callback_data = DASD_SLEEPON_START_TAG; 2616 cqr->status = DASD_CQR_QUEUED; 2617 /* 2618 * add new request as second 2619 * first the terminated cqr needs to be finished 2620 */ 2621 list_add(&cqr->devlist, device->ccw_queue.next); 2622 2623 /* let the bh start the request to keep them in order */ 2624 dasd_schedule_device_bh(device); 2625 2626 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2627 2628 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2629 2630 if (cqr->status == DASD_CQR_DONE) 2631 rc = 0; 2632 else if (cqr->intrc) 2633 rc = cqr->intrc; 2634 else 2635 rc = -EIO; 2636 2637 /* kick tasklets */ 2638 dasd_schedule_device_bh(device); 2639 if (device->block) 2640 dasd_schedule_block_bh(device->block); 2641 2642 return rc; 2643 } 2644 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2645 2646 /* 2647 * Cancels a request that was started with dasd_sleep_on_req. 2648 * This is useful to timeout requests. The request will be 2649 * terminated if it is currently in i/o. 2650 * Returns 0 if request termination was successful 2651 * negative error code if termination failed 2652 * Cancellation of a request is an asynchronous operation! The calling 2653 * function has to wait until the request is properly returned via callback. 
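 * Depending on the current state the request is either just marked
 * DASD_CQR_CLEARED (when it was still queued) or term_IO() is issued for
 * a request that is actively performing I/O; in the latter case the final
 * status only arrives with the interrupt for the clear function.
 * dasd_flush_block_queue() and dasd_times_out() below rely on this
 * mechanism to abort requests.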
2654 */ 2655 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2656 { 2657 struct dasd_device *device = cqr->startdev; 2658 int rc = 0; 2659 2660 switch (cqr->status) { 2661 case DASD_CQR_QUEUED: 2662 /* request was not started - just set to cleared */ 2663 cqr->status = DASD_CQR_CLEARED; 2664 break; 2665 case DASD_CQR_IN_IO: 2666 /* request in IO - terminate IO and release again */ 2667 rc = device->discipline->term_IO(cqr); 2668 if (rc) { 2669 dev_err(&device->cdev->dev, 2670 "Cancelling request %p failed with rc=%d\n", 2671 cqr, rc); 2672 } else { 2673 cqr->stopclk = get_tod_clock(); 2674 } 2675 break; 2676 default: /* already finished or clear pending - do nothing */ 2677 break; 2678 } 2679 dasd_schedule_device_bh(device); 2680 return rc; 2681 } 2682 2683 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2684 { 2685 struct dasd_device *device = cqr->startdev; 2686 unsigned long flags; 2687 int rc; 2688 2689 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2690 rc = __dasd_cancel_req(cqr); 2691 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2692 return rc; 2693 } 2694 2695 /* 2696 * SECTION: Operations of the dasd_block layer. 2697 */ 2698 2699 /* 2700 * Timeout function for dasd_block. This is used when the block layer 2701 * is waiting for something that may not come reliably, (e.g. a state 2702 * change interrupt) 2703 */ 2704 static void dasd_block_timeout(struct timer_list *t) 2705 { 2706 unsigned long flags; 2707 struct dasd_block *block; 2708 2709 block = from_timer(block, t, timer); 2710 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2711 /* re-activate request queue */ 2712 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2713 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2714 dasd_schedule_block_bh(block); 2715 blk_mq_run_hw_queues(block->request_queue, true); 2716 } 2717 2718 /* 2719 * Setup timeout for a dasd_block in jiffies. 2720 */ 2721 void dasd_block_set_timer(struct dasd_block *block, int expires) 2722 { 2723 if (expires == 0) 2724 del_timer(&block->timer); 2725 else 2726 mod_timer(&block->timer, jiffies + expires); 2727 } 2728 EXPORT_SYMBOL(dasd_block_set_timer); 2729 2730 /* 2731 * Clear timeout for a dasd_block. 2732 */ 2733 void dasd_block_clear_timer(struct dasd_block *block) 2734 { 2735 del_timer(&block->timer); 2736 } 2737 EXPORT_SYMBOL(dasd_block_clear_timer); 2738 2739 /* 2740 * Process finished error recovery ccw. 
2741 */ 2742 static void __dasd_process_erp(struct dasd_device *device, 2743 struct dasd_ccw_req *cqr) 2744 { 2745 dasd_erp_fn_t erp_fn; 2746 2747 if (cqr->status == DASD_CQR_DONE) 2748 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2749 else 2750 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2751 erp_fn = device->discipline->erp_postaction(cqr); 2752 erp_fn(cqr); 2753 } 2754 2755 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2756 { 2757 struct request *req; 2758 blk_status_t error = BLK_STS_OK; 2759 unsigned int proc_bytes; 2760 int status; 2761 2762 req = (struct request *) cqr->callback_data; 2763 dasd_profile_end(cqr->block, cqr, req); 2764 2765 proc_bytes = cqr->proc_bytes; 2766 status = cqr->block->base->discipline->free_cp(cqr, req); 2767 if (status < 0) 2768 error = errno_to_blk_status(status); 2769 else if (status == 0) { 2770 switch (cqr->intrc) { 2771 case -EPERM: 2772 error = BLK_STS_NEXUS; 2773 break; 2774 case -ENOLINK: 2775 error = BLK_STS_TRANSPORT; 2776 break; 2777 case -ETIMEDOUT: 2778 error = BLK_STS_TIMEOUT; 2779 break; 2780 default: 2781 error = BLK_STS_IOERR; 2782 break; 2783 } 2784 } 2785 2786 /* 2787 * We need to take care for ETIMEDOUT errors here since the 2788 * complete callback does not get called in this case. 2789 * Take care of all errors here and avoid additional code to 2790 * transfer the error value to the complete callback. 2791 */ 2792 if (error) { 2793 blk_mq_end_request(req, error); 2794 blk_mq_run_hw_queues(req->q, true); 2795 } else { 2796 /* 2797 * Partial completed requests can happen with ESE devices. 2798 * During read we might have gotten a NRF error and have to 2799 * complete a request partially. 2800 */ 2801 if (proc_bytes) { 2802 blk_update_request(req, BLK_STS_OK, 2803 blk_rq_bytes(req) - proc_bytes); 2804 blk_mq_requeue_request(req, true); 2805 } else if (likely(!blk_should_fake_timeout(req->q))) { 2806 blk_mq_complete_request(req); 2807 } 2808 } 2809 } 2810 2811 /* 2812 * Process ccw request queue. 2813 */ 2814 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2815 struct list_head *final_queue) 2816 { 2817 struct list_head *l, *n; 2818 struct dasd_ccw_req *cqr; 2819 dasd_erp_fn_t erp_fn; 2820 unsigned long flags; 2821 struct dasd_device *base = block->base; 2822 2823 restart: 2824 /* Process request with final status. */ 2825 list_for_each_safe(l, n, &block->ccw_queue) { 2826 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2827 if (cqr->status != DASD_CQR_DONE && 2828 cqr->status != DASD_CQR_FAILED && 2829 cqr->status != DASD_CQR_NEED_ERP && 2830 cqr->status != DASD_CQR_TERMINATED) 2831 continue; 2832 2833 if (cqr->status == DASD_CQR_TERMINATED) { 2834 base->discipline->handle_terminated_request(cqr); 2835 goto restart; 2836 } 2837 2838 /* Process requests that may be recovered */ 2839 if (cqr->status == DASD_CQR_NEED_ERP) { 2840 erp_fn = base->discipline->erp_action(cqr); 2841 if (IS_ERR(erp_fn(cqr))) 2842 continue; 2843 goto restart; 2844 } 2845 2846 /* log sense for fatal error */ 2847 if (cqr->status == DASD_CQR_FAILED) { 2848 dasd_log_sense(cqr, &cqr->irb); 2849 } 2850 2851 /* First of all call extended error reporting. 
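 * When extended error reporting (EER) is enabled, a fatal request is not
 * failed immediately: it is reset to DASD_CQR_FILLED with a fresh retry
 * budget and the device is stopped with DASD_STOPPED_QUIESCE, so the
 * request can be retried once the quiesce condition is lifted again.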
*/ 2852 if (dasd_eer_enabled(base) && 2853 cqr->status == DASD_CQR_FAILED) { 2854 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2855 2856 /* restart request */ 2857 cqr->status = DASD_CQR_FILLED; 2858 cqr->retries = 255; 2859 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2860 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2861 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2862 flags); 2863 goto restart; 2864 } 2865 2866 /* Process finished ERP request. */ 2867 if (cqr->refers) { 2868 __dasd_process_erp(base, cqr); 2869 goto restart; 2870 } 2871 2872 /* Rechain finished requests to final queue */ 2873 cqr->endclk = get_tod_clock(); 2874 list_move_tail(&cqr->blocklist, final_queue); 2875 } 2876 } 2877 2878 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2879 { 2880 dasd_schedule_block_bh(cqr->block); 2881 } 2882 2883 static void __dasd_block_start_head(struct dasd_block *block) 2884 { 2885 struct dasd_ccw_req *cqr; 2886 2887 if (list_empty(&block->ccw_queue)) 2888 return; 2889 /* We always begin with the first request on the queue, as some 2890 * of the previously started requests have to be enqueued on a 2891 * dasd_device again for error recovery. 2892 */ 2893 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2894 if (cqr->status != DASD_CQR_FILLED) 2895 continue; 2896 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2897 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2898 cqr->status = DASD_CQR_FAILED; 2899 cqr->intrc = -EPERM; 2900 dasd_schedule_block_bh(block); 2901 continue; 2902 } 2903 /* Non-temporary stop condition will trigger fail fast */ 2904 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2905 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2906 (!dasd_eer_enabled(block->base))) { 2907 cqr->status = DASD_CQR_FAILED; 2908 cqr->intrc = -ENOLINK; 2909 dasd_schedule_block_bh(block); 2910 continue; 2911 } 2912 /* Don't try to start requests if device is stopped */ 2913 if (block->base->stopped) 2914 return; 2915 2916 /* just a fail safe check, should not happen */ 2917 if (!cqr->startdev) 2918 cqr->startdev = block->base; 2919 2920 /* make sure that the requests we submit find their way back */ 2921 cqr->callback = dasd_return_cqr_cb; 2922 2923 dasd_add_request_tail(cqr); 2924 } 2925 } 2926 2927 /* 2928 * Central dasd_block layer routine. Takes requests from the generic 2929 * block layer request queue, creates ccw requests, enqueues them on 2930 * a dasd_device and processes ccw requests that have been returned. 2931 */ 2932 static void dasd_block_tasklet(unsigned long data) 2933 { 2934 struct dasd_block *block = (struct dasd_block *) data; 2935 struct list_head final_queue; 2936 struct list_head *l, *n; 2937 struct dasd_ccw_req *cqr; 2938 struct dasd_queue *dq; 2939 2940 atomic_set(&block->tasklet_scheduled, 0); 2941 INIT_LIST_HEAD(&final_queue); 2942 spin_lock_irq(&block->queue_lock); 2943 /* Finish off requests on ccw queue */ 2944 __dasd_process_block_ccw_queue(block, &final_queue); 2945 spin_unlock_irq(&block->queue_lock); 2946 2947 /* Now call the callback function of requests with final status */ 2948 list_for_each_safe(l, n, &final_queue) { 2949 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2950 dq = cqr->dq; 2951 spin_lock_irq(&dq->lock); 2952 list_del_init(&cqr->blocklist); 2953 __dasd_cleanup_cqr(cqr); 2954 spin_unlock_irq(&dq->lock); 2955 } 2956 2957 spin_lock_irq(&block->queue_lock); 2958 /* Now check if the head of the ccw queue needs to be started.
*/ 2959 __dasd_block_start_head(block); 2960 spin_unlock_irq(&block->queue_lock); 2961 2962 if (waitqueue_active(&shutdown_waitq)) 2963 wake_up(&shutdown_waitq); 2964 dasd_put_device(block->base); 2965 } 2966 2967 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2968 { 2969 wake_up(&dasd_flush_wq); 2970 } 2971 2972 /* 2973 * Requeue a request back to the block request queue 2974 * only works for block requests 2975 */ 2976 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2977 { 2978 struct dasd_block *block = cqr->block; 2979 struct request *req; 2980 2981 if (!block) 2982 return -EINVAL; 2983 /* 2984 * If the request is an ERP request there is nothing to requeue. 2985 * This will be done with the remaining original request. 2986 */ 2987 if (cqr->refers) 2988 return 0; 2989 spin_lock_irq(&cqr->dq->lock); 2990 req = (struct request *) cqr->callback_data; 2991 blk_mq_requeue_request(req, false); 2992 spin_unlock_irq(&cqr->dq->lock); 2993 2994 return 0; 2995 } 2996 2997 /* 2998 * Go through all request on the dasd_block request queue, cancel them 2999 * on the respective dasd_device, and return them to the generic 3000 * block layer. 3001 */ 3002 static int dasd_flush_block_queue(struct dasd_block *block) 3003 { 3004 struct dasd_ccw_req *cqr, *n; 3005 int rc, i; 3006 struct list_head flush_queue; 3007 unsigned long flags; 3008 3009 INIT_LIST_HEAD(&flush_queue); 3010 spin_lock_bh(&block->queue_lock); 3011 rc = 0; 3012 restart: 3013 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 3014 /* if this request currently owned by a dasd_device cancel it */ 3015 if (cqr->status >= DASD_CQR_QUEUED) 3016 rc = dasd_cancel_req(cqr); 3017 if (rc < 0) 3018 break; 3019 /* Rechain request (including erp chain) so it won't be 3020 * touched by the dasd_block_tasklet anymore. 3021 * Replace the callback so we notice when the request 3022 * is returned from the dasd_device layer. 3023 */ 3024 cqr->callback = _dasd_wake_block_flush_cb; 3025 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 3026 list_move_tail(&cqr->blocklist, &flush_queue); 3027 if (i > 1) 3028 /* moved more than one request - need to restart */ 3029 goto restart; 3030 } 3031 spin_unlock_bh(&block->queue_lock); 3032 /* Now call the callback function of flushed requests */ 3033 restart_cb: 3034 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3035 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3036 /* Process finished ERP request. */ 3037 if (cqr->refers) { 3038 spin_lock_bh(&block->queue_lock); 3039 __dasd_process_erp(block->base, cqr); 3040 spin_unlock_bh(&block->queue_lock); 3041 /* restart list_for_xx loop since dasd_process_erp 3042 * might remove multiple elements */ 3043 goto restart_cb; 3044 } 3045 /* call the callback function */ 3046 spin_lock_irqsave(&cqr->dq->lock, flags); 3047 cqr->endclk = get_tod_clock(); 3048 list_del_init(&cqr->blocklist); 3049 __dasd_cleanup_cqr(cqr); 3050 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3051 } 3052 return rc; 3053 } 3054 3055 /* 3056 * Schedules a call to dasd_tasklet over the device tasklet. 3057 */ 3058 void dasd_schedule_block_bh(struct dasd_block *block) 3059 { 3060 /* Protect against rescheduling. 
*/ 3061 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3062 return; 3063 /* life cycle of block is bound to it's base device */ 3064 dasd_get_device(block->base); 3065 tasklet_hi_schedule(&block->tasklet); 3066 } 3067 EXPORT_SYMBOL(dasd_schedule_block_bh); 3068 3069 3070 /* 3071 * SECTION: external block device operations 3072 * (request queue handling, open, release, etc.) 3073 */ 3074 3075 /* 3076 * Dasd request queue function. Called from ll_rw_blk.c 3077 */ 3078 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3079 const struct blk_mq_queue_data *qd) 3080 { 3081 struct dasd_block *block = hctx->queue->queuedata; 3082 struct dasd_queue *dq = hctx->driver_data; 3083 struct request *req = qd->rq; 3084 struct dasd_device *basedev; 3085 struct dasd_ccw_req *cqr; 3086 blk_status_t rc = BLK_STS_OK; 3087 3088 basedev = block->base; 3089 spin_lock_irq(&dq->lock); 3090 if (basedev->state < DASD_STATE_READY) { 3091 DBF_DEV_EVENT(DBF_ERR, basedev, 3092 "device not ready for request %p", req); 3093 rc = BLK_STS_IOERR; 3094 goto out; 3095 } 3096 3097 /* 3098 * if device is stopped do not fetch new requests 3099 * except failfast is active which will let requests fail 3100 * immediately in __dasd_block_start_head() 3101 */ 3102 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3103 DBF_DEV_EVENT(DBF_ERR, basedev, 3104 "device stopped request %p", req); 3105 rc = BLK_STS_RESOURCE; 3106 goto out; 3107 } 3108 3109 if (basedev->features & DASD_FEATURE_READONLY && 3110 rq_data_dir(req) == WRITE) { 3111 DBF_DEV_EVENT(DBF_ERR, basedev, 3112 "Rejecting write request %p", req); 3113 rc = BLK_STS_IOERR; 3114 goto out; 3115 } 3116 3117 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3118 (basedev->features & DASD_FEATURE_FAILFAST || 3119 blk_noretry_request(req))) { 3120 DBF_DEV_EVENT(DBF_ERR, basedev, 3121 "Rejecting failfast request %p", req); 3122 rc = BLK_STS_IOERR; 3123 goto out; 3124 } 3125 3126 cqr = basedev->discipline->build_cp(basedev, block, req); 3127 if (IS_ERR(cqr)) { 3128 if (PTR_ERR(cqr) == -EBUSY || 3129 PTR_ERR(cqr) == -ENOMEM || 3130 PTR_ERR(cqr) == -EAGAIN) { 3131 rc = BLK_STS_RESOURCE; 3132 goto out; 3133 } 3134 DBF_DEV_EVENT(DBF_ERR, basedev, 3135 "CCW creation failed (rc=%ld) on request %p", 3136 PTR_ERR(cqr), req); 3137 rc = BLK_STS_IOERR; 3138 goto out; 3139 } 3140 /* 3141 * Note: callback is set to dasd_return_cqr_cb in 3142 * __dasd_block_start_head to cover erp requests as well 3143 */ 3144 cqr->callback_data = req; 3145 cqr->status = DASD_CQR_FILLED; 3146 cqr->dq = dq; 3147 3148 blk_mq_start_request(req); 3149 spin_lock(&block->queue_lock); 3150 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3151 INIT_LIST_HEAD(&cqr->devlist); 3152 dasd_profile_start(block, cqr, req); 3153 dasd_schedule_block_bh(block); 3154 spin_unlock(&block->queue_lock); 3155 3156 out: 3157 spin_unlock_irq(&dq->lock); 3158 return rc; 3159 } 3160 3161 /* 3162 * Block timeout callback, called from the block layer 3163 * 3164 * Return values: 3165 * BLK_EH_RESET_TIMER if the request should be left running 3166 * BLK_EH_DONE if the request is handled or terminated 3167 * by the driver. 
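 * In sketch form the logic implemented below is:
 *
 *	if (!device->blk_timeout)
 *		return BLK_EH_RESET_TIMER;	(per-device timeout disabled)
 *	cancel or terminate the cqr, including any ERP requests that
 *	refer to it;
 *	return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE;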
3168 */ 3169 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3170 { 3171 struct dasd_block *block = req->q->queuedata; 3172 struct dasd_device *device; 3173 struct dasd_ccw_req *cqr; 3174 unsigned long flags; 3175 int rc = 0; 3176 3177 cqr = blk_mq_rq_to_pdu(req); 3178 if (!cqr) 3179 return BLK_EH_DONE; 3180 3181 spin_lock_irqsave(&cqr->dq->lock, flags); 3182 device = cqr->startdev ? cqr->startdev : block->base; 3183 if (!device->blk_timeout) { 3184 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3185 return BLK_EH_RESET_TIMER; 3186 } 3187 DBF_DEV_EVENT(DBF_WARNING, device, 3188 " dasd_times_out cqr %p status %x", 3189 cqr, cqr->status); 3190 3191 spin_lock(&block->queue_lock); 3192 spin_lock(get_ccwdev_lock(device->cdev)); 3193 cqr->retries = -1; 3194 cqr->intrc = -ETIMEDOUT; 3195 if (cqr->status >= DASD_CQR_QUEUED) { 3196 rc = __dasd_cancel_req(cqr); 3197 } else if (cqr->status == DASD_CQR_FILLED || 3198 cqr->status == DASD_CQR_NEED_ERP) { 3199 cqr->status = DASD_CQR_TERMINATED; 3200 } else if (cqr->status == DASD_CQR_IN_ERP) { 3201 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3202 3203 list_for_each_entry_safe(searchcqr, nextcqr, 3204 &block->ccw_queue, blocklist) { 3205 tmpcqr = searchcqr; 3206 while (tmpcqr->refers) 3207 tmpcqr = tmpcqr->refers; 3208 if (tmpcqr != cqr) 3209 continue; 3210 /* searchcqr is an ERP request for cqr */ 3211 searchcqr->retries = -1; 3212 searchcqr->intrc = -ETIMEDOUT; 3213 if (searchcqr->status >= DASD_CQR_QUEUED) { 3214 rc = __dasd_cancel_req(searchcqr); 3215 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3216 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3217 searchcqr->status = DASD_CQR_TERMINATED; 3218 rc = 0; 3219 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3220 /* 3221 * Shouldn't happen; most recent ERP 3222 * request is at the front of queue 3223 */ 3224 continue; 3225 } 3226 break; 3227 } 3228 } 3229 spin_unlock(get_ccwdev_lock(device->cdev)); 3230 dasd_schedule_block_bh(block); 3231 spin_unlock(&block->queue_lock); 3232 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3233 3234 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3235 } 3236 3237 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3238 unsigned int idx) 3239 { 3240 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3241 3242 if (!dq) 3243 return -ENOMEM; 3244 3245 spin_lock_init(&dq->lock); 3246 hctx->driver_data = dq; 3247 3248 return 0; 3249 } 3250 3251 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3252 { 3253 kfree(hctx->driver_data); 3254 hctx->driver_data = NULL; 3255 } 3256 3257 static void dasd_request_done(struct request *req) 3258 { 3259 blk_mq_end_request(req, 0); 3260 blk_mq_run_hw_queues(req->q, true); 3261 } 3262 3263 static struct blk_mq_ops dasd_mq_ops = { 3264 .queue_rq = do_dasd_request, 3265 .complete = dasd_request_done, 3266 .timeout = dasd_times_out, 3267 .init_hctx = dasd_init_hctx, 3268 .exit_hctx = dasd_exit_hctx, 3269 }; 3270 3271 /* 3272 * Allocate and initialize request queue and default I/O scheduler. 
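 * The blk-mq tag set below is sized with the driver's queue_depth and
 * nr_hw_queues parameters, and cmd_size is set to
 * sizeof(struct dasd_ccw_req) so that every block request carries its
 * dasd_ccw_req as driver-private data (retrieved with blk_mq_rq_to_pdu(),
 * see dasd_times_out() above).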
3273 */ 3274 static int dasd_alloc_queue(struct dasd_block *block) 3275 { 3276 int rc; 3277 3278 block->tag_set.ops = &dasd_mq_ops; 3279 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3280 block->tag_set.nr_hw_queues = nr_hw_queues; 3281 block->tag_set.queue_depth = queue_depth; 3282 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3283 block->tag_set.numa_node = NUMA_NO_NODE; 3284 3285 rc = blk_mq_alloc_tag_set(&block->tag_set); 3286 if (rc) 3287 return rc; 3288 3289 block->request_queue = blk_mq_init_queue(&block->tag_set); 3290 if (IS_ERR(block->request_queue)) 3291 return PTR_ERR(block->request_queue); 3292 3293 block->request_queue->queuedata = block; 3294 3295 return 0; 3296 } 3297 3298 /* 3299 * Deactivate and free request queue. 3300 */ 3301 static void dasd_free_queue(struct dasd_block *block) 3302 { 3303 if (block->request_queue) { 3304 blk_cleanup_queue(block->request_queue); 3305 blk_mq_free_tag_set(&block->tag_set); 3306 block->request_queue = NULL; 3307 } 3308 } 3309 3310 static int dasd_open(struct block_device *bdev, fmode_t mode) 3311 { 3312 struct dasd_device *base; 3313 int rc; 3314 3315 base = dasd_device_from_gendisk(bdev->bd_disk); 3316 if (!base) 3317 return -ENODEV; 3318 3319 atomic_inc(&base->block->open_count); 3320 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3321 rc = -ENODEV; 3322 goto unlock; 3323 } 3324 3325 if (!try_module_get(base->discipline->owner)) { 3326 rc = -EINVAL; 3327 goto unlock; 3328 } 3329 3330 if (dasd_probeonly) { 3331 dev_info(&base->cdev->dev, 3332 "Accessing the DASD failed because it is in " 3333 "probeonly mode\n"); 3334 rc = -EPERM; 3335 goto out; 3336 } 3337 3338 if (base->state <= DASD_STATE_BASIC) { 3339 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3340 " Cannot open unrecognized device"); 3341 rc = -ENODEV; 3342 goto out; 3343 } 3344 3345 if ((mode & FMODE_WRITE) && 3346 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3347 (base->features & DASD_FEATURE_READONLY))) { 3348 rc = -EROFS; 3349 goto out; 3350 } 3351 3352 dasd_put_device(base); 3353 return 0; 3354 3355 out: 3356 module_put(base->discipline->owner); 3357 unlock: 3358 atomic_dec(&base->block->open_count); 3359 dasd_put_device(base); 3360 return rc; 3361 } 3362 3363 static void dasd_release(struct gendisk *disk, fmode_t mode) 3364 { 3365 struct dasd_device *base = dasd_device_from_gendisk(disk); 3366 if (base) { 3367 atomic_dec(&base->block->open_count); 3368 module_put(base->discipline->owner); 3369 dasd_put_device(base); 3370 } 3371 } 3372 3373 /* 3374 * Return disk geometry. 
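 * The discipline fills in cylinders, heads and sectors; only the partition
 * start offset is computed here.  get_start_sect() reports 512-byte
 * sectors, so the value is shifted right by s2b_shift to convert it to
 * device blocks (for a 4096-byte block size s2b_shift would be 3).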
3375 */ 3376 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3377 { 3378 struct dasd_device *base; 3379 3380 base = dasd_device_from_gendisk(bdev->bd_disk); 3381 if (!base) 3382 return -ENODEV; 3383 3384 if (!base->discipline || 3385 !base->discipline->fill_geometry) { 3386 dasd_put_device(base); 3387 return -EINVAL; 3388 } 3389 base->discipline->fill_geometry(base->block, geo); 3390 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3391 dasd_put_device(base); 3392 return 0; 3393 } 3394 3395 const struct block_device_operations 3396 dasd_device_operations = { 3397 .owner = THIS_MODULE, 3398 .open = dasd_open, 3399 .release = dasd_release, 3400 .ioctl = dasd_ioctl, 3401 .compat_ioctl = dasd_ioctl, 3402 .getgeo = dasd_getgeo, 3403 }; 3404 3405 /******************************************************************************* 3406 * end of block device operations 3407 */ 3408 3409 static void 3410 dasd_exit(void) 3411 { 3412 #ifdef CONFIG_PROC_FS 3413 dasd_proc_exit(); 3414 #endif 3415 dasd_eer_exit(); 3416 kmem_cache_destroy(dasd_page_cache); 3417 dasd_page_cache = NULL; 3418 dasd_gendisk_exit(); 3419 dasd_devmap_exit(); 3420 if (dasd_debug_area != NULL) { 3421 debug_unregister(dasd_debug_area); 3422 dasd_debug_area = NULL; 3423 } 3424 dasd_statistics_removeroot(); 3425 } 3426 3427 /* 3428 * SECTION: common functions for ccw_driver use 3429 */ 3430 3431 /* 3432 * Is the device read-only? 3433 * Note that this function does not report the setting of the 3434 * readonly device attribute, but how it is configured in z/VM. 3435 */ 3436 int dasd_device_is_ro(struct dasd_device *device) 3437 { 3438 struct ccw_dev_id dev_id; 3439 struct diag210 diag_data; 3440 int rc; 3441 3442 if (!MACHINE_IS_VM) 3443 return 0; 3444 ccw_device_get_id(device->cdev, &dev_id); 3445 memset(&diag_data, 0, sizeof(diag_data)); 3446 diag_data.vrdcdvno = dev_id.devno; 3447 diag_data.vrdclen = sizeof(diag_data); 3448 rc = diag210(&diag_data); 3449 if (rc == 0 || rc == 2) { 3450 return diag_data.vrdcvfla & 0x80; 3451 } else { 3452 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3453 dev_id.devno, rc); 3454 return 0; 3455 } 3456 } 3457 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3458 3459 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3460 { 3461 struct ccw_device *cdev = data; 3462 int ret; 3463 3464 ret = ccw_device_set_online(cdev); 3465 if (ret) 3466 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3467 dev_name(&cdev->dev), ret); 3468 } 3469 3470 /* 3471 * Initial attempt at a probe function. this can be simplified once 3472 * the other detection code is gone. 3473 */ 3474 int dasd_generic_probe(struct ccw_device *cdev, 3475 struct dasd_discipline *discipline) 3476 { 3477 int ret; 3478 3479 ret = dasd_add_sysfs_files(cdev); 3480 if (ret) { 3481 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3482 "dasd_generic_probe: could not add " 3483 "sysfs entries"); 3484 return ret; 3485 } 3486 cdev->handler = &dasd_int_handler; 3487 3488 /* 3489 * Automatically online either all dasd devices (dasd_autodetect) 3490 * or all devices specified with dasd= parameters during 3491 * initial probe. 
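 * A device is onlined here either because dasd_autodetect is set or
 * because its bus id was given on the dasd= parameter (for example
 * dasd=0.0.2000-0.0.200f, illustrative bus ids only).  The actual online
 * processing is deferred to dasd_generic_auto_online() through
 * async_schedule() so that probing is not serialized behind it.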
*/ 3493 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) || 3494 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3495 async_schedule(dasd_generic_auto_online, cdev); 3496 return 0; 3497 } 3498 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3499 3500 void dasd_generic_free_discipline(struct dasd_device *device) 3501 { 3502 /* Forget the discipline information. */ 3503 if (device->discipline) { 3504 if (device->discipline->uncheck_device) 3505 device->discipline->uncheck_device(device); 3506 module_put(device->discipline->owner); 3507 device->discipline = NULL; 3508 } 3509 if (device->base_discipline) { 3510 module_put(device->base_discipline->owner); 3511 device->base_discipline = NULL; 3512 } 3513 } 3514 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3515 3516 /* 3517 * This will one day be called from a global not_oper handler. 3518 * It is also used by driver_unregister during module unload. 3519 */ 3520 void dasd_generic_remove(struct ccw_device *cdev) 3521 { 3522 struct dasd_device *device; 3523 struct dasd_block *block; 3524 3525 cdev->handler = NULL; 3526 3527 device = dasd_device_from_cdev(cdev); 3528 if (IS_ERR(device)) { 3529 dasd_remove_sysfs_files(cdev); 3530 return; 3531 } 3532 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3533 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3534 /* Already doing offline processing */ 3535 dasd_put_device(device); 3536 dasd_remove_sysfs_files(cdev); 3537 return; 3538 } 3539 /* 3540 * This device is removed unconditionally. Set offline 3541 * flag to prevent dasd_open from opening it while it is 3542 * not quite down yet. 3543 */ 3544 dasd_set_target_state(device, DASD_STATE_NEW); 3545 /* dasd_delete_device destroys the device reference. */ 3546 block = device->block; 3547 dasd_delete_device(device); 3548 /* 3549 * The life cycle of the block is bound to the device, so delete it 3550 * after the device has been safely removed. 3551 */ 3552 if (block) 3553 dasd_free_block(block); 3554 3555 dasd_remove_sysfs_files(cdev); 3556 } 3557 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3558 3559 /* 3560 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3561 * the device is detected for the first time and is supposed to be used 3562 * or the user has started activation through sysfs. 3563 */ 3564 int dasd_generic_set_online(struct ccw_device *cdev, 3565 struct dasd_discipline *base_discipline) 3566 { 3567 struct dasd_discipline *discipline; 3568 struct dasd_device *device; 3569 int rc; 3570 3571 /* first online clears initial online feature flag */ 3572 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3573 device = dasd_create_device(cdev); 3574 if (IS_ERR(device)) 3575 return PTR_ERR(device); 3576 3577 discipline = base_discipline; 3578 if (device->features & DASD_FEATURE_USEDIAG) { 3579 if (!dasd_diag_discipline_pointer) { 3580 /* Try to load the required module. */ 3581 rc = request_module(DASD_DIAG_MOD); 3582 if (rc) { 3583 pr_warn("%s Setting the DASD online failed " 3584 "because the required module %s " 3585 "could not be loaded (rc=%d)\n", 3586 dev_name(&cdev->dev), DASD_DIAG_MOD, 3587 rc); 3588 dasd_delete_device(device); 3589 return -ENODEV; 3590 } 3591 } 3592 /* Module init could have failed, so check again here after 3593 * request_module().
*/ 3594 if (!dasd_diag_discipline_pointer) { 3595 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3596 dev_name(&cdev->dev)); 3597 dasd_delete_device(device); 3598 return -ENODEV; 3599 } 3600 discipline = dasd_diag_discipline_pointer; 3601 } 3602 if (!try_module_get(base_discipline->owner)) { 3603 dasd_delete_device(device); 3604 return -EINVAL; 3605 } 3606 if (!try_module_get(discipline->owner)) { 3607 module_put(base_discipline->owner); 3608 dasd_delete_device(device); 3609 return -EINVAL; 3610 } 3611 device->base_discipline = base_discipline; 3612 device->discipline = discipline; 3613 3614 /* check_device will allocate block device if necessary */ 3615 rc = discipline->check_device(device); 3616 if (rc) { 3617 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3618 dev_name(&cdev->dev), discipline->name, rc); 3619 module_put(discipline->owner); 3620 module_put(base_discipline->owner); 3621 dasd_delete_device(device); 3622 return rc; 3623 } 3624 3625 dasd_set_target_state(device, DASD_STATE_ONLINE); 3626 if (device->state <= DASD_STATE_KNOWN) { 3627 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3628 dev_name(&cdev->dev)); 3629 rc = -ENODEV; 3630 dasd_set_target_state(device, DASD_STATE_NEW); 3631 if (device->block) 3632 dasd_free_block(device->block); 3633 dasd_delete_device(device); 3634 } else 3635 pr_debug("dasd_generic device %s found\n", 3636 dev_name(&cdev->dev)); 3637 3638 wait_event(dasd_init_waitq, _wait_for_device(device)); 3639 3640 dasd_put_device(device); 3641 return rc; 3642 } 3643 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3644 3645 int dasd_generic_set_offline(struct ccw_device *cdev) 3646 { 3647 struct dasd_device *device; 3648 struct dasd_block *block; 3649 int max_count, open_count, rc; 3650 unsigned long flags; 3651 3652 rc = 0; 3653 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3654 device = dasd_device_from_cdev_locked(cdev); 3655 if (IS_ERR(device)) { 3656 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3657 return PTR_ERR(device); 3658 } 3659 3660 /* 3661 * We must make sure that this device is currently not in use. 3662 * The open_count is increased for every opener, that includes 3663 * the blkdev_get in dasd_scan_partitions. We are only interested 3664 * in the other openers. 3665 */ 3666 if (device->block) { 3667 max_count = device->block->bdev ? 0 : -1; 3668 open_count = atomic_read(&device->block->open_count); 3669 if (open_count > max_count) { 3670 if (open_count > 0) 3671 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3672 dev_name(&cdev->dev), open_count); 3673 else 3674 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3675 dev_name(&cdev->dev)); 3676 rc = -EBUSY; 3677 goto out_err; 3678 } 3679 } 3680 3681 /* 3682 * Test if the offline processing is already running and exit if so. 
3683 * If a safe offline is being processed this could only be a normal 3684 * offline that should be able to overtake the safe offline and 3685 * cancel any I/O we do not want to wait for any longer 3686 */ 3687 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3688 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3689 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3690 &device->flags); 3691 } else { 3692 rc = -EBUSY; 3693 goto out_err; 3694 } 3695 } 3696 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3697 3698 /* 3699 * if safe_offline is called set safe_offline_running flag and 3700 * clear safe_offline so that a call to normal offline 3701 * can overrun safe_offline processing 3702 */ 3703 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3704 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3705 /* need to unlock here to wait for outstanding I/O */ 3706 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3707 /* 3708 * If we want to set the device safe offline all IO operations 3709 * should be finished before continuing the offline process 3710 * so sync bdev first and then wait for our queues to become 3711 * empty 3712 */ 3713 if (device->block) { 3714 rc = fsync_bdev(device->block->bdev); 3715 if (rc != 0) 3716 goto interrupted; 3717 } 3718 dasd_schedule_device_bh(device); 3719 rc = wait_event_interruptible(shutdown_waitq, 3720 _wait_for_empty_queues(device)); 3721 if (rc != 0) 3722 goto interrupted; 3723 3724 /* 3725 * check if a normal offline process overtook the offline 3726 * processing in this case simply do nothing beside returning 3727 * that we got interrupted 3728 * otherwise mark safe offline as not running any longer and 3729 * continue with normal offline 3730 */ 3731 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3732 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3733 rc = -ERESTARTSYS; 3734 goto out_err; 3735 } 3736 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3737 } 3738 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3739 3740 dasd_set_target_state(device, DASD_STATE_NEW); 3741 /* dasd_delete_device destroys the device reference. */ 3742 block = device->block; 3743 dasd_delete_device(device); 3744 /* 3745 * life cycle of block is bound to device, so delete it after 3746 * device was safely removed 3747 */ 3748 if (block) 3749 dasd_free_block(block); 3750 3751 return 0; 3752 3753 interrupted: 3754 /* interrupted by signal */ 3755 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3756 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3757 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3758 out_err: 3759 dasd_put_device(device); 3760 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3761 return rc; 3762 } 3763 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3764 3765 int dasd_generic_last_path_gone(struct dasd_device *device) 3766 { 3767 struct dasd_ccw_req *cqr; 3768 3769 dev_warn(&device->cdev->dev, "No operational channel path is left " 3770 "for the device\n"); 3771 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3772 /* First of all call extended error reporting. */ 3773 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3774 3775 if (device->state < DASD_STATE_BASIC) 3776 return 0; 3777 /* Device is active. We want to keep it. 
*/ 3778 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3779 if ((cqr->status == DASD_CQR_IN_IO) || 3780 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3781 cqr->status = DASD_CQR_QUEUED; 3782 cqr->retries++; 3783 } 3784 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3785 dasd_device_clear_timer(device); 3786 dasd_schedule_device_bh(device); 3787 return 1; 3788 } 3789 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3790 3791 int dasd_generic_path_operational(struct dasd_device *device) 3792 { 3793 dev_info(&device->cdev->dev, "A channel path to the device has become " 3794 "operational\n"); 3795 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3796 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3797 if (device->stopped & DASD_UNRESUMED_PM) { 3798 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3799 dasd_restore_device(device); 3800 return 1; 3801 } 3802 dasd_schedule_device_bh(device); 3803 if (device->block) { 3804 dasd_schedule_block_bh(device->block); 3805 if (device->block->request_queue) 3806 blk_mq_run_hw_queues(device->block->request_queue, 3807 true); 3808 } 3809 3810 if (!device->stopped) 3811 wake_up(&generic_waitq); 3812 3813 return 1; 3814 } 3815 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3816 3817 int dasd_generic_notify(struct ccw_device *cdev, int event) 3818 { 3819 struct dasd_device *device; 3820 int ret; 3821 3822 device = dasd_device_from_cdev_locked(cdev); 3823 if (IS_ERR(device)) 3824 return 0; 3825 ret = 0; 3826 switch (event) { 3827 case CIO_GONE: 3828 case CIO_BOXED: 3829 case CIO_NO_PATH: 3830 dasd_path_no_path(device); 3831 ret = dasd_generic_last_path_gone(device); 3832 break; 3833 case CIO_OPER: 3834 ret = 1; 3835 if (dasd_path_get_opm(device)) 3836 ret = dasd_generic_path_operational(device); 3837 break; 3838 } 3839 dasd_put_device(device); 3840 return ret; 3841 } 3842 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3843 3844 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3845 { 3846 struct dasd_device *device; 3847 int chp, oldopm, hpfpm, ifccpm; 3848 3849 device = dasd_device_from_cdev_locked(cdev); 3850 if (IS_ERR(device)) 3851 return; 3852 3853 oldopm = dasd_path_get_opm(device); 3854 for (chp = 0; chp < 8; chp++) { 3855 if (path_event[chp] & PE_PATH_GONE) { 3856 dasd_path_notoper(device, chp); 3857 } 3858 if (path_event[chp] & PE_PATH_AVAILABLE) { 3859 dasd_path_available(device, chp); 3860 dasd_schedule_device_bh(device); 3861 } 3862 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3863 if (!dasd_path_is_operational(device, chp) && 3864 !dasd_path_need_verify(device, chp)) { 3865 /* 3866 * we can not establish a pathgroup on an 3867 * unavailable path, so trigger a path 3868 * verification first 3869 */ 3870 dasd_path_available(device, chp); 3871 dasd_schedule_device_bh(device); 3872 } 3873 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3874 "Pathgroup re-established\n"); 3875 if (device->discipline->kick_validate) 3876 device->discipline->kick_validate(device); 3877 } 3878 } 3879 hpfpm = dasd_path_get_hpfpm(device); 3880 ifccpm = dasd_path_get_ifccpm(device); 3881 if (!dasd_path_get_opm(device) && hpfpm) { 3882 /* 3883 * device has no operational paths but at least one path is 3884 * disabled due to HPF errors 3885 * disable HPF at all and use the path(s) again 3886 */ 3887 if (device->discipline->disable_hpf) 3888 device->discipline->disable_hpf(device); 3889 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3890 dasd_path_set_tbvpm(device, hpfpm); 3891 

void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE) {
			dasd_path_notoper(device, chp);
		}
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * We cannot establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first.
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to HPF errors.  Disable HPF completely and
		 * use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors.  Trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
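/*
 * Illustrative sketch (not part of this file): the two helpers above are
 * intended to be called from a discipline's error and message handling for
 * thin-provisioned (ESE) volumes.  Roughly, when a request fails because
 * the extent pool ran out of space the request is parked and the device is
 * stopped, and once the pool has been enlarged I/O is resumed:
 *
 *	// out-of-space condition detected for cqr (detection is
 *	// discipline specific and only assumed here)
 *	dasd_generic_space_exhaust(device, cqr);
 *	...
 *	// notification that extent pool space is available again
 *	dasd_generic_space_avail(device);
 */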

/*
 * clear active requests and requeue them to block layer if possible
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing requests to the block layer only works
		 * for block device requests.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again.
		 * All erp requests (cqr->refers) have a cqr->block
		 * pointer copied from the original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, they are internal requests and go back
	 * to the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* move the remaining requests back to the device's ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);
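/*
 * Illustrative sketch (not part of this file): dasd_schedule_requeue() is
 * the deferred entry point into the requeue machinery above.  A caller that
 * has to fence the device first, such as the path event handler earlier in
 * this file, typically uses the pattern
 *
 *	dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
 *	dasd_schedule_requeue(device);
 *
 * do_requeue_requests() later removes DASD_STOPPED_NOT_ACC again and drops
 * the device reference taken in dasd_schedule_requeue().
 */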

int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * Call the discipline restore function if the device is not
	 * stopped; do nothing, e.g., for disconnected devices.
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
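/*
 * Illustrative sketch (not part of this file): a discipline usually reads
 * the device characteristics into a buffer of its own during online
 * processing.  The magic value and buffer layout are defined by the
 * discipline; the names below are placeholders.
 *
 *	struct dasd_xyz_characteristics rdc;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_XYZ_MAGIC,
 *					 &rdc, sizeof(rdc));
 *	if (rc)
 *		return rc;
 */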

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1: /* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);