// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

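/*
 * Note (added overview of the state functions below): a dasd_device moves
 * through the states NEW -> KNOWN -> BASIC -> READY -> ONLINE, with UNFMT as
 * a side state entered from the basic-to-ready transition when the initial
 * analysis finds an unformatted device.  dasd_increase_state() and
 * dasd_decrease_state() each perform one transition towards device->target,
 * and dasd_change_state() drives them until state and target match.
 */
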
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state.
 * The eckd discipline uses this to start a ccw that detects the format.
 * The completion interrupt for this detection ccw uses the kernel event
 * daemon to trigger the call to dasd_change_state. All this is done in
 * the discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			disk = device->block->gdp;
			kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
			return 0;
		}
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW)) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device to the kernel
 * event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_restore_device to the kernel event daemon. */
	if (!schedule_work(&device->restore_device))
		dasd_put_device(device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			     \
{								     \
	for (index = 0; index < 31 && value >> (2+index); index++)  \
		;						     \
}
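
/*
 * Note (added explanation): dasd_profile_counter() computes a logarithmic
 * histogram bucket.  It returns the smallest index for which
 * value >> (2 + index) is zero, capped at 31, so bucket 0 holds values
 * below 4, bucket 1 holds 4..7, and each further bucket doubles the range.
 * For example, value == 100 yields index 5, since 100 >> 7 == 0 while
 * 100 >> 6 != 0.
 */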

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
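
/*
 * Note (added explanation): dasd_smalloc_request() above carves the request,
 * its channel program (cplength CCWs) and its data area out of the device's
 * preallocated ccw_chunks pool, so it does not have to sleep; the embedded
 * struct dasd_ccw_req is rounded up to an 8-byte boundary via
 * (sizeof(*cqr) + 7L) & -8L.  dasd_fmalloc_request() below does the same but
 * draws from the separate ese_chunks pool that was set aside for ese format
 * requests.
 */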

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr);
		if (IS_ERR(fcqr)) {
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
*/ 1936 if (cqr->status == DASD_CQR_QUEUED || 1937 cqr->status == DASD_CQR_IN_IO || 1938 cqr->status == DASD_CQR_CLEAR_PENDING) 1939 continue; 1940 if (cqr->status == DASD_CQR_ERROR) { 1941 __dasd_device_recovery(device, cqr); 1942 } 1943 /* Rechain finished requests to final queue */ 1944 list_move_tail(&cqr->devlist, final_queue); 1945 } 1946 } 1947
1948 static void __dasd_process_cqr(struct dasd_device *device, 1949 struct dasd_ccw_req *cqr) 1950 { 1951 char errorstring[ERRORLENGTH]; 1952 1953 switch (cqr->status) { 1954 case DASD_CQR_SUCCESS: 1955 cqr->status = DASD_CQR_DONE; 1956 break; 1957 case DASD_CQR_ERROR: 1958 cqr->status = DASD_CQR_NEED_ERP; 1959 break; 1960 case DASD_CQR_CLEARED: 1961 cqr->status = DASD_CQR_TERMINATED; 1962 break; 1963 default: 1964 /* internal error 12 - wrong cqr status */ 1965 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status); 1966 dev_err(&device->cdev->dev, 1967 "An error occurred in the DASD device driver, " 1968 "reason=%s\n", errorstring); 1969 BUG(); 1970 } 1971 if (cqr->callback) 1972 cqr->callback(cqr, cqr->callback_data); 1973 } 1974
1975 /* 1976 * The cqrs from the final queue are returned to the upper layer 1977 * by setting a dasd_block state and calling the callback function 1978 */ 1979 static void __dasd_device_process_final_queue(struct dasd_device *device, 1980 struct list_head *final_queue) 1981 { 1982 struct list_head *l, *n; 1983 struct dasd_ccw_req *cqr; 1984 struct dasd_block *block; 1985 1986 list_for_each_safe(l, n, final_queue) { 1987 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1988 list_del_init(&cqr->devlist); 1989 block = cqr->block; 1990 if (!block) { 1991 __dasd_process_cqr(device, cqr); 1992 } else { 1993 spin_lock_bh(&block->queue_lock); 1994 __dasd_process_cqr(device, cqr); 1995 spin_unlock_bh(&block->queue_lock); 1996 } 1997 } 1998 } 1999
2000 /* 2001 * Take a look at the first request on the ccw queue and check 2002 * if it reached its expire time. If so, terminate the IO.
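* A request has expired when jiffies has passed cqr->starttime + cqr->expires; the request is then terminated via the discipline's term_IO callback.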
2003 */ 2004 static void __dasd_device_check_expire(struct dasd_device *device) 2005 { 2006 struct dasd_ccw_req *cqr; 2007 2008 if (list_empty(&device->ccw_queue)) 2009 return; 2010 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2011 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 2012 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 2013 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2014 /* 2015 * IO in safe offline processing should not 2016 * run out of retries 2017 */ 2018 cqr->retries++; 2019 } 2020 if (device->discipline->term_IO(cqr) != 0) { 2021 /* Hmpf, try again in 5 sec */ 2022 dev_err(&device->cdev->dev, 2023 "cqr %p timed out (%lus) but cannot be " 2024 "ended, retrying in 5 s\n", 2025 cqr, (cqr->expires/HZ)); 2026 cqr->expires += 5*HZ; 2027 dasd_device_set_timer(device, 5*HZ); 2028 } else { 2029 dev_err(&device->cdev->dev, 2030 "cqr %p timed out (%lus), %i retries " 2031 "remaining\n", cqr, (cqr->expires/HZ), 2032 cqr->retries); 2033 } 2034 } 2035 } 2036 2037 /* 2038 * return 1 when device is not eligible for IO 2039 */ 2040 static int __dasd_device_is_unusable(struct dasd_device *device, 2041 struct dasd_ccw_req *cqr) 2042 { 2043 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM | DASD_STOPPED_NOSPC); 2044 2045 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2046 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2047 /* 2048 * dasd is being set offline 2049 * but it is no safe offline where we have to allow I/O 2050 */ 2051 return 1; 2052 } 2053 if (device->stopped) { 2054 if (device->stopped & mask) { 2055 /* stopped and CQR will not change that. */ 2056 return 1; 2057 } 2058 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2059 /* CQR is not able to change device to 2060 * operational. */ 2061 return 1; 2062 } 2063 /* CQR required to get device operational. */ 2064 } 2065 return 0; 2066 } 2067 2068 /* 2069 * Take a look at the first request on the ccw queue and check 2070 * if it needs to be started. 2071 */ 2072 static void __dasd_device_start_head(struct dasd_device *device) 2073 { 2074 struct dasd_ccw_req *cqr; 2075 int rc; 2076 2077 if (list_empty(&device->ccw_queue)) 2078 return; 2079 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2080 if (cqr->status != DASD_CQR_QUEUED) 2081 return; 2082 /* if device is not usable return request to upper layer */ 2083 if (__dasd_device_is_unusable(device, cqr)) { 2084 cqr->intrc = -EAGAIN; 2085 cqr->status = DASD_CQR_CLEARED; 2086 dasd_schedule_device_bh(device); 2087 return; 2088 } 2089 2090 rc = device->discipline->start_IO(cqr); 2091 if (rc == 0) 2092 dasd_device_set_timer(device, cqr->expires); 2093 else if (rc == -EACCES) { 2094 dasd_schedule_device_bh(device); 2095 } else 2096 /* Hmpf, try again in 1/2 sec */ 2097 dasd_device_set_timer(device, 50); 2098 } 2099 2100 static void __dasd_device_check_path_events(struct dasd_device *device) 2101 { 2102 int rc; 2103 2104 if (!dasd_path_get_tbvpm(device)) 2105 return; 2106 2107 if (device->stopped & 2108 ~(DASD_STOPPED_DC_WAIT | DASD_UNRESUMED_PM)) 2109 return; 2110 rc = device->discipline->verify_path(device, 2111 dasd_path_get_tbvpm(device)); 2112 if (rc) 2113 dasd_device_set_timer(device, 50); 2114 else 2115 dasd_path_clear_all_verify(device); 2116 }; 2117 2118 /* 2119 * Go through all request on the dasd_device request queue, 2120 * terminate them on the cdev if necessary, and return them to the 2121 * submitting layer via callback. 
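* Requests that are currently in I/O are terminated on the cdev first; queued requests are simply marked as cleared.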
* Note: 2123 * Make sure that all 'submitting layers' still exist when 2124 * this function is called. In other words, when 'device' is a base 2125 * device then all block layer requests must have been removed before 2126 * via dasd_flush_block_queue. 2127 */ 2128 int dasd_flush_device_queue(struct dasd_device *device) 2129 { 2130 struct dasd_ccw_req *cqr, *n; 2131 int rc; 2132 struct list_head flush_queue; 2133 2134 INIT_LIST_HEAD(&flush_queue); 2135 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2136 rc = 0; 2137 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2138 /* Check status and move request to flush_queue */ 2139 switch (cqr->status) { 2140 case DASD_CQR_IN_IO: 2141 rc = device->discipline->term_IO(cqr); 2142 if (rc) { 2143 /* unable to terminate request */ 2144 dev_err(&device->cdev->dev, 2145 "Flushing the DASD request queue " 2146 "failed for request %p\n", cqr); 2147 /* stop flush processing */ 2148 goto finished; 2149 } 2150 break; 2151 case DASD_CQR_QUEUED: 2152 cqr->stopclk = get_tod_clock(); 2153 cqr->status = DASD_CQR_CLEARED; 2154 break; 2155 default: /* no need to modify the others */ 2156 break; 2157 } 2158 list_move_tail(&cqr->devlist, &flush_queue); 2159 } 2160 finished: 2161 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2162 /* 2163 * After this point all requests must be in state CLEAR_PENDING, 2164 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2165 * one of the others. 2166 */ 2167 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2168 wait_event(dasd_flush_wq, 2169 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2170 /* 2171 * Now set each request back to TERMINATED, DONE or NEED_ERP 2172 * and call the callback function of flushed requests 2173 */ 2174 __dasd_device_process_final_queue(device, &flush_queue); 2175 return rc; 2176 } 2177 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2178
2179 /* 2180 * Acquire the device lock and process queues for the device. 2181 */ 2182 static void dasd_device_tasklet(unsigned long data) 2183 { 2184 struct dasd_device *device = (struct dasd_device *) data; 2185 struct list_head final_queue; 2186 2187 atomic_set (&device->tasklet_scheduled, 0); 2188 INIT_LIST_HEAD(&final_queue); 2189 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2190 /* Check expire time of first request on the ccw queue. */ 2191 __dasd_device_check_expire(device); 2192 /* find final requests on ccw queue */ 2193 __dasd_device_process_ccw_queue(device, &final_queue); 2194 __dasd_device_check_path_events(device); 2195 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2196 /* Now call the callback function of requests with final status */ 2197 __dasd_device_process_final_queue(device, &final_queue); 2198 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2199 /* Now check if the head of the ccw queue needs to be started. */ 2200 __dasd_device_start_head(device); 2201 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2202 if (waitqueue_active(&shutdown_waitq)) 2203 wake_up(&shutdown_waitq); 2204 dasd_put_device(device); 2205 } 2206
2207 /* 2208 * Schedules a call to dasd_device_tasklet over the device tasklet. 2209 */ 2210 void dasd_schedule_device_bh(struct dasd_device *device) 2211 { 2212 /* Protect against rescheduling.
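Only the caller that flips tasklet_scheduled from 0 to 1 takes a device reference and schedules the tasklet.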
*/ 2213 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2214 return; 2215 dasd_get_device(device); 2216 tasklet_hi_schedule(&device->tasklet); 2217 } 2218 EXPORT_SYMBOL(dasd_schedule_device_bh); 2219 2220 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2221 { 2222 device->stopped |= bits; 2223 } 2224 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2225 2226 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2227 { 2228 device->stopped &= ~bits; 2229 if (!device->stopped) 2230 wake_up(&generic_waitq); 2231 } 2232 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2233 2234 /* 2235 * Queue a request to the head of the device ccw_queue. 2236 * Start the I/O if possible. 2237 */ 2238 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2239 { 2240 struct dasd_device *device; 2241 unsigned long flags; 2242 2243 device = cqr->startdev; 2244 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2245 cqr->status = DASD_CQR_QUEUED; 2246 list_add(&cqr->devlist, &device->ccw_queue); 2247 /* let the bh start the request to keep them in order */ 2248 dasd_schedule_device_bh(device); 2249 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2250 } 2251 EXPORT_SYMBOL(dasd_add_request_head); 2252 2253 /* 2254 * Queue a request to the tail of the device ccw_queue. 2255 * Start the I/O if possible. 2256 */ 2257 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2258 { 2259 struct dasd_device *device; 2260 unsigned long flags; 2261 2262 device = cqr->startdev; 2263 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2264 cqr->status = DASD_CQR_QUEUED; 2265 list_add_tail(&cqr->devlist, &device->ccw_queue); 2266 /* let the bh start the request to keep them in order */ 2267 dasd_schedule_device_bh(device); 2268 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2269 } 2270 EXPORT_SYMBOL(dasd_add_request_tail); 2271 2272 /* 2273 * Wakeup helper for the 'sleep_on' functions. 2274 */ 2275 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2276 { 2277 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2278 cqr->callback_data = DASD_SLEEPON_END_TAG; 2279 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2280 wake_up(&generic_waitq); 2281 } 2282 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2283 2284 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2285 { 2286 struct dasd_device *device; 2287 int rc; 2288 2289 device = cqr->startdev; 2290 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2291 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2292 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2293 return rc; 2294 } 2295 2296 /* 2297 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 
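* Terminated requests are handed to the discipline's handle_terminated_request callback, NEED_ERP requests to its erp_action handler, and finished ERP requests (cqr->refers set) are post-processed via __dasd_process_erp().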
2298 */ 2299 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2300 { 2301 struct dasd_device *device; 2302 dasd_erp_fn_t erp_fn; 2303 2304 if (cqr->status == DASD_CQR_FILLED) 2305 return 0; 2306 device = cqr->startdev; 2307 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2308 if (cqr->status == DASD_CQR_TERMINATED) { 2309 device->discipline->handle_terminated_request(cqr); 2310 return 1; 2311 } 2312 if (cqr->status == DASD_CQR_NEED_ERP) { 2313 erp_fn = device->discipline->erp_action(cqr); 2314 erp_fn(cqr); 2315 return 1; 2316 } 2317 if (cqr->status == DASD_CQR_FAILED) 2318 dasd_log_sense(cqr, &cqr->irb); 2319 if (cqr->refers) { 2320 __dasd_process_erp(device, cqr); 2321 return 1; 2322 } 2323 } 2324 return 0; 2325 } 2326 2327 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2328 { 2329 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2330 if (cqr->refers) /* erp is not done yet */ 2331 return 1; 2332 return ((cqr->status != DASD_CQR_DONE) && 2333 (cqr->status != DASD_CQR_FAILED)); 2334 } else 2335 return (cqr->status == DASD_CQR_FILLED); 2336 } 2337 2338 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2339 { 2340 struct dasd_device *device; 2341 int rc; 2342 struct list_head ccw_queue; 2343 struct dasd_ccw_req *cqr; 2344 2345 INIT_LIST_HEAD(&ccw_queue); 2346 maincqr->status = DASD_CQR_FILLED; 2347 device = maincqr->startdev; 2348 list_add(&maincqr->blocklist, &ccw_queue); 2349 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2350 cqr = list_first_entry(&ccw_queue, 2351 struct dasd_ccw_req, blocklist)) { 2352 2353 if (__dasd_sleep_on_erp(cqr)) 2354 continue; 2355 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2356 continue; 2357 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2358 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2359 cqr->status = DASD_CQR_FAILED; 2360 cqr->intrc = -EPERM; 2361 continue; 2362 } 2363 /* Non-temporary stop condition will trigger fail fast */ 2364 if (device->stopped & ~DASD_STOPPED_PENDING && 2365 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2366 (!dasd_eer_enabled(device))) { 2367 cqr->status = DASD_CQR_FAILED; 2368 cqr->intrc = -ENOLINK; 2369 continue; 2370 } 2371 /* 2372 * Don't try to start requests if device is in 2373 * offline processing, it might wait forever 2374 */ 2375 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2376 cqr->status = DASD_CQR_FAILED; 2377 cqr->intrc = -ENODEV; 2378 continue; 2379 } 2380 /* 2381 * Don't try to start requests if device is stopped 2382 * except path verification requests 2383 */ 2384 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2385 if (interruptible) { 2386 rc = wait_event_interruptible( 2387 generic_waitq, !(device->stopped)); 2388 if (rc == -ERESTARTSYS) { 2389 cqr->status = DASD_CQR_FAILED; 2390 maincqr->intrc = rc; 2391 continue; 2392 } 2393 } else 2394 wait_event(generic_waitq, !(device->stopped)); 2395 } 2396 if (!cqr->callback) 2397 cqr->callback = dasd_wakeup_cb; 2398 2399 cqr->callback_data = DASD_SLEEPON_START_TAG; 2400 dasd_add_request_tail(cqr); 2401 if (interruptible) { 2402 rc = wait_event_interruptible( 2403 generic_waitq, _wait_for_wakeup(cqr)); 2404 if (rc == -ERESTARTSYS) { 2405 dasd_cancel_req(cqr); 2406 /* wait (non-interruptible) for final status */ 2407 wait_event(generic_waitq, 2408 _wait_for_wakeup(cqr)); 2409 cqr->status = DASD_CQR_FAILED; 2410 maincqr->intrc = rc; 2411 continue; 2412 } 2413 } else 2414 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2415 } 2416 2417 maincqr->endclk = 
get_tod_clock(); 2418 if ((maincqr->status != DASD_CQR_DONE) && 2419 (maincqr->intrc != -ERESTARTSYS)) 2420 dasd_log_sense(maincqr, &maincqr->irb); 2421 if (maincqr->status == DASD_CQR_DONE) 2422 rc = 0; 2423 else if (maincqr->intrc) 2424 rc = maincqr->intrc; 2425 else 2426 rc = -EIO; 2427 return rc; 2428 } 2429 2430 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2431 { 2432 struct dasd_ccw_req *cqr; 2433 2434 list_for_each_entry(cqr, ccw_queue, blocklist) { 2435 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2436 return 0; 2437 } 2438 2439 return 1; 2440 } 2441 2442 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2443 { 2444 struct dasd_device *device; 2445 struct dasd_ccw_req *cqr, *n; 2446 u8 *sense = NULL; 2447 int rc; 2448 2449 retry: 2450 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2451 device = cqr->startdev; 2452 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2453 continue; 2454 2455 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2456 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2457 cqr->status = DASD_CQR_FAILED; 2458 cqr->intrc = -EPERM; 2459 continue; 2460 } 2461 /*Non-temporary stop condition will trigger fail fast*/ 2462 if (device->stopped & ~DASD_STOPPED_PENDING && 2463 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2464 !dasd_eer_enabled(device)) { 2465 cqr->status = DASD_CQR_FAILED; 2466 cqr->intrc = -EAGAIN; 2467 continue; 2468 } 2469 2470 /*Don't try to start requests if device is stopped*/ 2471 if (interruptible) { 2472 rc = wait_event_interruptible( 2473 generic_waitq, !device->stopped); 2474 if (rc == -ERESTARTSYS) { 2475 cqr->status = DASD_CQR_FAILED; 2476 cqr->intrc = rc; 2477 continue; 2478 } 2479 } else 2480 wait_event(generic_waitq, !(device->stopped)); 2481 2482 if (!cqr->callback) 2483 cqr->callback = dasd_wakeup_cb; 2484 cqr->callback_data = DASD_SLEEPON_START_TAG; 2485 dasd_add_request_tail(cqr); 2486 } 2487 2488 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2489 2490 rc = 0; 2491 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2492 /* 2493 * In some cases the 'File Protected' or 'Incorrect Length' 2494 * error might be expected and error recovery would be 2495 * unnecessary in these cases. Check if the according suppress 2496 * bit is set. 2497 */ 2498 sense = dasd_get_sense(&cqr->irb); 2499 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2500 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2501 continue; 2502 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2503 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2504 continue; 2505 2506 /* 2507 * for alias devices simplify error recovery and 2508 * return to upper layer 2509 * do not skip ERP requests 2510 */ 2511 if (cqr->startdev != cqr->basedev && !cqr->refers && 2512 (cqr->status == DASD_CQR_TERMINATED || 2513 cqr->status == DASD_CQR_NEED_ERP)) 2514 return -EAGAIN; 2515 2516 /* normal recovery for basedev IO */ 2517 if (__dasd_sleep_on_erp(cqr)) 2518 /* handle erp first */ 2519 goto retry; 2520 } 2521 2522 return 0; 2523 } 2524 2525 /* 2526 * Queue a request to the tail of the device ccw_queue and wait for 2527 * it's completion. 2528 */ 2529 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2530 { 2531 return _dasd_sleep_on(cqr, 0); 2532 } 2533 EXPORT_SYMBOL(dasd_sleep_on); 2534 2535 /* 2536 * Start requests from a ccw_queue and wait for their completion. 
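* Each request on the list is queued to its start device; the caller sleeps until all of them have reached a final state.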
2537 */ 2538 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2539 { 2540 return _dasd_sleep_on_queue(ccw_queue, 0); 2541 } 2542 EXPORT_SYMBOL(dasd_sleep_on_queue); 2543 2544 /* 2545 * Start requests from a ccw_queue and wait interruptible for their completion. 2546 */ 2547 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2548 { 2549 return _dasd_sleep_on_queue(ccw_queue, 1); 2550 } 2551 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2552 2553 /* 2554 * Queue a request to the tail of the device ccw_queue and wait 2555 * interruptible for it's completion. 2556 */ 2557 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2558 { 2559 return _dasd_sleep_on(cqr, 1); 2560 } 2561 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2562 2563 /* 2564 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2565 * for eckd devices) the currently running request has to be terminated 2566 * and be put back to status queued, before the special request is added 2567 * to the head of the queue. Then the special request is waited on normally. 2568 */ 2569 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2570 { 2571 struct dasd_ccw_req *cqr; 2572 int rc; 2573 2574 if (list_empty(&device->ccw_queue)) 2575 return 0; 2576 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2577 rc = device->discipline->term_IO(cqr); 2578 if (!rc) 2579 /* 2580 * CQR terminated because a more important request is pending. 2581 * Undo decreasing of retry counter because this is 2582 * not an error case. 2583 */ 2584 cqr->retries++; 2585 return rc; 2586 } 2587 2588 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2589 { 2590 struct dasd_device *device; 2591 int rc; 2592 2593 device = cqr->startdev; 2594 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2595 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2596 cqr->status = DASD_CQR_FAILED; 2597 cqr->intrc = -EPERM; 2598 return -EIO; 2599 } 2600 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2601 rc = _dasd_term_running_cqr(device); 2602 if (rc) { 2603 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2604 return rc; 2605 } 2606 cqr->callback = dasd_wakeup_cb; 2607 cqr->callback_data = DASD_SLEEPON_START_TAG; 2608 cqr->status = DASD_CQR_QUEUED; 2609 /* 2610 * add new request as second 2611 * first the terminated cqr needs to be finished 2612 */ 2613 list_add(&cqr->devlist, device->ccw_queue.next); 2614 2615 /* let the bh start the request to keep them in order */ 2616 dasd_schedule_device_bh(device); 2617 2618 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2619 2620 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2621 2622 if (cqr->status == DASD_CQR_DONE) 2623 rc = 0; 2624 else if (cqr->intrc) 2625 rc = cqr->intrc; 2626 else 2627 rc = -EIO; 2628 2629 /* kick tasklets */ 2630 dasd_schedule_device_bh(device); 2631 if (device->block) 2632 dasd_schedule_block_bh(device->block); 2633 2634 return rc; 2635 } 2636 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2637 2638 /* 2639 * Cancels a request that was started with dasd_sleep_on_req. 2640 * This is useful to timeout requests. The request will be 2641 * terminated if it is currently in i/o. 2642 * Returns 0 if request termination was successful 2643 * negative error code if termination failed 2644 * Cancellation of a request is an asynchronous operation! The calling 2645 * function has to wait until the request is properly returned via callback. 
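* __dasd_cancel_req() below expects the ccw device lock to be held; dasd_cancel_req() is the wrapper that acquires it.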
2646 */ 2647 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2648 { 2649 struct dasd_device *device = cqr->startdev; 2650 int rc = 0; 2651 2652 switch (cqr->status) { 2653 case DASD_CQR_QUEUED: 2654 /* request was not started - just set to cleared */ 2655 cqr->status = DASD_CQR_CLEARED; 2656 break; 2657 case DASD_CQR_IN_IO: 2658 /* request in IO - terminate IO and release again */ 2659 rc = device->discipline->term_IO(cqr); 2660 if (rc) { 2661 dev_err(&device->cdev->dev, 2662 "Cancelling request %p failed with rc=%d\n", 2663 cqr, rc); 2664 } else { 2665 cqr->stopclk = get_tod_clock(); 2666 } 2667 break; 2668 default: /* already finished or clear pending - do nothing */ 2669 break; 2670 } 2671 dasd_schedule_device_bh(device); 2672 return rc; 2673 } 2674 2675 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2676 { 2677 struct dasd_device *device = cqr->startdev; 2678 unsigned long flags; 2679 int rc; 2680 2681 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2682 rc = __dasd_cancel_req(cqr); 2683 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2684 return rc; 2685 } 2686 2687 /* 2688 * SECTION: Operations of the dasd_block layer. 2689 */ 2690 2691 /* 2692 * Timeout function for dasd_block. This is used when the block layer 2693 * is waiting for something that may not come reliably, (e.g. a state 2694 * change interrupt) 2695 */ 2696 static void dasd_block_timeout(struct timer_list *t) 2697 { 2698 unsigned long flags; 2699 struct dasd_block *block; 2700 2701 block = from_timer(block, t, timer); 2702 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2703 /* re-activate request queue */ 2704 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2705 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2706 dasd_schedule_block_bh(block); 2707 blk_mq_run_hw_queues(block->request_queue, true); 2708 } 2709 2710 /* 2711 * Setup timeout for a dasd_block in jiffies. 2712 */ 2713 void dasd_block_set_timer(struct dasd_block *block, int expires) 2714 { 2715 if (expires == 0) 2716 del_timer(&block->timer); 2717 else 2718 mod_timer(&block->timer, jiffies + expires); 2719 } 2720 EXPORT_SYMBOL(dasd_block_set_timer); 2721 2722 /* 2723 * Clear timeout for a dasd_block. 2724 */ 2725 void dasd_block_clear_timer(struct dasd_block *block) 2726 { 2727 del_timer(&block->timer); 2728 } 2729 EXPORT_SYMBOL(dasd_block_clear_timer); 2730 2731 /* 2732 * Process finished error recovery ccw. 
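* The discipline's erp_postaction callback is looked up and invoked to unwind the ERP chain for this request.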
2733 */ 2734 static void __dasd_process_erp(struct dasd_device *device, 2735 struct dasd_ccw_req *cqr) 2736 { 2737 dasd_erp_fn_t erp_fn; 2738 2739 if (cqr->status == DASD_CQR_DONE) 2740 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2741 else 2742 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2743 erp_fn = device->discipline->erp_postaction(cqr); 2744 erp_fn(cqr); 2745 } 2746 2747 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2748 { 2749 struct request *req; 2750 blk_status_t error = BLK_STS_OK; 2751 int status; 2752 2753 req = (struct request *) cqr->callback_data; 2754 dasd_profile_end(cqr->block, cqr, req); 2755 2756 status = cqr->block->base->discipline->free_cp(cqr, req); 2757 if (status < 0) 2758 error = errno_to_blk_status(status); 2759 else if (status == 0) { 2760 switch (cqr->intrc) { 2761 case -EPERM: 2762 error = BLK_STS_NEXUS; 2763 break; 2764 case -ENOLINK: 2765 error = BLK_STS_TRANSPORT; 2766 break; 2767 case -ETIMEDOUT: 2768 error = BLK_STS_TIMEOUT; 2769 break; 2770 default: 2771 error = BLK_STS_IOERR; 2772 break; 2773 } 2774 } 2775 2776 /* 2777 * We need to take care for ETIMEDOUT errors here since the 2778 * complete callback does not get called in this case. 2779 * Take care of all errors here and avoid additional code to 2780 * transfer the error value to the complete callback. 2781 */ 2782 if (error) { 2783 blk_mq_end_request(req, error); 2784 blk_mq_run_hw_queues(req->q, true); 2785 } else { 2786 blk_mq_complete_request(req); 2787 } 2788 } 2789 2790 /* 2791 * Process ccw request queue. 2792 */ 2793 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2794 struct list_head *final_queue) 2795 { 2796 struct list_head *l, *n; 2797 struct dasd_ccw_req *cqr; 2798 dasd_erp_fn_t erp_fn; 2799 unsigned long flags; 2800 struct dasd_device *base = block->base; 2801 2802 restart: 2803 /* Process request with final status. */ 2804 list_for_each_safe(l, n, &block->ccw_queue) { 2805 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2806 if (cqr->status != DASD_CQR_DONE && 2807 cqr->status != DASD_CQR_FAILED && 2808 cqr->status != DASD_CQR_NEED_ERP && 2809 cqr->status != DASD_CQR_TERMINATED) 2810 continue; 2811 2812 if (cqr->status == DASD_CQR_TERMINATED) { 2813 base->discipline->handle_terminated_request(cqr); 2814 goto restart; 2815 } 2816 2817 /* Process requests that may be recovered */ 2818 if (cqr->status == DASD_CQR_NEED_ERP) { 2819 erp_fn = base->discipline->erp_action(cqr); 2820 if (IS_ERR(erp_fn(cqr))) 2821 continue; 2822 goto restart; 2823 } 2824 2825 /* log sense for fatal error */ 2826 if (cqr->status == DASD_CQR_FAILED) { 2827 dasd_log_sense(cqr, &cqr->irb); 2828 } 2829 2830 /* First of all call extended error reporting. */ 2831 if (dasd_eer_enabled(base) && 2832 cqr->status == DASD_CQR_FAILED) { 2833 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2834 2835 /* restart request */ 2836 cqr->status = DASD_CQR_FILLED; 2837 cqr->retries = 255; 2838 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2839 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2840 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2841 flags); 2842 goto restart; 2843 } 2844 2845 /* Process finished ERP request. 
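A cqr with a non-NULL refers pointer heads an ERP chain; after __dasd_process_erp() the list may have changed, so the scan restarts.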
*/ 2846 if (cqr->refers) { 2847 __dasd_process_erp(base, cqr); 2848 goto restart; 2849 } 2850 2851 /* Rechain finished requests to final queue */ 2852 cqr->endclk = get_tod_clock(); 2853 list_move_tail(&cqr->blocklist, final_queue); 2854 } 2855 } 2856 2857 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2858 { 2859 dasd_schedule_block_bh(cqr->block); 2860 } 2861 2862 static void __dasd_block_start_head(struct dasd_block *block) 2863 { 2864 struct dasd_ccw_req *cqr; 2865 2866 if (list_empty(&block->ccw_queue)) 2867 return; 2868 /* We allways begin with the first requests on the queue, as some 2869 * of previously started requests have to be enqueued on a 2870 * dasd_device again for error recovery. 2871 */ 2872 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2873 if (cqr->status != DASD_CQR_FILLED) 2874 continue; 2875 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2876 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2877 cqr->status = DASD_CQR_FAILED; 2878 cqr->intrc = -EPERM; 2879 dasd_schedule_block_bh(block); 2880 continue; 2881 } 2882 /* Non-temporary stop condition will trigger fail fast */ 2883 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2884 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2885 (!dasd_eer_enabled(block->base))) { 2886 cqr->status = DASD_CQR_FAILED; 2887 cqr->intrc = -ENOLINK; 2888 dasd_schedule_block_bh(block); 2889 continue; 2890 } 2891 /* Don't try to start requests if device is stopped */ 2892 if (block->base->stopped) 2893 return; 2894 2895 /* just a fail safe check, should not happen */ 2896 if (!cqr->startdev) 2897 cqr->startdev = block->base; 2898 2899 /* make sure that the requests we submit find their way back */ 2900 cqr->callback = dasd_return_cqr_cb; 2901 2902 dasd_add_request_tail(cqr); 2903 } 2904 } 2905 2906 /* 2907 * Central dasd_block layer routine. Takes requests from the generic 2908 * block layer request queue, creates ccw requests, enqueues them on 2909 * a dasd_device and processes ccw requests that have been returned. 2910 */ 2911 static void dasd_block_tasklet(unsigned long data) 2912 { 2913 struct dasd_block *block = (struct dasd_block *) data; 2914 struct list_head final_queue; 2915 struct list_head *l, *n; 2916 struct dasd_ccw_req *cqr; 2917 struct dasd_queue *dq; 2918 2919 atomic_set(&block->tasklet_scheduled, 0); 2920 INIT_LIST_HEAD(&final_queue); 2921 spin_lock_irq(&block->queue_lock); 2922 /* Finish off requests on ccw queue */ 2923 __dasd_process_block_ccw_queue(block, &final_queue); 2924 spin_unlock_irq(&block->queue_lock); 2925 2926 /* Now call the callback function of requests with final status */ 2927 list_for_each_safe(l, n, &final_queue) { 2928 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2929 dq = cqr->dq; 2930 spin_lock_irq(&dq->lock); 2931 list_del_init(&cqr->blocklist); 2932 __dasd_cleanup_cqr(cqr); 2933 spin_unlock_irq(&dq->lock); 2934 } 2935 2936 spin_lock_irq(&block->queue_lock); 2937 /* Now check if the head of the ccw queue needs to be started. 
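__dasd_block_start_head() submits FILLED requests from the block queue to the base device via dasd_add_request_tail().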
*/ 2938 __dasd_block_start_head(block); 2939 spin_unlock_irq(&block->queue_lock); 2940 2941 if (waitqueue_active(&shutdown_waitq)) 2942 wake_up(&shutdown_waitq); 2943 dasd_put_device(block->base); 2944 } 2945 2946 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2947 { 2948 wake_up(&dasd_flush_wq); 2949 } 2950 2951 /* 2952 * Requeue a request back to the block request queue 2953 * only works for block requests 2954 */ 2955 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2956 { 2957 struct dasd_block *block = cqr->block; 2958 struct request *req; 2959 2960 if (!block) 2961 return -EINVAL; 2962 spin_lock_irq(&cqr->dq->lock); 2963 req = (struct request *) cqr->callback_data; 2964 blk_mq_requeue_request(req, false); 2965 spin_unlock_irq(&cqr->dq->lock); 2966 2967 return 0; 2968 } 2969 2970 /* 2971 * Go through all request on the dasd_block request queue, cancel them 2972 * on the respective dasd_device, and return them to the generic 2973 * block layer. 2974 */ 2975 static int dasd_flush_block_queue(struct dasd_block *block) 2976 { 2977 struct dasd_ccw_req *cqr, *n; 2978 int rc, i; 2979 struct list_head flush_queue; 2980 unsigned long flags; 2981 2982 INIT_LIST_HEAD(&flush_queue); 2983 spin_lock_bh(&block->queue_lock); 2984 rc = 0; 2985 restart: 2986 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2987 /* if this request currently owned by a dasd_device cancel it */ 2988 if (cqr->status >= DASD_CQR_QUEUED) 2989 rc = dasd_cancel_req(cqr); 2990 if (rc < 0) 2991 break; 2992 /* Rechain request (including erp chain) so it won't be 2993 * touched by the dasd_block_tasklet anymore. 2994 * Replace the callback so we notice when the request 2995 * is returned from the dasd_device layer. 2996 */ 2997 cqr->callback = _dasd_wake_block_flush_cb; 2998 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2999 list_move_tail(&cqr->blocklist, &flush_queue); 3000 if (i > 1) 3001 /* moved more than one request - need to restart */ 3002 goto restart; 3003 } 3004 spin_unlock_bh(&block->queue_lock); 3005 /* Now call the callback function of flushed requests */ 3006 restart_cb: 3007 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3008 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3009 /* Process finished ERP request. */ 3010 if (cqr->refers) { 3011 spin_lock_bh(&block->queue_lock); 3012 __dasd_process_erp(block->base, cqr); 3013 spin_unlock_bh(&block->queue_lock); 3014 /* restart list_for_xx loop since dasd_process_erp 3015 * might remove multiple elements */ 3016 goto restart_cb; 3017 } 3018 /* call the callback function */ 3019 spin_lock_irqsave(&cqr->dq->lock, flags); 3020 cqr->endclk = get_tod_clock(); 3021 list_del_init(&cqr->blocklist); 3022 __dasd_cleanup_cqr(cqr); 3023 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3024 } 3025 return rc; 3026 } 3027 3028 /* 3029 * Schedules a call to dasd_tasklet over the device tasklet. 3030 */ 3031 void dasd_schedule_block_bh(struct dasd_block *block) 3032 { 3033 /* Protect against rescheduling. */ 3034 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3035 return; 3036 /* life cycle of block is bound to it's base device */ 3037 dasd_get_device(block->base); 3038 tasklet_hi_schedule(&block->tasklet); 3039 } 3040 EXPORT_SYMBOL(dasd_schedule_block_bh); 3041 3042 3043 /* 3044 * SECTION: external block device operations 3045 * (request queue handling, open, release, etc.) 3046 */ 3047 3048 /* 3049 * Dasd request queue function. 
Called from ll_rw_blk.c 3050 */ 3051 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3052 const struct blk_mq_queue_data *qd) 3053 { 3054 struct dasd_block *block = hctx->queue->queuedata; 3055 struct dasd_queue *dq = hctx->driver_data; 3056 struct request *req = qd->rq; 3057 struct dasd_device *basedev; 3058 struct dasd_ccw_req *cqr; 3059 blk_status_t rc = BLK_STS_OK; 3060 3061 basedev = block->base; 3062 spin_lock_irq(&dq->lock); 3063 if (basedev->state < DASD_STATE_READY) { 3064 DBF_DEV_EVENT(DBF_ERR, basedev, 3065 "device not ready for request %p", req); 3066 rc = BLK_STS_IOERR; 3067 goto out; 3068 } 3069 3070 /* 3071 * if device is stopped do not fetch new requests 3072 * except failfast is active which will let requests fail 3073 * immediately in __dasd_block_start_head() 3074 */ 3075 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3076 DBF_DEV_EVENT(DBF_ERR, basedev, 3077 "device stopped request %p", req); 3078 rc = BLK_STS_RESOURCE; 3079 goto out; 3080 } 3081 3082 if (basedev->features & DASD_FEATURE_READONLY && 3083 rq_data_dir(req) == WRITE) { 3084 DBF_DEV_EVENT(DBF_ERR, basedev, 3085 "Rejecting write request %p", req); 3086 rc = BLK_STS_IOERR; 3087 goto out; 3088 } 3089 3090 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3091 (basedev->features & DASD_FEATURE_FAILFAST || 3092 blk_noretry_request(req))) { 3093 DBF_DEV_EVENT(DBF_ERR, basedev, 3094 "Rejecting failfast request %p", req); 3095 rc = BLK_STS_IOERR; 3096 goto out; 3097 } 3098 3099 cqr = basedev->discipline->build_cp(basedev, block, req); 3100 if (IS_ERR(cqr)) { 3101 if (PTR_ERR(cqr) == -EBUSY || 3102 PTR_ERR(cqr) == -ENOMEM || 3103 PTR_ERR(cqr) == -EAGAIN) { 3104 rc = BLK_STS_RESOURCE; 3105 goto out; 3106 } 3107 DBF_DEV_EVENT(DBF_ERR, basedev, 3108 "CCW creation failed (rc=%ld) on request %p", 3109 PTR_ERR(cqr), req); 3110 rc = BLK_STS_IOERR; 3111 goto out; 3112 } 3113 /* 3114 * Note: callback is set to dasd_return_cqr_cb in 3115 * __dasd_block_start_head to cover erp requests as well 3116 */ 3117 cqr->callback_data = req; 3118 cqr->status = DASD_CQR_FILLED; 3119 cqr->dq = dq; 3120 3121 blk_mq_start_request(req); 3122 spin_lock(&block->queue_lock); 3123 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3124 INIT_LIST_HEAD(&cqr->devlist); 3125 dasd_profile_start(block, cqr, req); 3126 dasd_schedule_block_bh(block); 3127 spin_unlock(&block->queue_lock); 3128 3129 out: 3130 spin_unlock_irq(&dq->lock); 3131 return rc; 3132 } 3133 3134 /* 3135 * Block timeout callback, called from the block layer 3136 * 3137 * Return values: 3138 * BLK_EH_RESET_TIMER if the request should be left running 3139 * BLK_EH_DONE if the request is handled or terminated 3140 * by the driver. 3141 */ 3142 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3143 { 3144 struct dasd_block *block = req->q->queuedata; 3145 struct dasd_device *device; 3146 struct dasd_ccw_req *cqr; 3147 unsigned long flags; 3148 int rc = 0; 3149 3150 cqr = blk_mq_rq_to_pdu(req); 3151 if (!cqr) 3152 return BLK_EH_DONE; 3153 3154 spin_lock_irqsave(&cqr->dq->lock, flags); 3155 device = cqr->startdev ? 
cqr->startdev : block->base; 3156 if (!device->blk_timeout) { 3157 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3158 return BLK_EH_RESET_TIMER; 3159 } 3160 DBF_DEV_EVENT(DBF_WARNING, device, 3161 " dasd_times_out cqr %p status %x", 3162 cqr, cqr->status); 3163 3164 spin_lock(&block->queue_lock); 3165 spin_lock(get_ccwdev_lock(device->cdev)); 3166 cqr->retries = -1; 3167 cqr->intrc = -ETIMEDOUT; 3168 if (cqr->status >= DASD_CQR_QUEUED) { 3169 rc = __dasd_cancel_req(cqr); 3170 } else if (cqr->status == DASD_CQR_FILLED || 3171 cqr->status == DASD_CQR_NEED_ERP) { 3172 cqr->status = DASD_CQR_TERMINATED; 3173 } else if (cqr->status == DASD_CQR_IN_ERP) { 3174 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3175 3176 list_for_each_entry_safe(searchcqr, nextcqr, 3177 &block->ccw_queue, blocklist) { 3178 tmpcqr = searchcqr; 3179 while (tmpcqr->refers) 3180 tmpcqr = tmpcqr->refers; 3181 if (tmpcqr != cqr) 3182 continue; 3183 /* searchcqr is an ERP request for cqr */ 3184 searchcqr->retries = -1; 3185 searchcqr->intrc = -ETIMEDOUT; 3186 if (searchcqr->status >= DASD_CQR_QUEUED) { 3187 rc = __dasd_cancel_req(searchcqr); 3188 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3189 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3190 searchcqr->status = DASD_CQR_TERMINATED; 3191 rc = 0; 3192 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3193 /* 3194 * Shouldn't happen; most recent ERP 3195 * request is at the front of queue 3196 */ 3197 continue; 3198 } 3199 break; 3200 } 3201 } 3202 spin_unlock(get_ccwdev_lock(device->cdev)); 3203 dasd_schedule_block_bh(block); 3204 spin_unlock(&block->queue_lock); 3205 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3206 3207 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3208 } 3209 3210 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3211 unsigned int idx) 3212 { 3213 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3214 3215 if (!dq) 3216 return -ENOMEM; 3217 3218 spin_lock_init(&dq->lock); 3219 hctx->driver_data = dq; 3220 3221 return 0; 3222 } 3223 3224 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3225 { 3226 kfree(hctx->driver_data); 3227 hctx->driver_data = NULL; 3228 } 3229 3230 static void dasd_request_done(struct request *req) 3231 { 3232 blk_mq_end_request(req, 0); 3233 blk_mq_run_hw_queues(req->q, true); 3234 } 3235 3236 static struct blk_mq_ops dasd_mq_ops = { 3237 .queue_rq = do_dasd_request, 3238 .complete = dasd_request_done, 3239 .timeout = dasd_times_out, 3240 .init_hctx = dasd_init_hctx, 3241 .exit_hctx = dasd_exit_hctx, 3242 }; 3243 3244 /* 3245 * Allocate and initialize request queue and default I/O scheduler. 3246 */ 3247 static int dasd_alloc_queue(struct dasd_block *block) 3248 { 3249 int rc; 3250 3251 block->tag_set.ops = &dasd_mq_ops; 3252 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3253 block->tag_set.nr_hw_queues = nr_hw_queues; 3254 block->tag_set.queue_depth = queue_depth; 3255 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3256 block->tag_set.numa_node = NUMA_NO_NODE; 3257 3258 rc = blk_mq_alloc_tag_set(&block->tag_set); 3259 if (rc) 3260 return rc; 3261 3262 block->request_queue = blk_mq_init_queue(&block->tag_set); 3263 if (IS_ERR(block->request_queue)) 3264 return PTR_ERR(block->request_queue); 3265 3266 block->request_queue->queuedata = block; 3267 3268 return 0; 3269 } 3270 3271 /* 3272 * Deactivate and free request queue. 
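* Releases the blk-mq request queue and frees the tag set that were allocated in dasd_alloc_queue().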
3273 */ 3274 static void dasd_free_queue(struct dasd_block *block) 3275 { 3276 if (block->request_queue) { 3277 blk_cleanup_queue(block->request_queue); 3278 blk_mq_free_tag_set(&block->tag_set); 3279 block->request_queue = NULL; 3280 } 3281 } 3282 3283 static int dasd_open(struct block_device *bdev, fmode_t mode) 3284 { 3285 struct dasd_device *base; 3286 int rc; 3287 3288 base = dasd_device_from_gendisk(bdev->bd_disk); 3289 if (!base) 3290 return -ENODEV; 3291 3292 atomic_inc(&base->block->open_count); 3293 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3294 rc = -ENODEV; 3295 goto unlock; 3296 } 3297 3298 if (!try_module_get(base->discipline->owner)) { 3299 rc = -EINVAL; 3300 goto unlock; 3301 } 3302 3303 if (dasd_probeonly) { 3304 dev_info(&base->cdev->dev, 3305 "Accessing the DASD failed because it is in " 3306 "probeonly mode\n"); 3307 rc = -EPERM; 3308 goto out; 3309 } 3310 3311 if (base->state <= DASD_STATE_BASIC) { 3312 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3313 " Cannot open unrecognized device"); 3314 rc = -ENODEV; 3315 goto out; 3316 } 3317 3318 if ((mode & FMODE_WRITE) && 3319 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3320 (base->features & DASD_FEATURE_READONLY))) { 3321 rc = -EROFS; 3322 goto out; 3323 } 3324 3325 dasd_put_device(base); 3326 return 0; 3327 3328 out: 3329 module_put(base->discipline->owner); 3330 unlock: 3331 atomic_dec(&base->block->open_count); 3332 dasd_put_device(base); 3333 return rc; 3334 } 3335 3336 static void dasd_release(struct gendisk *disk, fmode_t mode) 3337 { 3338 struct dasd_device *base = dasd_device_from_gendisk(disk); 3339 if (base) { 3340 atomic_dec(&base->block->open_count); 3341 module_put(base->discipline->owner); 3342 dasd_put_device(base); 3343 } 3344 } 3345 3346 /* 3347 * Return disk geometry. 3348 */ 3349 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3350 { 3351 struct dasd_device *base; 3352 3353 base = dasd_device_from_gendisk(bdev->bd_disk); 3354 if (!base) 3355 return -ENODEV; 3356 3357 if (!base->discipline || 3358 !base->discipline->fill_geometry) { 3359 dasd_put_device(base); 3360 return -EINVAL; 3361 } 3362 base->discipline->fill_geometry(base->block, geo); 3363 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3364 dasd_put_device(base); 3365 return 0; 3366 } 3367 3368 const struct block_device_operations 3369 dasd_device_operations = { 3370 .owner = THIS_MODULE, 3371 .open = dasd_open, 3372 .release = dasd_release, 3373 .ioctl = dasd_ioctl, 3374 .compat_ioctl = dasd_ioctl, 3375 .getgeo = dasd_getgeo, 3376 }; 3377 3378 /******************************************************************************* 3379 * end of block device operations 3380 */ 3381 3382 static void 3383 dasd_exit(void) 3384 { 3385 #ifdef CONFIG_PROC_FS 3386 dasd_proc_exit(); 3387 #endif 3388 dasd_eer_exit(); 3389 kmem_cache_destroy(dasd_page_cache); 3390 dasd_page_cache = NULL; 3391 dasd_gendisk_exit(); 3392 dasd_devmap_exit(); 3393 if (dasd_debug_area != NULL) { 3394 debug_unregister(dasd_debug_area); 3395 dasd_debug_area = NULL; 3396 } 3397 dasd_statistics_removeroot(); 3398 } 3399 3400 /* 3401 * SECTION: common functions for ccw_driver use 3402 */ 3403 3404 /* 3405 * Is the device read-only? 3406 * Note that this function does not report the setting of the 3407 * readonly device attribute, but how it is configured in z/VM. 
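* Uses DIAGNOSE 0x210 to query the virtual device characteristics and checks the read-only flag (0x80) in vrdcvfla.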
3408 */ 3409 int dasd_device_is_ro(struct dasd_device *device) 3410 { 3411 struct ccw_dev_id dev_id; 3412 struct diag210 diag_data; 3413 int rc; 3414 3415 if (!MACHINE_IS_VM) 3416 return 0; 3417 ccw_device_get_id(device->cdev, &dev_id); 3418 memset(&diag_data, 0, sizeof(diag_data)); 3419 diag_data.vrdcdvno = dev_id.devno; 3420 diag_data.vrdclen = sizeof(diag_data); 3421 rc = diag210(&diag_data); 3422 if (rc == 0 || rc == 2) { 3423 return diag_data.vrdcvfla & 0x80; 3424 } else { 3425 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3426 dev_id.devno, rc); 3427 return 0; 3428 } 3429 } 3430 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3431 3432 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3433 { 3434 struct ccw_device *cdev = data; 3435 int ret; 3436 3437 ret = ccw_device_set_online(cdev); 3438 if (ret) 3439 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3440 dev_name(&cdev->dev), ret); 3441 } 3442 3443 /* 3444 * Initial attempt at a probe function. this can be simplified once 3445 * the other detection code is gone. 3446 */ 3447 int dasd_generic_probe(struct ccw_device *cdev, 3448 struct dasd_discipline *discipline) 3449 { 3450 int ret; 3451 3452 ret = dasd_add_sysfs_files(cdev); 3453 if (ret) { 3454 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3455 "dasd_generic_probe: could not add " 3456 "sysfs entries"); 3457 return ret; 3458 } 3459 cdev->handler = &dasd_int_handler; 3460 3461 /* 3462 * Automatically online either all dasd devices (dasd_autodetect) 3463 * or all devices specified with dasd= parameters during 3464 * initial probe. 3465 */ 3466 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3467 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3468 async_schedule(dasd_generic_auto_online, cdev); 3469 return 0; 3470 } 3471 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3472 3473 void dasd_generic_free_discipline(struct dasd_device *device) 3474 { 3475 /* Forget the discipline information. */ 3476 if (device->discipline) { 3477 if (device->discipline->uncheck_device) 3478 device->discipline->uncheck_device(device); 3479 module_put(device->discipline->owner); 3480 device->discipline = NULL; 3481 } 3482 if (device->base_discipline) { 3483 module_put(device->base_discipline->owner); 3484 device->base_discipline = NULL; 3485 } 3486 } 3487 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3488 3489 /* 3490 * This will one day be called from a global not_oper handler. 3491 * It is also used by driver_unregister during module unload. 3492 */ 3493 void dasd_generic_remove(struct ccw_device *cdev) 3494 { 3495 struct dasd_device *device; 3496 struct dasd_block *block; 3497 3498 cdev->handler = NULL; 3499 3500 device = dasd_device_from_cdev(cdev); 3501 if (IS_ERR(device)) { 3502 dasd_remove_sysfs_files(cdev); 3503 return; 3504 } 3505 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3506 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3507 /* Already doing offline processing */ 3508 dasd_put_device(device); 3509 dasd_remove_sysfs_files(cdev); 3510 return; 3511 } 3512 /* 3513 * This device is removed unconditionally. Set offline 3514 * flag to prevent dasd_open from opening it while it is 3515 * no quite down yet. 3516 */ 3517 dasd_set_target_state(device, DASD_STATE_NEW); 3518 /* dasd_delete_device destroys the device reference. 
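The block pointer is saved beforehand so the block structure can still be freed afterwards.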
*/ 3519 block = device->block; 3520 dasd_delete_device(device); 3521 /* 3522 * life cycle of block is bound to device, so delete it after 3523 * device was safely removed 3524 */ 3525 if (block) 3526 dasd_free_block(block); 3527 3528 dasd_remove_sysfs_files(cdev); 3529 } 3530 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3531 3532 /* 3533 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3534 * the device is detected for the first time and is supposed to be used 3535 * or the user has started activation through sysfs. 3536 */ 3537 int dasd_generic_set_online(struct ccw_device *cdev, 3538 struct dasd_discipline *base_discipline) 3539 { 3540 struct dasd_discipline *discipline; 3541 struct dasd_device *device; 3542 int rc; 3543 3544 /* first online clears initial online feature flag */ 3545 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3546 device = dasd_create_device(cdev); 3547 if (IS_ERR(device)) 3548 return PTR_ERR(device); 3549 3550 discipline = base_discipline; 3551 if (device->features & DASD_FEATURE_USEDIAG) { 3552 if (!dasd_diag_discipline_pointer) { 3553 /* Try to load the required module. */ 3554 rc = request_module(DASD_DIAG_MOD); 3555 if (rc) { 3556 pr_warn("%s Setting the DASD online failed " 3557 "because the required module %s " 3558 "could not be loaded (rc=%d)\n", 3559 dev_name(&cdev->dev), DASD_DIAG_MOD, 3560 rc); 3561 dasd_delete_device(device); 3562 return -ENODEV; 3563 } 3564 } 3565 /* Module init could have failed, so check again here after 3566 * request_module(). */ 3567 if (!dasd_diag_discipline_pointer) { 3568 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3569 dev_name(&cdev->dev)); 3570 dasd_delete_device(device); 3571 return -ENODEV; 3572 } 3573 discipline = dasd_diag_discipline_pointer; 3574 } 3575 if (!try_module_get(base_discipline->owner)) { 3576 dasd_delete_device(device); 3577 return -EINVAL; 3578 } 3579 if (!try_module_get(discipline->owner)) { 3580 module_put(base_discipline->owner); 3581 dasd_delete_device(device); 3582 return -EINVAL; 3583 } 3584 device->base_discipline = base_discipline; 3585 device->discipline = discipline; 3586 3587 /* check_device will allocate block device if necessary */ 3588 rc = discipline->check_device(device); 3589 if (rc) { 3590 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3591 dev_name(&cdev->dev), discipline->name, rc); 3592 module_put(discipline->owner); 3593 module_put(base_discipline->owner); 3594 dasd_delete_device(device); 3595 return rc; 3596 } 3597 3598 dasd_set_target_state(device, DASD_STATE_ONLINE); 3599 if (device->state <= DASD_STATE_KNOWN) { 3600 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3601 dev_name(&cdev->dev)); 3602 rc = -ENODEV; 3603 dasd_set_target_state(device, DASD_STATE_NEW); 3604 if (device->block) 3605 dasd_free_block(device->block); 3606 dasd_delete_device(device); 3607 } else 3608 pr_debug("dasd_generic device %s found\n", 3609 dev_name(&cdev->dev)); 3610 3611 wait_event(dasd_init_waitq, _wait_for_device(device)); 3612 3613 dasd_put_device(device); 3614 return rc; 3615 } 3616 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3617 3618 int dasd_generic_set_offline(struct ccw_device *cdev) 3619 { 3620 struct dasd_device *device; 3621 struct dasd_block *block; 3622 int max_count, open_count, rc; 3623 unsigned long flags; 3624 3625 rc = 0; 3626 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3627 device = dasd_device_from_cdev_locked(cdev); 3628 if (IS_ERR(device)) { 3629 
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3630 return PTR_ERR(device); 3631 } 3632 3633 /* 3634 * We must make sure that this device is currently not in use. 3635 * The open_count is increased for every opener, that includes 3636 * the blkdev_get in dasd_scan_partitions. We are only interested 3637 * in the other openers. 3638 */ 3639 if (device->block) { 3640 max_count = device->block->bdev ? 0 : -1; 3641 open_count = atomic_read(&device->block->open_count); 3642 if (open_count > max_count) { 3643 if (open_count > 0) 3644 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3645 dev_name(&cdev->dev), open_count); 3646 else 3647 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3648 dev_name(&cdev->dev)); 3649 rc = -EBUSY; 3650 goto out_err; 3651 } 3652 } 3653 3654 /* 3655 * Test if the offline processing is already running and exit if so. 3656 * If a safe offline is being processed this could only be a normal 3657 * offline that should be able to overtake the safe offline and 3658 * cancel any I/O we do not want to wait for any longer 3659 */ 3660 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3661 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3662 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3663 &device->flags); 3664 } else { 3665 rc = -EBUSY; 3666 goto out_err; 3667 } 3668 } 3669 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3670 3671 /* 3672 * if safe_offline is called set safe_offline_running flag and 3673 * clear safe_offline so that a call to normal offline 3674 * can overrun safe_offline processing 3675 */ 3676 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3677 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3678 /* need to unlock here to wait for outstanding I/O */ 3679 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3680 /* 3681 * If we want to set the device safe offline all IO operations 3682 * should be finished before continuing the offline process 3683 * so sync bdev first and then wait for our queues to become 3684 * empty 3685 */ 3686 if (device->block) { 3687 rc = fsync_bdev(device->block->bdev); 3688 if (rc != 0) 3689 goto interrupted; 3690 } 3691 dasd_schedule_device_bh(device); 3692 rc = wait_event_interruptible(shutdown_waitq, 3693 _wait_for_empty_queues(device)); 3694 if (rc != 0) 3695 goto interrupted; 3696 3697 /* 3698 * check if a normal offline process overtook the offline 3699 * processing in this case simply do nothing beside returning 3700 * that we got interrupted 3701 * otherwise mark safe offline as not running any longer and 3702 * continue with normal offline 3703 */ 3704 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3705 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3706 rc = -ERESTARTSYS; 3707 goto out_err; 3708 } 3709 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3710 } 3711 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3712 3713 dasd_set_target_state(device, DASD_STATE_NEW); 3714 /* dasd_delete_device destroys the device reference. 
*/ 3715 block = device->block; 3716 dasd_delete_device(device); 3717 /* 3718 * life cycle of block is bound to device, so delete it after 3719 * device was safely removed 3720 */ 3721 if (block) 3722 dasd_free_block(block); 3723 3724 return 0; 3725 3726 interrupted: 3727 /* interrupted by signal */ 3728 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3729 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3730 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3731 out_err: 3732 dasd_put_device(device); 3733 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3734 return rc; 3735 } 3736 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3737 3738 int dasd_generic_last_path_gone(struct dasd_device *device) 3739 { 3740 struct dasd_ccw_req *cqr; 3741 3742 dev_warn(&device->cdev->dev, "No operational channel path is left " 3743 "for the device\n"); 3744 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3745 /* First of all call extended error reporting. */ 3746 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3747 3748 if (device->state < DASD_STATE_BASIC) 3749 return 0; 3750 /* Device is active. We want to keep it. */ 3751 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3752 if ((cqr->status == DASD_CQR_IN_IO) || 3753 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3754 cqr->status = DASD_CQR_QUEUED; 3755 cqr->retries++; 3756 } 3757 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3758 dasd_device_clear_timer(device); 3759 dasd_schedule_device_bh(device); 3760 return 1; 3761 } 3762 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3763 3764 int dasd_generic_path_operational(struct dasd_device *device) 3765 { 3766 dev_info(&device->cdev->dev, "A channel path to the device has become " 3767 "operational\n"); 3768 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3769 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3770 if (device->stopped & DASD_UNRESUMED_PM) { 3771 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 3772 dasd_restore_device(device); 3773 return 1; 3774 } 3775 dasd_schedule_device_bh(device); 3776 if (device->block) { 3777 dasd_schedule_block_bh(device->block); 3778 if (device->block->request_queue) 3779 blk_mq_run_hw_queues(device->block->request_queue, 3780 true); 3781 } 3782 3783 if (!device->stopped) 3784 wake_up(&generic_waitq); 3785 3786 return 1; 3787 } 3788 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3789 3790 int dasd_generic_notify(struct ccw_device *cdev, int event) 3791 { 3792 struct dasd_device *device; 3793 int ret; 3794 3795 device = dasd_device_from_cdev_locked(cdev); 3796 if (IS_ERR(device)) 3797 return 0; 3798 ret = 0; 3799 switch (event) { 3800 case CIO_GONE: 3801 case CIO_BOXED: 3802 case CIO_NO_PATH: 3803 dasd_path_no_path(device); 3804 ret = dasd_generic_last_path_gone(device); 3805 break; 3806 case CIO_OPER: 3807 ret = 1; 3808 if (dasd_path_get_opm(device)) 3809 ret = dasd_generic_path_operational(device); 3810 break; 3811 } 3812 dasd_put_device(device); 3813 return ret; 3814 } 3815 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3816 3817 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3818 { 3819 struct dasd_device *device; 3820 int chp, oldopm, hpfpm, ifccpm; 3821 3822 device = dasd_device_from_cdev_locked(cdev); 3823 if (IS_ERR(device)) 3824 return; 3825 3826 oldopm = dasd_path_get_opm(device); 3827 for (chp = 0; chp < 8; chp++) { 3828 if (path_event[chp] & PE_PATH_GONE) { 3829 dasd_path_notoper(device, chp); 3830 } 3831 if (path_event[chp] & PE_PATH_AVAILABLE) { 3832 
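/* a previously gone path is reported available again; record it and kick the device bh */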
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
	struct dasd_device *device;
	int chp, oldopm, hpfpm, ifccpm;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return;

	oldopm = dasd_path_get_opm(device);
	for (chp = 0; chp < 8; chp++) {
		if (path_event[chp] & PE_PATH_GONE)
			dasd_path_notoper(device, chp);
		if (path_event[chp] & PE_PATH_AVAILABLE) {
			dasd_path_available(device, chp);
			dasd_schedule_device_bh(device);
		}
		if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
			if (!dasd_path_is_operational(device, chp) &&
			    !dasd_path_need_verify(device, chp)) {
				/*
				 * we can not establish a pathgroup on an
				 * unavailable path, so trigger a path
				 * verification first
				 */
				dasd_path_available(device, chp);
				dasd_schedule_device_bh(device);
			}
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "Pathgroup re-established\n");
			if (device->discipline->kick_validate)
				device->discipline->kick_validate(device);
		}
	}
	hpfpm = dasd_path_get_hpfpm(device);
	ifccpm = dasd_path_get_ifccpm(device);
	if (!dasd_path_get_opm(device) && hpfpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to HPF errors;
		 * disable HPF altogether and use the path(s) again.
		 */
		if (device->discipline->disable_hpf)
			device->discipline->disable_hpf(device);
		dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
		dasd_path_set_tbvpm(device, hpfpm);
		dasd_schedule_device_bh(device);
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors;
		 * trigger path verification on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

/*
 * Add verified channel paths to the operational path mask; if these are
 * the first operational paths, resume normal request processing.
 */
int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

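/*
 * A request failed because the extent pool of the device ran out of space:
 * requeue the failing request and stop the device until space is available
 * again.
 */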
void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

/*
 * Extent pool space is available again: remove the no-space stop bit and
 * resume request processing.
 */
void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to the requeue queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing requests to the block layer only works for
		 * block device requests.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again;
		 * all erp requests (cqr->refers) have a cqr->block
		 * pointer copied from the original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, they are internal requests and go back to
	 * the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* move the remaining requests back to the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

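/*
 * Schedule the requeueing of all requests on the kernel work queue. The
 * device reference taken here is dropped again in do_requeue_requests(),
 * or right away if the work could not be scheduled.
 */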
void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

/*
 * Power management freeze: stop new I/O and requeue all pending requests.
 */
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* mark device as suspended */
	set_bit(DASD_FLAG_SUSPENDED, &device->flags);

	if (device->discipline->freeze)
		device->discipline->freeze(device);

	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);

	return dasd_generic_requeue_all_requests(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

/*
 * Power management restore: allow I/O again and let the discipline restore
 * the device state.
 */
int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * Call the discipline restore function; if the device is stopped,
	 * do nothing, e.g. for disconnected devices.
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * If the resume failed for the DASD, we put it in
		 * an UNRESUMED stop state.
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}

	clear_bit(DASD_FLAG_SUSPENDED, &device->flags);
	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

/*
 * Build a channel program to read the device characteristics (RDC).
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

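/*
 * Read the device characteristics (RDC data) synchronously and copy the
 * result into the buffer provided by the caller.
 */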
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

/*
 * Wait until all outstanding requests have been processed before the
 * device is shut down.
 */
void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);