// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
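 * Besides the dasd_device itself three memory pools are set up: two pages
 * for normal ccw requests, one page for error recovery requests and two
 * pages for ESE format requests, so that channel programs can be built
 * from preallocated memory.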
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Set up the gendisk, debugfs entries and the debug area for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the gendisk, debugfs entries and the debug area for the device.
 * Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
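 * The discipline may veto the transition in its online_to_ready callback;
 * otherwise the device falls back to the READY state and, unless it runs
 * in raw-track access mode, user space is notified with a change uevent.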
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until the target state is reached.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the device to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
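 * Only the channel queue length histograms are updated here; the remaining
 * counters are added after completion in dasd_profile_end().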
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			    \
{								    \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		;						    \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

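/*
 * Copy a string from user space into a freshly allocated kernel buffer and
 * terminate it. The caller is responsible for freeing the buffer with
 * vfree().
 */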
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

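/*
 * Create the debugfs root directory for the dasd statistics and the
 * directory and statistics file for the global profile below it.
 */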
static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

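/*
 * Allocate a ccw request from the preallocated ESE chunk pool of the device.
 * This pool is reserved for the format requests built while handling I/O on
 * ESE (thin-provisioned) volumes.
 */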
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
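 * The interrupt parameter is the address of the dasd_ccw_req that started
 * the I/O, or 0 for an unsolicited interrupt.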
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for an attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	   device->state != device->target ||
	   !device->discipline->check_for_device_change){
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
*/ 1905 if (cqr->status == DASD_CQR_QUEUED || 1906 cqr->status == DASD_CQR_IN_IO || 1907 cqr->status == DASD_CQR_CLEAR_PENDING) 1908 continue; 1909 if (cqr->status == DASD_CQR_ERROR) { 1910 __dasd_device_recovery(device, cqr); 1911 } 1912 /* Rechain finished requests to final queue */ 1913 list_move_tail(&cqr->devlist, final_queue); 1914 } 1915 } 1916 1917 static void __dasd_process_cqr(struct dasd_device *device, 1918 struct dasd_ccw_req *cqr) 1919 { 1920 char errorstring[ERRORLENGTH]; 1921 1922 switch (cqr->status) { 1923 case DASD_CQR_SUCCESS: 1924 cqr->status = DASD_CQR_DONE; 1925 break; 1926 case DASD_CQR_ERROR: 1927 cqr->status = DASD_CQR_NEED_ERP; 1928 break; 1929 case DASD_CQR_CLEARED: 1930 cqr->status = DASD_CQR_TERMINATED; 1931 break; 1932 default: 1933 /* internal error 12 - wrong cqr status*/ 1934 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1935 dev_err(&device->cdev->dev, 1936 "An error occurred in the DASD device driver, " 1937 "reason=%s\n", errorstring); 1938 BUG(); 1939 } 1940 if (cqr->callback) 1941 cqr->callback(cqr, cqr->callback_data); 1942 } 1943 1944 /* 1945 * the cqrs from the final queue are returned to the upper layer 1946 * by setting a dasd_block state and calling the callback function 1947 */ 1948 static void __dasd_device_process_final_queue(struct dasd_device *device, 1949 struct list_head *final_queue) 1950 { 1951 struct list_head *l, *n; 1952 struct dasd_ccw_req *cqr; 1953 struct dasd_block *block; 1954 1955 list_for_each_safe(l, n, final_queue) { 1956 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1957 list_del_init(&cqr->devlist); 1958 block = cqr->block; 1959 if (!block) { 1960 __dasd_process_cqr(device, cqr); 1961 } else { 1962 spin_lock_bh(&block->queue_lock); 1963 __dasd_process_cqr(device, cqr); 1964 spin_unlock_bh(&block->queue_lock); 1965 } 1966 } 1967 } 1968 1969 /* 1970 * Take a look at the first request on the ccw queue and check 1971 * if it reached its expire time. If so, terminate the IO. 
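 * Only the head of the queue is considered.  If the request cannot be
 * terminated right away, the expiry is pushed out and the termination
 * is retried in five seconds.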
1972 */ 1973 static void __dasd_device_check_expire(struct dasd_device *device) 1974 { 1975 struct dasd_ccw_req *cqr; 1976 1977 if (list_empty(&device->ccw_queue)) 1978 return; 1979 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1980 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1981 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1982 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1983 /* 1984 * IO in safe offline processing should not 1985 * run out of retries 1986 */ 1987 cqr->retries++; 1988 } 1989 if (device->discipline->term_IO(cqr) != 0) { 1990 /* Hmpf, try again in 5 sec */ 1991 dev_err(&device->cdev->dev, 1992 "cqr %p timed out (%lus) but cannot be " 1993 "ended, retrying in 5 s\n", 1994 cqr, (cqr->expires/HZ)); 1995 cqr->expires += 5*HZ; 1996 dasd_device_set_timer(device, 5*HZ); 1997 } else { 1998 dev_err(&device->cdev->dev, 1999 "cqr %p timed out (%lus), %i retries " 2000 "remaining\n", cqr, (cqr->expires/HZ), 2001 cqr->retries); 2002 } 2003 } 2004 } 2005 2006 /* 2007 * return 1 when device is not eligible for IO 2008 */ 2009 static int __dasd_device_is_unusable(struct dasd_device *device, 2010 struct dasd_ccw_req *cqr) 2011 { 2012 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2013 2014 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2015 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2016 /* 2017 * dasd is being set offline 2018 * but it is no safe offline where we have to allow I/O 2019 */ 2020 return 1; 2021 } 2022 if (device->stopped) { 2023 if (device->stopped & mask) { 2024 /* stopped and CQR will not change that. */ 2025 return 1; 2026 } 2027 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2028 /* CQR is not able to change device to 2029 * operational. */ 2030 return 1; 2031 } 2032 /* CQR required to get device operational. */ 2033 } 2034 return 0; 2035 } 2036 2037 /* 2038 * Take a look at the first request on the ccw queue and check 2039 * if it needs to be started. 
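 * Only a request in DASD_CQR_QUEUED state is started.  If the device is
 * currently unable to do I/O, the request is handed back to the upper
 * layer with -EAGAIN instead.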
2040 */ 2041 static void __dasd_device_start_head(struct dasd_device *device) 2042 { 2043 struct dasd_ccw_req *cqr; 2044 int rc; 2045 2046 if (list_empty(&device->ccw_queue)) 2047 return; 2048 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2049 if (cqr->status != DASD_CQR_QUEUED) 2050 return; 2051 /* if device is not usable return request to upper layer */ 2052 if (__dasd_device_is_unusable(device, cqr)) { 2053 cqr->intrc = -EAGAIN; 2054 cqr->status = DASD_CQR_CLEARED; 2055 dasd_schedule_device_bh(device); 2056 return; 2057 } 2058 2059 rc = device->discipline->start_IO(cqr); 2060 if (rc == 0) 2061 dasd_device_set_timer(device, cqr->expires); 2062 else if (rc == -EACCES) { 2063 dasd_schedule_device_bh(device); 2064 } else 2065 /* Hmpf, try again in 1/2 sec */ 2066 dasd_device_set_timer(device, 50); 2067 } 2068 2069 static void __dasd_device_check_path_events(struct dasd_device *device) 2070 { 2071 __u8 tbvpm, fcsecpm; 2072 int rc; 2073 2074 tbvpm = dasd_path_get_tbvpm(device); 2075 fcsecpm = dasd_path_get_fcsecpm(device); 2076 2077 if (!tbvpm && !fcsecpm) 2078 return; 2079 2080 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2081 return; 2082 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2083 if (rc) { 2084 dasd_device_set_timer(device, 50); 2085 } else { 2086 dasd_path_clear_all_verify(device); 2087 dasd_path_clear_all_fcsec(device); 2088 } 2089 }; 2090 2091 /* 2092 * Go through all request on the dasd_device request queue, 2093 * terminate them on the cdev if necessary, and return them to the 2094 * submitting layer via callback. 2095 * Note: 2096 * Make sure that all 'submitting layers' still exist when 2097 * this function is called!. In other words, when 'device' is a base 2098 * device then all block layer requests must have been removed before 2099 * via dasd_flush_block_queue. 2100 */ 2101 int dasd_flush_device_queue(struct dasd_device *device) 2102 { 2103 struct dasd_ccw_req *cqr, *n; 2104 int rc; 2105 struct list_head flush_queue; 2106 2107 INIT_LIST_HEAD(&flush_queue); 2108 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2109 rc = 0; 2110 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2111 /* Check status and move request to flush_queue */ 2112 switch (cqr->status) { 2113 case DASD_CQR_IN_IO: 2114 rc = device->discipline->term_IO(cqr); 2115 if (rc) { 2116 /* unable to terminate requeust */ 2117 dev_err(&device->cdev->dev, 2118 "Flushing the DASD request queue " 2119 "failed for request %p\n", cqr); 2120 /* stop flush processing */ 2121 goto finished; 2122 } 2123 break; 2124 case DASD_CQR_QUEUED: 2125 cqr->stopclk = get_tod_clock(); 2126 cqr->status = DASD_CQR_CLEARED; 2127 break; 2128 default: /* no need to modify the others */ 2129 break; 2130 } 2131 list_move_tail(&cqr->devlist, &flush_queue); 2132 } 2133 finished: 2134 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2135 /* 2136 * After this point all requests must be in state CLEAR_PENDING, 2137 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2138 * one of the others. 
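 * The interrupt handler wakes dasd_flush_wq once a pending clear has
 * completed, which ends the wait below.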
2139 */ 2140 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2141 wait_event(dasd_flush_wq, 2142 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2143 /* 2144 * Now set each request back to TERMINATED, DONE or NEED_ERP 2145 * and call the callback function of flushed requests 2146 */ 2147 __dasd_device_process_final_queue(device, &flush_queue); 2148 return rc; 2149 } 2150 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2151 2152 /* 2153 * Acquire the device lock and process queues for the device. 2154 */ 2155 static void dasd_device_tasklet(unsigned long data) 2156 { 2157 struct dasd_device *device = (struct dasd_device *) data; 2158 struct list_head final_queue; 2159 2160 atomic_set (&device->tasklet_scheduled, 0); 2161 INIT_LIST_HEAD(&final_queue); 2162 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2163 /* Check expire time of first request on the ccw queue. */ 2164 __dasd_device_check_expire(device); 2165 /* find final requests on ccw queue */ 2166 __dasd_device_process_ccw_queue(device, &final_queue); 2167 __dasd_device_check_path_events(device); 2168 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2169 /* Now call the callback function of requests with final status */ 2170 __dasd_device_process_final_queue(device, &final_queue); 2171 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2172 /* Now check if the head of the ccw queue needs to be started. */ 2173 __dasd_device_start_head(device); 2174 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2175 if (waitqueue_active(&shutdown_waitq)) 2176 wake_up(&shutdown_waitq); 2177 dasd_put_device(device); 2178 } 2179 2180 /* 2181 * Schedules a call to dasd_tasklet over the device tasklet. 2182 */ 2183 void dasd_schedule_device_bh(struct dasd_device *device) 2184 { 2185 /* Protect against rescheduling. */ 2186 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2187 return; 2188 dasd_get_device(device); 2189 tasklet_hi_schedule(&device->tasklet); 2190 } 2191 EXPORT_SYMBOL(dasd_schedule_device_bh); 2192 2193 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2194 { 2195 device->stopped |= bits; 2196 } 2197 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2198 2199 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2200 { 2201 device->stopped &= ~bits; 2202 if (!device->stopped) 2203 wake_up(&generic_waitq); 2204 } 2205 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2206 2207 /* 2208 * Queue a request to the head of the device ccw_queue. 2209 * Start the I/O if possible. 2210 */ 2211 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2212 { 2213 struct dasd_device *device; 2214 unsigned long flags; 2215 2216 device = cqr->startdev; 2217 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2218 cqr->status = DASD_CQR_QUEUED; 2219 list_add(&cqr->devlist, &device->ccw_queue); 2220 /* let the bh start the request to keep them in order */ 2221 dasd_schedule_device_bh(device); 2222 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2223 } 2224 EXPORT_SYMBOL(dasd_add_request_head); 2225 2226 /* 2227 * Queue a request to the tail of the device ccw_queue. 2228 * Start the I/O if possible. 
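 * The request is only marked DASD_CQR_QUEUED here; the actual start is
 * left to the device tasklet so that ordering is preserved.  A typical
 * synchronous caller looks like this (sketch, see _dasd_sleep_on()):
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));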
2229 */ 2230 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2231 { 2232 struct dasd_device *device; 2233 unsigned long flags; 2234 2235 device = cqr->startdev; 2236 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2237 cqr->status = DASD_CQR_QUEUED; 2238 list_add_tail(&cqr->devlist, &device->ccw_queue); 2239 /* let the bh start the request to keep them in order */ 2240 dasd_schedule_device_bh(device); 2241 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2242 } 2243 EXPORT_SYMBOL(dasd_add_request_tail); 2244 2245 /* 2246 * Wakeup helper for the 'sleep_on' functions. 2247 */ 2248 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2249 { 2250 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2251 cqr->callback_data = DASD_SLEEPON_END_TAG; 2252 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2253 wake_up(&generic_waitq); 2254 } 2255 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2256 2257 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2258 { 2259 struct dasd_device *device; 2260 int rc; 2261 2262 device = cqr->startdev; 2263 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2264 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2265 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2266 return rc; 2267 } 2268 2269 /* 2270 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2271 */ 2272 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2273 { 2274 struct dasd_device *device; 2275 dasd_erp_fn_t erp_fn; 2276 2277 if (cqr->status == DASD_CQR_FILLED) 2278 return 0; 2279 device = cqr->startdev; 2280 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2281 if (cqr->status == DASD_CQR_TERMINATED) { 2282 device->discipline->handle_terminated_request(cqr); 2283 return 1; 2284 } 2285 if (cqr->status == DASD_CQR_NEED_ERP) { 2286 erp_fn = device->discipline->erp_action(cqr); 2287 erp_fn(cqr); 2288 return 1; 2289 } 2290 if (cqr->status == DASD_CQR_FAILED) 2291 dasd_log_sense(cqr, &cqr->irb); 2292 if (cqr->refers) { 2293 __dasd_process_erp(device, cqr); 2294 return 1; 2295 } 2296 } 2297 return 0; 2298 } 2299 2300 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2301 { 2302 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2303 if (cqr->refers) /* erp is not done yet */ 2304 return 1; 2305 return ((cqr->status != DASD_CQR_DONE) && 2306 (cqr->status != DASD_CQR_FAILED)); 2307 } else 2308 return (cqr->status == DASD_CQR_FILLED); 2309 } 2310 2311 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2312 { 2313 struct dasd_device *device; 2314 int rc; 2315 struct list_head ccw_queue; 2316 struct dasd_ccw_req *cqr; 2317 2318 INIT_LIST_HEAD(&ccw_queue); 2319 maincqr->status = DASD_CQR_FILLED; 2320 device = maincqr->startdev; 2321 list_add(&maincqr->blocklist, &ccw_queue); 2322 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2323 cqr = list_first_entry(&ccw_queue, 2324 struct dasd_ccw_req, blocklist)) { 2325 2326 if (__dasd_sleep_on_erp(cqr)) 2327 continue; 2328 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2329 continue; 2330 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2331 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2332 cqr->status = DASD_CQR_FAILED; 2333 cqr->intrc = -EPERM; 2334 continue; 2335 } 2336 /* Non-temporary stop condition will trigger fail fast */ 2337 if (device->stopped & ~DASD_STOPPED_PENDING && 2338 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2339 (!dasd_eer_enabled(device))) { 2340 cqr->status = DASD_CQR_FAILED; 2341 cqr->intrc = -ENOLINK; 
2342 continue; 2343 } 2344 /* 2345 * Don't try to start requests if device is in 2346 * offline processing, it might wait forever 2347 */ 2348 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2349 cqr->status = DASD_CQR_FAILED; 2350 cqr->intrc = -ENODEV; 2351 continue; 2352 } 2353 /* 2354 * Don't try to start requests if device is stopped 2355 * except path verification requests 2356 */ 2357 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2358 if (interruptible) { 2359 rc = wait_event_interruptible( 2360 generic_waitq, !(device->stopped)); 2361 if (rc == -ERESTARTSYS) { 2362 cqr->status = DASD_CQR_FAILED; 2363 maincqr->intrc = rc; 2364 continue; 2365 } 2366 } else 2367 wait_event(generic_waitq, !(device->stopped)); 2368 } 2369 if (!cqr->callback) 2370 cqr->callback = dasd_wakeup_cb; 2371 2372 cqr->callback_data = DASD_SLEEPON_START_TAG; 2373 dasd_add_request_tail(cqr); 2374 if (interruptible) { 2375 rc = wait_event_interruptible( 2376 generic_waitq, _wait_for_wakeup(cqr)); 2377 if (rc == -ERESTARTSYS) { 2378 dasd_cancel_req(cqr); 2379 /* wait (non-interruptible) for final status */ 2380 wait_event(generic_waitq, 2381 _wait_for_wakeup(cqr)); 2382 cqr->status = DASD_CQR_FAILED; 2383 maincqr->intrc = rc; 2384 continue; 2385 } 2386 } else 2387 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2388 } 2389 2390 maincqr->endclk = get_tod_clock(); 2391 if ((maincqr->status != DASD_CQR_DONE) && 2392 (maincqr->intrc != -ERESTARTSYS)) 2393 dasd_log_sense(maincqr, &maincqr->irb); 2394 if (maincqr->status == DASD_CQR_DONE) 2395 rc = 0; 2396 else if (maincqr->intrc) 2397 rc = maincqr->intrc; 2398 else 2399 rc = -EIO; 2400 return rc; 2401 } 2402 2403 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2404 { 2405 struct dasd_ccw_req *cqr; 2406 2407 list_for_each_entry(cqr, ccw_queue, blocklist) { 2408 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2409 return 0; 2410 } 2411 2412 return 1; 2413 } 2414 2415 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2416 { 2417 struct dasd_device *device; 2418 struct dasd_ccw_req *cqr, *n; 2419 u8 *sense = NULL; 2420 int rc; 2421 2422 retry: 2423 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2424 device = cqr->startdev; 2425 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2426 continue; 2427 2428 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2429 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2430 cqr->status = DASD_CQR_FAILED; 2431 cqr->intrc = -EPERM; 2432 continue; 2433 } 2434 /*Non-temporary stop condition will trigger fail fast*/ 2435 if (device->stopped & ~DASD_STOPPED_PENDING && 2436 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2437 !dasd_eer_enabled(device)) { 2438 cqr->status = DASD_CQR_FAILED; 2439 cqr->intrc = -EAGAIN; 2440 continue; 2441 } 2442 2443 /*Don't try to start requests if device is stopped*/ 2444 if (interruptible) { 2445 rc = wait_event_interruptible( 2446 generic_waitq, !device->stopped); 2447 if (rc == -ERESTARTSYS) { 2448 cqr->status = DASD_CQR_FAILED; 2449 cqr->intrc = rc; 2450 continue; 2451 } 2452 } else 2453 wait_event(generic_waitq, !(device->stopped)); 2454 2455 if (!cqr->callback) 2456 cqr->callback = dasd_wakeup_cb; 2457 cqr->callback_data = DASD_SLEEPON_START_TAG; 2458 dasd_add_request_tail(cqr); 2459 } 2460 2461 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2462 2463 rc = 0; 2464 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2465 /* 2466 * In some cases the 'File Protected' or 'Incorrect Length' 2467 * error might be 
expected and error recovery would be 2468 * unnecessary in these cases. Check if the according suppress 2469 * bit is set. 2470 */ 2471 sense = dasd_get_sense(&cqr->irb); 2472 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2473 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2474 continue; 2475 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2476 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2477 continue; 2478 2479 /* 2480 * for alias devices simplify error recovery and 2481 * return to upper layer 2482 * do not skip ERP requests 2483 */ 2484 if (cqr->startdev != cqr->basedev && !cqr->refers && 2485 (cqr->status == DASD_CQR_TERMINATED || 2486 cqr->status == DASD_CQR_NEED_ERP)) 2487 return -EAGAIN; 2488 2489 /* normal recovery for basedev IO */ 2490 if (__dasd_sleep_on_erp(cqr)) 2491 /* handle erp first */ 2492 goto retry; 2493 } 2494 2495 return 0; 2496 } 2497 2498 /* 2499 * Queue a request to the tail of the device ccw_queue and wait for 2500 * it's completion. 2501 */ 2502 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2503 { 2504 return _dasd_sleep_on(cqr, 0); 2505 } 2506 EXPORT_SYMBOL(dasd_sleep_on); 2507 2508 /* 2509 * Start requests from a ccw_queue and wait for their completion. 2510 */ 2511 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2512 { 2513 return _dasd_sleep_on_queue(ccw_queue, 0); 2514 } 2515 EXPORT_SYMBOL(dasd_sleep_on_queue); 2516 2517 /* 2518 * Start requests from a ccw_queue and wait interruptible for their completion. 2519 */ 2520 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2521 { 2522 return _dasd_sleep_on_queue(ccw_queue, 1); 2523 } 2524 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2525 2526 /* 2527 * Queue a request to the tail of the device ccw_queue and wait 2528 * interruptible for it's completion. 2529 */ 2530 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2531 { 2532 return _dasd_sleep_on(cqr, 1); 2533 } 2534 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2535 2536 /* 2537 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2538 * for eckd devices) the currently running request has to be terminated 2539 * and be put back to status queued, before the special request is added 2540 * to the head of the queue. Then the special request is waited on normally. 2541 */ 2542 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2543 { 2544 struct dasd_ccw_req *cqr; 2545 int rc; 2546 2547 if (list_empty(&device->ccw_queue)) 2548 return 0; 2549 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2550 rc = device->discipline->term_IO(cqr); 2551 if (!rc) 2552 /* 2553 * CQR terminated because a more important request is pending. 2554 * Undo decreasing of retry counter because this is 2555 * not an error case. 
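 * Note: this is called with the ccwdev lock held by the caller.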
2556 */ 2557 cqr->retries++; 2558 return rc; 2559 } 2560 2561 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2562 { 2563 struct dasd_device *device; 2564 int rc; 2565 2566 device = cqr->startdev; 2567 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2568 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2569 cqr->status = DASD_CQR_FAILED; 2570 cqr->intrc = -EPERM; 2571 return -EIO; 2572 } 2573 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2574 rc = _dasd_term_running_cqr(device); 2575 if (rc) { 2576 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2577 return rc; 2578 } 2579 cqr->callback = dasd_wakeup_cb; 2580 cqr->callback_data = DASD_SLEEPON_START_TAG; 2581 cqr->status = DASD_CQR_QUEUED; 2582 /* 2583 * add new request as second 2584 * first the terminated cqr needs to be finished 2585 */ 2586 list_add(&cqr->devlist, device->ccw_queue.next); 2587 2588 /* let the bh start the request to keep them in order */ 2589 dasd_schedule_device_bh(device); 2590 2591 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2592 2593 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2594 2595 if (cqr->status == DASD_CQR_DONE) 2596 rc = 0; 2597 else if (cqr->intrc) 2598 rc = cqr->intrc; 2599 else 2600 rc = -EIO; 2601 2602 /* kick tasklets */ 2603 dasd_schedule_device_bh(device); 2604 if (device->block) 2605 dasd_schedule_block_bh(device->block); 2606 2607 return rc; 2608 } 2609 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2610 2611 /* 2612 * Cancels a request that was started with dasd_sleep_on_req. 2613 * This is useful to timeout requests. The request will be 2614 * terminated if it is currently in i/o. 2615 * Returns 0 if request termination was successful 2616 * negative error code if termination failed 2617 * Cancellation of a request is an asynchronous operation! The calling 2618 * function has to wait until the request is properly returned via callback. 2619 */ 2620 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2621 { 2622 struct dasd_device *device = cqr->startdev; 2623 int rc = 0; 2624 2625 switch (cqr->status) { 2626 case DASD_CQR_QUEUED: 2627 /* request was not started - just set to cleared */ 2628 cqr->status = DASD_CQR_CLEARED; 2629 break; 2630 case DASD_CQR_IN_IO: 2631 /* request in IO - terminate IO and release again */ 2632 rc = device->discipline->term_IO(cqr); 2633 if (rc) { 2634 dev_err(&device->cdev->dev, 2635 "Cancelling request %p failed with rc=%d\n", 2636 cqr, rc); 2637 } else { 2638 cqr->stopclk = get_tod_clock(); 2639 } 2640 break; 2641 default: /* already finished or clear pending - do nothing */ 2642 break; 2643 } 2644 dasd_schedule_device_bh(device); 2645 return rc; 2646 } 2647 2648 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2649 { 2650 struct dasd_device *device = cqr->startdev; 2651 unsigned long flags; 2652 int rc; 2653 2654 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2655 rc = __dasd_cancel_req(cqr); 2656 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2657 return rc; 2658 } 2659 2660 /* 2661 * SECTION: Operations of the dasd_block layer. 2662 */ 2663 2664 /* 2665 * Timeout function for dasd_block. This is used when the block layer 2666 * is waiting for something that may not come reliably, (e.g. 
a state 2667 * change interrupt) 2668 */ 2669 static void dasd_block_timeout(struct timer_list *t) 2670 { 2671 unsigned long flags; 2672 struct dasd_block *block; 2673 2674 block = from_timer(block, t, timer); 2675 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2676 /* re-activate request queue */ 2677 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2678 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2679 dasd_schedule_block_bh(block); 2680 blk_mq_run_hw_queues(block->request_queue, true); 2681 } 2682 2683 /* 2684 * Setup timeout for a dasd_block in jiffies. 2685 */ 2686 void dasd_block_set_timer(struct dasd_block *block, int expires) 2687 { 2688 if (expires == 0) 2689 del_timer(&block->timer); 2690 else 2691 mod_timer(&block->timer, jiffies + expires); 2692 } 2693 EXPORT_SYMBOL(dasd_block_set_timer); 2694 2695 /* 2696 * Clear timeout for a dasd_block. 2697 */ 2698 void dasd_block_clear_timer(struct dasd_block *block) 2699 { 2700 del_timer(&block->timer); 2701 } 2702 EXPORT_SYMBOL(dasd_block_clear_timer); 2703 2704 /* 2705 * Process finished error recovery ccw. 2706 */ 2707 static void __dasd_process_erp(struct dasd_device *device, 2708 struct dasd_ccw_req *cqr) 2709 { 2710 dasd_erp_fn_t erp_fn; 2711 2712 if (cqr->status == DASD_CQR_DONE) 2713 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2714 else 2715 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2716 erp_fn = device->discipline->erp_postaction(cqr); 2717 erp_fn(cqr); 2718 } 2719 2720 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2721 { 2722 struct request *req; 2723 blk_status_t error = BLK_STS_OK; 2724 unsigned int proc_bytes; 2725 int status; 2726 2727 req = (struct request *) cqr->callback_data; 2728 dasd_profile_end(cqr->block, cqr, req); 2729 2730 proc_bytes = cqr->proc_bytes; 2731 status = cqr->block->base->discipline->free_cp(cqr, req); 2732 if (status < 0) 2733 error = errno_to_blk_status(status); 2734 else if (status == 0) { 2735 switch (cqr->intrc) { 2736 case -EPERM: 2737 error = BLK_STS_NEXUS; 2738 break; 2739 case -ENOLINK: 2740 error = BLK_STS_TRANSPORT; 2741 break; 2742 case -ETIMEDOUT: 2743 error = BLK_STS_TIMEOUT; 2744 break; 2745 default: 2746 error = BLK_STS_IOERR; 2747 break; 2748 } 2749 } 2750 2751 /* 2752 * We need to take care for ETIMEDOUT errors here since the 2753 * complete callback does not get called in this case. 2754 * Take care of all errors here and avoid additional code to 2755 * transfer the error value to the complete callback. 2756 */ 2757 if (error) { 2758 blk_mq_end_request(req, error); 2759 blk_mq_run_hw_queues(req->q, true); 2760 } else { 2761 /* 2762 * Partial completed requests can happen with ESE devices. 2763 * During read we might have gotten a NRF error and have to 2764 * complete a request partially. 2765 */ 2766 if (proc_bytes) { 2767 blk_update_request(req, BLK_STS_OK, 2768 blk_rq_bytes(req) - proc_bytes); 2769 blk_mq_requeue_request(req, true); 2770 } else if (likely(!blk_should_fake_timeout(req->q))) { 2771 blk_mq_complete_request(req); 2772 } 2773 } 2774 } 2775 2776 /* 2777 * Process ccw request queue. 2778 */ 2779 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2780 struct list_head *final_queue) 2781 { 2782 struct list_head *l, *n; 2783 struct dasd_ccw_req *cqr; 2784 dasd_erp_fn_t erp_fn; 2785 unsigned long flags; 2786 struct dasd_device *base = block->base; 2787 2788 restart: 2789 /* Process request with final status. 
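 * Final here means DONE, FAILED, NEED_ERP or TERMINATED; terminated and
 * recoverable requests may be re-driven through ERP before they are
 * moved to the final queue.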
*/ 2790 list_for_each_safe(l, n, &block->ccw_queue) { 2791 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2792 if (cqr->status != DASD_CQR_DONE && 2793 cqr->status != DASD_CQR_FAILED && 2794 cqr->status != DASD_CQR_NEED_ERP && 2795 cqr->status != DASD_CQR_TERMINATED) 2796 continue; 2797 2798 if (cqr->status == DASD_CQR_TERMINATED) { 2799 base->discipline->handle_terminated_request(cqr); 2800 goto restart; 2801 } 2802 2803 /* Process requests that may be recovered */ 2804 if (cqr->status == DASD_CQR_NEED_ERP) { 2805 erp_fn = base->discipline->erp_action(cqr); 2806 if (IS_ERR(erp_fn(cqr))) 2807 continue; 2808 goto restart; 2809 } 2810 2811 /* log sense for fatal error */ 2812 if (cqr->status == DASD_CQR_FAILED) { 2813 dasd_log_sense(cqr, &cqr->irb); 2814 } 2815 2816 /* First of all call extended error reporting. */ 2817 if (dasd_eer_enabled(base) && 2818 cqr->status == DASD_CQR_FAILED) { 2819 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2820 2821 /* restart request */ 2822 cqr->status = DASD_CQR_FILLED; 2823 cqr->retries = 255; 2824 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2825 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2826 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2827 flags); 2828 goto restart; 2829 } 2830 2831 /* Process finished ERP request. */ 2832 if (cqr->refers) { 2833 __dasd_process_erp(base, cqr); 2834 goto restart; 2835 } 2836 2837 /* Rechain finished requests to final queue */ 2838 cqr->endclk = get_tod_clock(); 2839 list_move_tail(&cqr->blocklist, final_queue); 2840 } 2841 } 2842 2843 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2844 { 2845 dasd_schedule_block_bh(cqr->block); 2846 } 2847 2848 static void __dasd_block_start_head(struct dasd_block *block) 2849 { 2850 struct dasd_ccw_req *cqr; 2851 2852 if (list_empty(&block->ccw_queue)) 2853 return; 2854 /* We allways begin with the first requests on the queue, as some 2855 * of previously started requests have to be enqueued on a 2856 * dasd_device again for error recovery. 2857 */ 2858 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2859 if (cqr->status != DASD_CQR_FILLED) 2860 continue; 2861 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2862 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2863 cqr->status = DASD_CQR_FAILED; 2864 cqr->intrc = -EPERM; 2865 dasd_schedule_block_bh(block); 2866 continue; 2867 } 2868 /* Non-temporary stop condition will trigger fail fast */ 2869 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2870 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2871 (!dasd_eer_enabled(block->base))) { 2872 cqr->status = DASD_CQR_FAILED; 2873 cqr->intrc = -ENOLINK; 2874 dasd_schedule_block_bh(block); 2875 continue; 2876 } 2877 /* Don't try to start requests if device is stopped */ 2878 if (block->base->stopped) 2879 return; 2880 2881 /* just a fail safe check, should not happen */ 2882 if (!cqr->startdev) 2883 cqr->startdev = block->base; 2884 2885 /* make sure that the requests we submit find their way back */ 2886 cqr->callback = dasd_return_cqr_cb; 2887 2888 dasd_add_request_tail(cqr); 2889 } 2890 } 2891 2892 /* 2893 * Central dasd_block layer routine. Takes requests from the generic 2894 * block layer request queue, creates ccw requests, enqueues them on 2895 * a dasd_device and processes ccw requests that have been returned. 
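 * The block queue_lock is only held while the ccw queue itself is
 * manipulated; the completion callbacks below run under the per-queue
 * dq->lock instead.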
2896 */ 2897 static void dasd_block_tasklet(unsigned long data) 2898 { 2899 struct dasd_block *block = (struct dasd_block *) data; 2900 struct list_head final_queue; 2901 struct list_head *l, *n; 2902 struct dasd_ccw_req *cqr; 2903 struct dasd_queue *dq; 2904 2905 atomic_set(&block->tasklet_scheduled, 0); 2906 INIT_LIST_HEAD(&final_queue); 2907 spin_lock_irq(&block->queue_lock); 2908 /* Finish off requests on ccw queue */ 2909 __dasd_process_block_ccw_queue(block, &final_queue); 2910 spin_unlock_irq(&block->queue_lock); 2911 2912 /* Now call the callback function of requests with final status */ 2913 list_for_each_safe(l, n, &final_queue) { 2914 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2915 dq = cqr->dq; 2916 spin_lock_irq(&dq->lock); 2917 list_del_init(&cqr->blocklist); 2918 __dasd_cleanup_cqr(cqr); 2919 spin_unlock_irq(&dq->lock); 2920 } 2921 2922 spin_lock_irq(&block->queue_lock); 2923 /* Now check if the head of the ccw queue needs to be started. */ 2924 __dasd_block_start_head(block); 2925 spin_unlock_irq(&block->queue_lock); 2926 2927 if (waitqueue_active(&shutdown_waitq)) 2928 wake_up(&shutdown_waitq); 2929 dasd_put_device(block->base); 2930 } 2931 2932 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2933 { 2934 wake_up(&dasd_flush_wq); 2935 } 2936 2937 /* 2938 * Requeue a request back to the block request queue 2939 * only works for block requests 2940 */ 2941 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2942 { 2943 struct dasd_block *block = cqr->block; 2944 struct request *req; 2945 2946 if (!block) 2947 return -EINVAL; 2948 /* 2949 * If the request is an ERP request there is nothing to requeue. 2950 * This will be done with the remaining original request. 2951 */ 2952 if (cqr->refers) 2953 return 0; 2954 spin_lock_irq(&cqr->dq->lock); 2955 req = (struct request *) cqr->callback_data; 2956 blk_mq_requeue_request(req, false); 2957 spin_unlock_irq(&cqr->dq->lock); 2958 2959 return 0; 2960 } 2961 2962 /* 2963 * Go through all request on the dasd_block request queue, cancel them 2964 * on the respective dasd_device, and return them to the generic 2965 * block layer. 2966 */ 2967 static int dasd_flush_block_queue(struct dasd_block *block) 2968 { 2969 struct dasd_ccw_req *cqr, *n; 2970 int rc, i; 2971 struct list_head flush_queue; 2972 unsigned long flags; 2973 2974 INIT_LIST_HEAD(&flush_queue); 2975 spin_lock_bh(&block->queue_lock); 2976 rc = 0; 2977 restart: 2978 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2979 /* if this request currently owned by a dasd_device cancel it */ 2980 if (cqr->status >= DASD_CQR_QUEUED) 2981 rc = dasd_cancel_req(cqr); 2982 if (rc < 0) 2983 break; 2984 /* Rechain request (including erp chain) so it won't be 2985 * touched by the dasd_block_tasklet anymore. 2986 * Replace the callback so we notice when the request 2987 * is returned from the dasd_device layer. 2988 */ 2989 cqr->callback = _dasd_wake_block_flush_cb; 2990 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2991 list_move_tail(&cqr->blocklist, &flush_queue); 2992 if (i > 1) 2993 /* moved more than one request - need to restart */ 2994 goto restart; 2995 } 2996 spin_unlock_bh(&block->queue_lock); 2997 /* Now call the callback function of flushed requests */ 2998 restart_cb: 2999 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3000 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3001 /* Process finished ERP request. 
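 * ERP requests are recognised by a non-NULL refers pointer that links
 * them back to the request they were created for.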
*/ 3002 if (cqr->refers) { 3003 spin_lock_bh(&block->queue_lock); 3004 __dasd_process_erp(block->base, cqr); 3005 spin_unlock_bh(&block->queue_lock); 3006 /* restart list_for_xx loop since dasd_process_erp 3007 * might remove multiple elements */ 3008 goto restart_cb; 3009 } 3010 /* call the callback function */ 3011 spin_lock_irqsave(&cqr->dq->lock, flags); 3012 cqr->endclk = get_tod_clock(); 3013 list_del_init(&cqr->blocklist); 3014 __dasd_cleanup_cqr(cqr); 3015 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3016 } 3017 return rc; 3018 } 3019 3020 /* 3021 * Schedules a call to dasd_tasklet over the device tasklet. 3022 */ 3023 void dasd_schedule_block_bh(struct dasd_block *block) 3024 { 3025 /* Protect against rescheduling. */ 3026 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3027 return; 3028 /* life cycle of block is bound to it's base device */ 3029 dasd_get_device(block->base); 3030 tasklet_hi_schedule(&block->tasklet); 3031 } 3032 EXPORT_SYMBOL(dasd_schedule_block_bh); 3033 3034 3035 /* 3036 * SECTION: external block device operations 3037 * (request queue handling, open, release, etc.) 3038 */ 3039 3040 /* 3041 * Dasd request queue function. Called from ll_rw_blk.c 3042 */ 3043 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3044 const struct blk_mq_queue_data *qd) 3045 { 3046 struct dasd_block *block = hctx->queue->queuedata; 3047 struct dasd_queue *dq = hctx->driver_data; 3048 struct request *req = qd->rq; 3049 struct dasd_device *basedev; 3050 struct dasd_ccw_req *cqr; 3051 blk_status_t rc = BLK_STS_OK; 3052 3053 basedev = block->base; 3054 spin_lock_irq(&dq->lock); 3055 if (basedev->state < DASD_STATE_READY) { 3056 DBF_DEV_EVENT(DBF_ERR, basedev, 3057 "device not ready for request %p", req); 3058 rc = BLK_STS_IOERR; 3059 goto out; 3060 } 3061 3062 /* 3063 * if device is stopped do not fetch new requests 3064 * except failfast is active which will let requests fail 3065 * immediately in __dasd_block_start_head() 3066 */ 3067 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3068 DBF_DEV_EVENT(DBF_ERR, basedev, 3069 "device stopped request %p", req); 3070 rc = BLK_STS_RESOURCE; 3071 goto out; 3072 } 3073 3074 if (basedev->features & DASD_FEATURE_READONLY && 3075 rq_data_dir(req) == WRITE) { 3076 DBF_DEV_EVENT(DBF_ERR, basedev, 3077 "Rejecting write request %p", req); 3078 rc = BLK_STS_IOERR; 3079 goto out; 3080 } 3081 3082 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3083 (basedev->features & DASD_FEATURE_FAILFAST || 3084 blk_noretry_request(req))) { 3085 DBF_DEV_EVENT(DBF_ERR, basedev, 3086 "Rejecting failfast request %p", req); 3087 rc = BLK_STS_IOERR; 3088 goto out; 3089 } 3090 3091 cqr = basedev->discipline->build_cp(basedev, block, req); 3092 if (IS_ERR(cqr)) { 3093 if (PTR_ERR(cqr) == -EBUSY || 3094 PTR_ERR(cqr) == -ENOMEM || 3095 PTR_ERR(cqr) == -EAGAIN) { 3096 rc = BLK_STS_RESOURCE; 3097 goto out; 3098 } 3099 DBF_DEV_EVENT(DBF_ERR, basedev, 3100 "CCW creation failed (rc=%ld) on request %p", 3101 PTR_ERR(cqr), req); 3102 rc = BLK_STS_IOERR; 3103 goto out; 3104 } 3105 /* 3106 * Note: callback is set to dasd_return_cqr_cb in 3107 * __dasd_block_start_head to cover erp requests as well 3108 */ 3109 cqr->callback_data = req; 3110 cqr->status = DASD_CQR_FILLED; 3111 cqr->dq = dq; 3112 3113 blk_mq_start_request(req); 3114 spin_lock(&block->queue_lock); 3115 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3116 INIT_LIST_HEAD(&cqr->devlist); 3117 dasd_profile_start(block, cqr, req); 3118 dasd_schedule_block_bh(block); 
3119 spin_unlock(&block->queue_lock); 3120 3121 out: 3122 spin_unlock_irq(&dq->lock); 3123 return rc; 3124 } 3125 3126 /* 3127 * Block timeout callback, called from the block layer 3128 * 3129 * Return values: 3130 * BLK_EH_RESET_TIMER if the request should be left running 3131 * BLK_EH_DONE if the request is handled or terminated 3132 * by the driver. 3133 */ 3134 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3135 { 3136 struct dasd_block *block = req->q->queuedata; 3137 struct dasd_device *device; 3138 struct dasd_ccw_req *cqr; 3139 unsigned long flags; 3140 int rc = 0; 3141 3142 cqr = blk_mq_rq_to_pdu(req); 3143 if (!cqr) 3144 return BLK_EH_DONE; 3145 3146 spin_lock_irqsave(&cqr->dq->lock, flags); 3147 device = cqr->startdev ? cqr->startdev : block->base; 3148 if (!device->blk_timeout) { 3149 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3150 return BLK_EH_RESET_TIMER; 3151 } 3152 DBF_DEV_EVENT(DBF_WARNING, device, 3153 " dasd_times_out cqr %p status %x", 3154 cqr, cqr->status); 3155 3156 spin_lock(&block->queue_lock); 3157 spin_lock(get_ccwdev_lock(device->cdev)); 3158 cqr->retries = -1; 3159 cqr->intrc = -ETIMEDOUT; 3160 if (cqr->status >= DASD_CQR_QUEUED) { 3161 rc = __dasd_cancel_req(cqr); 3162 } else if (cqr->status == DASD_CQR_FILLED || 3163 cqr->status == DASD_CQR_NEED_ERP) { 3164 cqr->status = DASD_CQR_TERMINATED; 3165 } else if (cqr->status == DASD_CQR_IN_ERP) { 3166 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3167 3168 list_for_each_entry_safe(searchcqr, nextcqr, 3169 &block->ccw_queue, blocklist) { 3170 tmpcqr = searchcqr; 3171 while (tmpcqr->refers) 3172 tmpcqr = tmpcqr->refers; 3173 if (tmpcqr != cqr) 3174 continue; 3175 /* searchcqr is an ERP request for cqr */ 3176 searchcqr->retries = -1; 3177 searchcqr->intrc = -ETIMEDOUT; 3178 if (searchcqr->status >= DASD_CQR_QUEUED) { 3179 rc = __dasd_cancel_req(searchcqr); 3180 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3181 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3182 searchcqr->status = DASD_CQR_TERMINATED; 3183 rc = 0; 3184 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3185 /* 3186 * Shouldn't happen; most recent ERP 3187 * request is at the front of queue 3188 */ 3189 continue; 3190 } 3191 break; 3192 } 3193 } 3194 spin_unlock(get_ccwdev_lock(device->cdev)); 3195 dasd_schedule_block_bh(block); 3196 spin_unlock(&block->queue_lock); 3197 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3198 3199 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3200 } 3201 3202 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3203 unsigned int idx) 3204 { 3205 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3206 3207 if (!dq) 3208 return -ENOMEM; 3209 3210 spin_lock_init(&dq->lock); 3211 hctx->driver_data = dq; 3212 3213 return 0; 3214 } 3215 3216 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3217 { 3218 kfree(hctx->driver_data); 3219 hctx->driver_data = NULL; 3220 } 3221 3222 static void dasd_request_done(struct request *req) 3223 { 3224 blk_mq_end_request(req, 0); 3225 blk_mq_run_hw_queues(req->q, true); 3226 } 3227 3228 static struct blk_mq_ops dasd_mq_ops = { 3229 .queue_rq = do_dasd_request, 3230 .complete = dasd_request_done, 3231 .timeout = dasd_times_out, 3232 .init_hctx = dasd_init_hctx, 3233 .exit_hctx = dasd_exit_hctx, 3234 }; 3235 3236 /* 3237 * Allocate and initialize request queue and default I/O scheduler. 
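 * The blk-mq tag set is sized with the queue_depth and nr_hw_queues
 * module parameters, and each request carries a struct dasd_ccw_req as
 * its driver PDU (cmd_size).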
3238 */ 3239 static int dasd_alloc_queue(struct dasd_block *block) 3240 { 3241 int rc; 3242 3243 block->tag_set.ops = &dasd_mq_ops; 3244 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3245 block->tag_set.nr_hw_queues = nr_hw_queues; 3246 block->tag_set.queue_depth = queue_depth; 3247 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3248 block->tag_set.numa_node = NUMA_NO_NODE; 3249 3250 rc = blk_mq_alloc_tag_set(&block->tag_set); 3251 if (rc) 3252 return rc; 3253 3254 block->request_queue = blk_mq_init_queue(&block->tag_set); 3255 if (IS_ERR(block->request_queue)) 3256 return PTR_ERR(block->request_queue); 3257 3258 block->request_queue->queuedata = block; 3259 3260 return 0; 3261 } 3262 3263 /* 3264 * Deactivate and free request queue. 3265 */ 3266 static void dasd_free_queue(struct dasd_block *block) 3267 { 3268 if (block->request_queue) { 3269 blk_cleanup_queue(block->request_queue); 3270 blk_mq_free_tag_set(&block->tag_set); 3271 block->request_queue = NULL; 3272 } 3273 } 3274 3275 static int dasd_open(struct block_device *bdev, fmode_t mode) 3276 { 3277 struct dasd_device *base; 3278 int rc; 3279 3280 base = dasd_device_from_gendisk(bdev->bd_disk); 3281 if (!base) 3282 return -ENODEV; 3283 3284 atomic_inc(&base->block->open_count); 3285 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3286 rc = -ENODEV; 3287 goto unlock; 3288 } 3289 3290 if (!try_module_get(base->discipline->owner)) { 3291 rc = -EINVAL; 3292 goto unlock; 3293 } 3294 3295 if (dasd_probeonly) { 3296 dev_info(&base->cdev->dev, 3297 "Accessing the DASD failed because it is in " 3298 "probeonly mode\n"); 3299 rc = -EPERM; 3300 goto out; 3301 } 3302 3303 if (base->state <= DASD_STATE_BASIC) { 3304 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3305 " Cannot open unrecognized device"); 3306 rc = -ENODEV; 3307 goto out; 3308 } 3309 3310 if ((mode & FMODE_WRITE) && 3311 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3312 (base->features & DASD_FEATURE_READONLY))) { 3313 rc = -EROFS; 3314 goto out; 3315 } 3316 3317 dasd_put_device(base); 3318 return 0; 3319 3320 out: 3321 module_put(base->discipline->owner); 3322 unlock: 3323 atomic_dec(&base->block->open_count); 3324 dasd_put_device(base); 3325 return rc; 3326 } 3327 3328 static void dasd_release(struct gendisk *disk, fmode_t mode) 3329 { 3330 struct dasd_device *base = dasd_device_from_gendisk(disk); 3331 if (base) { 3332 atomic_dec(&base->block->open_count); 3333 module_put(base->discipline->owner); 3334 dasd_put_device(base); 3335 } 3336 } 3337 3338 /* 3339 * Return disk geometry. 
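 * The discipline provides the logical geometry; the partition start is
 * converted from 512-byte sectors to device blocks via s2b_shift.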
3340 */ 3341 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3342 { 3343 struct dasd_device *base; 3344 3345 base = dasd_device_from_gendisk(bdev->bd_disk); 3346 if (!base) 3347 return -ENODEV; 3348 3349 if (!base->discipline || 3350 !base->discipline->fill_geometry) { 3351 dasd_put_device(base); 3352 return -EINVAL; 3353 } 3354 base->discipline->fill_geometry(base->block, geo); 3355 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3356 dasd_put_device(base); 3357 return 0; 3358 } 3359 3360 const struct block_device_operations 3361 dasd_device_operations = { 3362 .owner = THIS_MODULE, 3363 .open = dasd_open, 3364 .release = dasd_release, 3365 .ioctl = dasd_ioctl, 3366 .compat_ioctl = dasd_ioctl, 3367 .getgeo = dasd_getgeo, 3368 .set_read_only = dasd_set_read_only, 3369 }; 3370 3371 /******************************************************************************* 3372 * end of block device operations 3373 */ 3374 3375 static void 3376 dasd_exit(void) 3377 { 3378 #ifdef CONFIG_PROC_FS 3379 dasd_proc_exit(); 3380 #endif 3381 dasd_eer_exit(); 3382 kmem_cache_destroy(dasd_page_cache); 3383 dasd_page_cache = NULL; 3384 dasd_gendisk_exit(); 3385 dasd_devmap_exit(); 3386 if (dasd_debug_area != NULL) { 3387 debug_unregister(dasd_debug_area); 3388 dasd_debug_area = NULL; 3389 } 3390 dasd_statistics_removeroot(); 3391 } 3392 3393 /* 3394 * SECTION: common functions for ccw_driver use 3395 */ 3396 3397 /* 3398 * Is the device read-only? 3399 * Note that this function does not report the setting of the 3400 * readonly device attribute, but how it is configured in z/VM. 3401 */ 3402 int dasd_device_is_ro(struct dasd_device *device) 3403 { 3404 struct ccw_dev_id dev_id; 3405 struct diag210 diag_data; 3406 int rc; 3407 3408 if (!MACHINE_IS_VM) 3409 return 0; 3410 ccw_device_get_id(device->cdev, &dev_id); 3411 memset(&diag_data, 0, sizeof(diag_data)); 3412 diag_data.vrdcdvno = dev_id.devno; 3413 diag_data.vrdclen = sizeof(diag_data); 3414 rc = diag210(&diag_data); 3415 if (rc == 0 || rc == 2) { 3416 return diag_data.vrdcvfla & 0x80; 3417 } else { 3418 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3419 dev_id.devno, rc); 3420 return 0; 3421 } 3422 } 3423 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3424 3425 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3426 { 3427 struct ccw_device *cdev = data; 3428 int ret; 3429 3430 ret = ccw_device_set_online(cdev); 3431 if (ret) 3432 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3433 dev_name(&cdev->dev), ret); 3434 } 3435 3436 /* 3437 * Initial attempt at a probe function. this can be simplified once 3438 * the other detection code is gone. 3439 */ 3440 int dasd_generic_probe(struct ccw_device *cdev) 3441 { 3442 int ret; 3443 3444 ret = dasd_add_sysfs_files(cdev); 3445 if (ret) { 3446 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3447 "dasd_generic_probe: could not add " 3448 "sysfs entries"); 3449 return ret; 3450 } 3451 cdev->handler = &dasd_int_handler; 3452 3453 /* 3454 * Automatically online either all dasd devices (dasd_autodetect) 3455 * or all devices specified with dasd= parameters during 3456 * initial probe. 
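 * The online processing itself runs asynchronously via async_schedule()
 * so that the probe function does not block on device bring-up.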
3457 */ 3458 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3459 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3460 async_schedule(dasd_generic_auto_online, cdev); 3461 return 0; 3462 } 3463 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3464 3465 void dasd_generic_free_discipline(struct dasd_device *device) 3466 { 3467 /* Forget the discipline information. */ 3468 if (device->discipline) { 3469 if (device->discipline->uncheck_device) 3470 device->discipline->uncheck_device(device); 3471 module_put(device->discipline->owner); 3472 device->discipline = NULL; 3473 } 3474 if (device->base_discipline) { 3475 module_put(device->base_discipline->owner); 3476 device->base_discipline = NULL; 3477 } 3478 } 3479 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3480 3481 /* 3482 * This will one day be called from a global not_oper handler. 3483 * It is also used by driver_unregister during module unload. 3484 */ 3485 void dasd_generic_remove(struct ccw_device *cdev) 3486 { 3487 struct dasd_device *device; 3488 struct dasd_block *block; 3489 3490 cdev->handler = NULL; 3491 3492 device = dasd_device_from_cdev(cdev); 3493 if (IS_ERR(device)) { 3494 dasd_remove_sysfs_files(cdev); 3495 return; 3496 } 3497 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3498 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3499 /* Already doing offline processing */ 3500 dasd_put_device(device); 3501 dasd_remove_sysfs_files(cdev); 3502 return; 3503 } 3504 /* 3505 * This device is removed unconditionally. Set offline 3506 * flag to prevent dasd_open from opening it while it is 3507 * no quite down yet. 3508 */ 3509 dasd_set_target_state(device, DASD_STATE_NEW); 3510 /* dasd_delete_device destroys the device reference. */ 3511 block = device->block; 3512 dasd_delete_device(device); 3513 /* 3514 * life cycle of block is bound to device, so delete it after 3515 * device was safely removed 3516 */ 3517 if (block) 3518 dasd_free_block(block); 3519 3520 dasd_remove_sysfs_files(cdev); 3521 } 3522 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3523 3524 /* 3525 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3526 * the device is detected for the first time and is supposed to be used 3527 * or the user has started activation through sysfs. 3528 */ 3529 int dasd_generic_set_online(struct ccw_device *cdev, 3530 struct dasd_discipline *base_discipline) 3531 { 3532 struct dasd_discipline *discipline; 3533 struct dasd_device *device; 3534 int rc; 3535 3536 /* first online clears initial online feature flag */ 3537 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3538 device = dasd_create_device(cdev); 3539 if (IS_ERR(device)) 3540 return PTR_ERR(device); 3541 3542 discipline = base_discipline; 3543 if (device->features & DASD_FEATURE_USEDIAG) { 3544 if (!dasd_diag_discipline_pointer) { 3545 /* Try to load the required module. */ 3546 rc = request_module(DASD_DIAG_MOD); 3547 if (rc) { 3548 pr_warn("%s Setting the DASD online failed " 3549 "because the required module %s " 3550 "could not be loaded (rc=%d)\n", 3551 dev_name(&cdev->dev), DASD_DIAG_MOD, 3552 rc); 3553 dasd_delete_device(device); 3554 return -ENODEV; 3555 } 3556 } 3557 /* Module init could have failed, so check again here after 3558 * request_module(). 
*/ 3559 if (!dasd_diag_discipline_pointer) { 3560 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3561 dev_name(&cdev->dev)); 3562 dasd_delete_device(device); 3563 return -ENODEV; 3564 } 3565 discipline = dasd_diag_discipline_pointer; 3566 } 3567 if (!try_module_get(base_discipline->owner)) { 3568 dasd_delete_device(device); 3569 return -EINVAL; 3570 } 3571 if (!try_module_get(discipline->owner)) { 3572 module_put(base_discipline->owner); 3573 dasd_delete_device(device); 3574 return -EINVAL; 3575 } 3576 device->base_discipline = base_discipline; 3577 device->discipline = discipline; 3578 3579 /* check_device will allocate block device if necessary */ 3580 rc = discipline->check_device(device); 3581 if (rc) { 3582 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3583 dev_name(&cdev->dev), discipline->name, rc); 3584 module_put(discipline->owner); 3585 module_put(base_discipline->owner); 3586 dasd_delete_device(device); 3587 return rc; 3588 } 3589 3590 dasd_set_target_state(device, DASD_STATE_ONLINE); 3591 if (device->state <= DASD_STATE_KNOWN) { 3592 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3593 dev_name(&cdev->dev)); 3594 rc = -ENODEV; 3595 dasd_set_target_state(device, DASD_STATE_NEW); 3596 if (device->block) 3597 dasd_free_block(device->block); 3598 dasd_delete_device(device); 3599 } else 3600 pr_debug("dasd_generic device %s found\n", 3601 dev_name(&cdev->dev)); 3602 3603 wait_event(dasd_init_waitq, _wait_for_device(device)); 3604 3605 dasd_put_device(device); 3606 return rc; 3607 } 3608 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3609 3610 int dasd_generic_set_offline(struct ccw_device *cdev) 3611 { 3612 struct dasd_device *device; 3613 struct dasd_block *block; 3614 int max_count, open_count, rc; 3615 unsigned long flags; 3616 3617 rc = 0; 3618 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3619 device = dasd_device_from_cdev_locked(cdev); 3620 if (IS_ERR(device)) { 3621 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3622 return PTR_ERR(device); 3623 } 3624 3625 /* 3626 * We must make sure that this device is currently not in use. 3627 * The open_count is increased for every opener, that includes 3628 * the blkdev_get in dasd_scan_partitions. We are only interested 3629 * in the other openers. 3630 */ 3631 if (device->block) { 3632 max_count = device->block->bdev ? 0 : -1; 3633 open_count = atomic_read(&device->block->open_count); 3634 if (open_count > max_count) { 3635 if (open_count > 0) 3636 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3637 dev_name(&cdev->dev), open_count); 3638 else 3639 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3640 dev_name(&cdev->dev)); 3641 rc = -EBUSY; 3642 goto out_err; 3643 } 3644 } 3645 3646 /* 3647 * Test if the offline processing is already running and exit if so. 
3648 * If a safe offline is being processed this could only be a normal 3649 * offline that should be able to overtake the safe offline and 3650 * cancel any I/O we do not want to wait for any longer 3651 */ 3652 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3653 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3654 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3655 &device->flags); 3656 } else { 3657 rc = -EBUSY; 3658 goto out_err; 3659 } 3660 } 3661 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3662 3663 /* 3664 * if safe_offline is called set safe_offline_running flag and 3665 * clear safe_offline so that a call to normal offline 3666 * can overrun safe_offline processing 3667 */ 3668 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3669 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3670 /* need to unlock here to wait for outstanding I/O */ 3671 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3672 /* 3673 * If we want to set the device safe offline all IO operations 3674 * should be finished before continuing the offline process 3675 * so sync bdev first and then wait for our queues to become 3676 * empty 3677 */ 3678 if (device->block) { 3679 rc = fsync_bdev(device->block->bdev); 3680 if (rc != 0) 3681 goto interrupted; 3682 } 3683 dasd_schedule_device_bh(device); 3684 rc = wait_event_interruptible(shutdown_waitq, 3685 _wait_for_empty_queues(device)); 3686 if (rc != 0) 3687 goto interrupted; 3688 3689 /* 3690 * check if a normal offline process overtook the offline 3691 * processing in this case simply do nothing beside returning 3692 * that we got interrupted 3693 * otherwise mark safe offline as not running any longer and 3694 * continue with normal offline 3695 */ 3696 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3697 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3698 rc = -ERESTARTSYS; 3699 goto out_err; 3700 } 3701 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3702 } 3703 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3704 3705 dasd_set_target_state(device, DASD_STATE_NEW); 3706 /* dasd_delete_device destroys the device reference. */ 3707 block = device->block; 3708 dasd_delete_device(device); 3709 /* 3710 * life cycle of block is bound to device, so delete it after 3711 * device was safely removed 3712 */ 3713 if (block) 3714 dasd_free_block(block); 3715 3716 return 0; 3717 3718 interrupted: 3719 /* interrupted by signal */ 3720 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3721 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3722 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3723 out_err: 3724 dasd_put_device(device); 3725 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3726 return rc; 3727 } 3728 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3729 3730 int dasd_generic_last_path_gone(struct dasd_device *device) 3731 { 3732 struct dasd_ccw_req *cqr; 3733 3734 dev_warn(&device->cdev->dev, "No operational channel path is left " 3735 "for the device\n"); 3736 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3737 /* First of all call extended error reporting. */ 3738 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3739 3740 if (device->state < DASD_STATE_BASIC) 3741 return 0; 3742 /* Device is active. We want to keep it. 
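 * Requests caught in flight are reset to queued (with an extra retry)
 * and the device is stopped with DASD_STOPPED_DC_WAIT until a path
 * becomes operational again.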
*/ 3743 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3744 if ((cqr->status == DASD_CQR_IN_IO) || 3745 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3746 cqr->status = DASD_CQR_QUEUED; 3747 cqr->retries++; 3748 } 3749 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3750 dasd_device_clear_timer(device); 3751 dasd_schedule_device_bh(device); 3752 return 1; 3753 } 3754 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3755 3756 int dasd_generic_path_operational(struct dasd_device *device) 3757 { 3758 dev_info(&device->cdev->dev, "A channel path to the device has become " 3759 "operational\n"); 3760 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3761 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3762 dasd_schedule_device_bh(device); 3763 if (device->block) { 3764 dasd_schedule_block_bh(device->block); 3765 if (device->block->request_queue) 3766 blk_mq_run_hw_queues(device->block->request_queue, 3767 true); 3768 } 3769 3770 if (!device->stopped) 3771 wake_up(&generic_waitq); 3772 3773 return 1; 3774 } 3775 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3776 3777 int dasd_generic_notify(struct ccw_device *cdev, int event) 3778 { 3779 struct dasd_device *device; 3780 int ret; 3781 3782 device = dasd_device_from_cdev_locked(cdev); 3783 if (IS_ERR(device)) 3784 return 0; 3785 ret = 0; 3786 switch (event) { 3787 case CIO_GONE: 3788 case CIO_BOXED: 3789 case CIO_NO_PATH: 3790 dasd_path_no_path(device); 3791 ret = dasd_generic_last_path_gone(device); 3792 break; 3793 case CIO_OPER: 3794 ret = 1; 3795 if (dasd_path_get_opm(device)) 3796 ret = dasd_generic_path_operational(device); 3797 break; 3798 } 3799 dasd_put_device(device); 3800 return ret; 3801 } 3802 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3803 3804 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3805 { 3806 struct dasd_device *device; 3807 int chp, oldopm, hpfpm, ifccpm; 3808 3809 device = dasd_device_from_cdev_locked(cdev); 3810 if (IS_ERR(device)) 3811 return; 3812 3813 oldopm = dasd_path_get_opm(device); 3814 for (chp = 0; chp < 8; chp++) { 3815 if (path_event[chp] & PE_PATH_GONE) { 3816 dasd_path_notoper(device, chp); 3817 } 3818 if (path_event[chp] & PE_PATH_AVAILABLE) { 3819 dasd_path_available(device, chp); 3820 dasd_schedule_device_bh(device); 3821 } 3822 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3823 if (!dasd_path_is_operational(device, chp) && 3824 !dasd_path_need_verify(device, chp)) { 3825 /* 3826 * we can not establish a pathgroup on an 3827 * unavailable path, so trigger a path 3828 * verification first 3829 */ 3830 dasd_path_available(device, chp); 3831 dasd_schedule_device_bh(device); 3832 } 3833 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3834 "Pathgroup re-established\n"); 3835 if (device->discipline->kick_validate) 3836 device->discipline->kick_validate(device); 3837 } 3838 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3839 dasd_path_fcsec_update(device, chp); 3840 dasd_schedule_device_bh(device); 3841 } 3842 } 3843 hpfpm = dasd_path_get_hpfpm(device); 3844 ifccpm = dasd_path_get_ifccpm(device); 3845 if (!dasd_path_get_opm(device) && hpfpm) { 3846 /* 3847 * device has no operational paths but at least one path is 3848 * disabled due to HPF errors 3849 * disable HPF at all and use the path(s) again 3850 */ 3851 if (device->discipline->disable_hpf) 3852 device->discipline->disable_hpf(device); 3853 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3854 dasd_path_set_tbvpm(device, hpfpm); 3855 dasd_schedule_device_bh(device); 3856 
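		/* re-drive pending block layer requests through the requeue worker */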
void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
{
        struct dasd_device *device;
        int chp, oldopm, hpfpm, ifccpm;

        device = dasd_device_from_cdev_locked(cdev);
        if (IS_ERR(device))
                return;

        oldopm = dasd_path_get_opm(device);
        for (chp = 0; chp < 8; chp++) {
                if (path_event[chp] & PE_PATH_GONE) {
                        dasd_path_notoper(device, chp);
                }
                if (path_event[chp] & PE_PATH_AVAILABLE) {
                        dasd_path_available(device, chp);
                        dasd_schedule_device_bh(device);
                }
                if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
                        if (!dasd_path_is_operational(device, chp) &&
                            !dasd_path_need_verify(device, chp)) {
                                /*
                                 * We cannot establish a pathgroup on an
                                 * unavailable path, so trigger a path
                                 * verification first.
                                 */
                                dasd_path_available(device, chp);
                                dasd_schedule_device_bh(device);
                        }
                        DBF_DEV_EVENT(DBF_WARNING, device, "%s",
                                      "Pathgroup re-established\n");
                        if (device->discipline->kick_validate)
                                device->discipline->kick_validate(device);
                }
                if (path_event[chp] & PE_PATH_FCES_EVENT) {
                        dasd_path_fcsec_update(device, chp);
                        dasd_schedule_device_bh(device);
                }
        }
        hpfpm = dasd_path_get_hpfpm(device);
        ifccpm = dasd_path_get_ifccpm(device);
        if (!dasd_path_get_opm(device) && hpfpm) {
                /*
                 * The device has no operational paths, but at least one
                 * path is disabled due to HPF errors; disable HPF entirely
                 * and use the path(s) again.
                 */
                if (device->discipline->disable_hpf)
                        device->discipline->disable_hpf(device);
                dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC);
                dasd_path_set_tbvpm(device, hpfpm);
                dasd_schedule_device_bh(device);
                dasd_schedule_requeue(device);
        } else if (!dasd_path_get_opm(device) && ifccpm) {
                /*
                 * The device has no operational paths, but at least one
                 * path is disabled due to IFCC errors; trigger path
                 * verification on the paths with IFCC errors.
                 */
                dasd_path_set_tbvpm(device, ifccpm);
                dasd_schedule_device_bh(device);
        }
        if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
                dev_warn(&device->cdev->dev,
                         "No verified channel paths remain for the device\n");
                DBF_DEV_EVENT(DBF_WARNING, device,
                              "%s", "last verified path gone");
                dasd_eer_write(device, NULL, DASD_EER_NOPATH);
                dasd_device_set_stop_bits(device,
                                          DASD_STOPPED_DC_WAIT);
        }
        dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
        if (!dasd_path_get_opm(device) && lpm) {
                dasd_path_set_opm(device, lpm);
                dasd_generic_path_operational(device);
        } else
                dasd_path_add_opm(device, lpm);
        return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
                                struct dasd_ccw_req *cqr)
{
        dasd_eer_write(device, NULL, DASD_EER_NOSPC);

        if (device->state < DASD_STATE_BASIC)
                return;

        if (cqr->status == DASD_CQR_IN_IO ||
            cqr->status == DASD_CQR_CLEAR_PENDING) {
                cqr->status = DASD_CQR_QUEUED;
                cqr->retries++;
        }
        dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
        dasd_device_clear_timer(device);
        dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
        dev_info(&device->cdev->dev, "Extent pool space is available\n");
        DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

        dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
        dasd_schedule_device_bh(device);

        if (device->block) {
                dasd_schedule_block_bh(device->block);
                if (device->block->request_queue)
                        blk_mq_run_hw_queues(device->block->request_queue, true);
        }
        if (!device->stopped)
                wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

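/*
 * The requeue path below works in two phases: active requests are first
 * terminated under the ccw device lock and collected on a local list;
 * then, outside the lock, the code waits for any pending clear operations
 * to finish and hands requests that originated from the block layer back
 * to it (see _dasd_requeue_request).  Internal requests without a block
 * device origin are spliced back onto the device ccw queue at the end.
 */
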
/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
        struct list_head requeue_queue;
        struct dasd_ccw_req *cqr, *n;
        struct dasd_ccw_req *refers;
        int rc;

        INIT_LIST_HEAD(&requeue_queue);
        spin_lock_irq(get_ccwdev_lock(device->cdev));
        rc = 0;
        list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
                /* Check status and move request to requeue_queue */
                if (cqr->status == DASD_CQR_IN_IO) {
                        rc = device->discipline->term_IO(cqr);
                        if (rc) {
                                /* unable to terminate request */
                                dev_err(&device->cdev->dev,
                                        "Unable to terminate request %p "
                                        "on suspend\n", cqr);
                                spin_unlock_irq(get_ccwdev_lock(device->cdev));
                                dasd_put_device(device);
                                return rc;
                        }
                }
                list_move_tail(&cqr->devlist, &requeue_queue);
        }
        spin_unlock_irq(get_ccwdev_lock(device->cdev));

        list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
                wait_event(dasd_flush_wq,
                           (cqr->status != DASD_CQR_CLEAR_PENDING));

                /*
                 * Requeueing to the block layer only works for
                 * block device requests.
                 */
                if (_dasd_requeue_request(cqr))
                        continue;

                /* remove requests from device and block queue */
                list_del_init(&cqr->devlist);
                while (cqr->refers != NULL) {
                        refers = cqr->refers;
                        /* remove the request from the block queue */
                        list_del(&cqr->blocklist);
                        /* free the finished erp request */
                        dasd_free_erp_request(cqr, cqr->memdev);
                        cqr = refers;
                }

                /*
                 * _dasd_requeue_request already checked for a valid
                 * block device, no need to check again.  All ERP requests
                 * (cqr->refers) have a cqr->block pointer copied from the
                 * original cqr.
                 */
                list_del_init(&cqr->blocklist);
                cqr->block->base->discipline->free_cp(
                        cqr, (struct request *) cqr->callback_data);
        }

        /*
         * If requests remain, they are internal requests and go back
         * to the device queue.
         */
        if (!list_empty(&requeue_queue)) {
                /* move the requeue_queue entries back to the ccw_queue */
                spin_lock_irq(get_ccwdev_lock(device->cdev));
                list_splice_tail(&requeue_queue, &device->ccw_queue);
                spin_unlock_irq(get_ccwdev_lock(device->cdev));
        }
        dasd_schedule_device_bh(device);
        return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
        struct dasd_device *device = container_of(work, struct dasd_device,
                                                  requeue_requests);
        dasd_generic_requeue_all_requests(device);
        dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
        if (device->block)
                dasd_schedule_block_bh(device->block);
        dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
        dasd_get_device(device);
        /* queue call to do_requeue_requests to the kernel event daemon. */
        if (!schedule_work(&device->requeue_requests))
                dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
                                                   int rdc_buffer_size,
                                                   int magic)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;

        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
                                   NULL);

        if (IS_ERR(cqr)) {
                /* internal error 13 - Allocating the RDC request failed */
                dev_err(&device->cdev->dev,
                        "An error occurred in the DASD device driver, "
                        "reason=%s\n", "13");
                return cqr;
        }

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
        ccw->cda = (__u32)(addr_t) cqr->data;
        ccw->flags = 0;
        ccw->count = rdc_buffer_size;
        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10*HZ;
        cqr->retries = 256;
        cqr->buildclk = get_tod_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
                                void *rdc_buffer, int rdc_buffer_size)
{
        int ret;
        struct dasd_ccw_req *cqr;

        cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        ret = dasd_sleep_on(cqr);
        if (ret == 0)
                memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
        dasd_sfree_request(cqr, cqr->memdev);
        return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

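/*
 * Usage sketch (illustrative only, not code from this driver): a
 * discipline driver typically calls dasd_generic_read_dev_chars() during
 * device setup to fill its own RDC buffer, roughly like this, where
 * "my_rdc_data" and "MY_MAGIC" are placeholder names:
 *
 *      struct my_rdc_data rdc;
 *      int rc;
 *
 *      rc = dasd_generic_read_dev_chars(device, MY_MAGIC,
 *                                       &rdc, sizeof(rdc));
 *      if (rc)
 *              return rc;
 *
 * The buffer is filled synchronously; the helper sleeps until the Read
 * Device Characteristics CCW completes or fails.
 */
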
/*
 * In command mode and transport mode we need to look for sense
 * data in different places.  The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
        struct tsb *tsb = NULL;
        char *sense = NULL;

        if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
                if (irb->scsw.tm.tcw)
                        tsb = tcw_get_tsb((struct tcw *)(unsigned long)
                                          irb->scsw.tm.tcw);
                if (tsb && tsb->length == 64 && tsb->flags)
                        switch (tsb->flags & 0x07) {
                        case 1: /* tsa_iostat */
                                sense = tsb->tsa.iostat.sense;
                                break;
                        case 2: /* tsa_ddpc */
                                sense = tsb->tsa.ddpc.sense;
                                break;
                        default:
                                /* currently we don't use interrogate data */
                                break;
                        }
        } else if (irb->esw.esw0.erw.cons) {
                sense = irb->ecw;
        }
        return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
        struct dasd_device *device;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;

        if (device->block)
                dasd_schedule_block_bh(device->block);

        dasd_schedule_device_bh(device);

        wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);
        init_waitqueue_head(&dasd_flush_wq);
        init_waitqueue_head(&generic_waitq);
        init_waitqueue_head(&shutdown_waitq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_WARNING);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        dasd_statistics_createroot();

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        pr_info("The DASD device driver could not be initialized\n");
        dasd_exit();
        return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
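
/*
 * Usage sketch for dasd_get_sense() (illustrative only, not code from
 * this driver): interrupt handlers and ERP code typically do something
 * along these lines before interpreting discipline-specific sense bytes:
 *
 *      char *sense = dasd_get_sense(irb);
 *
 *      if (!sense)
 *              return;         -- no sense data in this irb
 *      -- otherwise 32 bytes of sense data can be examined, e.g. sense[0]
 *
 * A NULL return means the irb carries no sense data in either command
 * or transport mode.
 */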