// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

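/*
 * A device steps through the states NEW <-> KNOWN <-> BASIC <-> READY <->
 * ONLINE (with UNFMT as a side state for unformatted volumes).
 * dasd_increase_state() and dasd_decrease_state() below call one of the
 * dasd_state_*_to_*() helpers per step until the target state is reached.
 */
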
/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Allocate and register the gendisk, debugfs entries and debug areas
 * for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Remove the gendisk, debugfs entries and debug areas again.
 * Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

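/*
 * Profiling data is kept at up to three levels: the global profile, the
 * per-block profile and the per-device profile.  The helpers below update
 * whichever of the three currently has a data buffer attached.
 */
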
/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (device->profile.data) {
		counter = 1; /* request is not yet queued on the start device */
		list_for_each(l, &device->ccw_queue)
			if (++counter >= 31)
				break;
	}
	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		device->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			device->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			     \
{								     \
	for (index = 0; index < 31 && value >> (2+index); index++)  \
		;						     \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

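/*
 * All times below are TOD clock deltas shifted right by 12 bits, i.e.
 * expressed in microseconds.
 */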
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

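/*
 * Copy a string from user space into a freshly allocated buffer and strip
 * a trailing newline.  The caller must release the buffer with vfree().
 */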
char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

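/*
 * Emit one profile data set in the text format exposed through the debugfs
 * "statistics" files: scalar counters first, followed by the 32-bucket
 * histograms.
 */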
static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

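/*
 * Create the "dasd" directory below the debugfs root together with the
 * "global" subdirectory and its statistics file.  Per-device and per-block
 * directories are added later in dasd_state_known_to_basic().
 */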
static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

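/*
 * Allocate a ccw request from the preallocated per-device ccw chunk pool.
 * Space for cplength CCWs and datasize bytes of data is reserved behind
 * the struct dasd_ccw_req unless a preallocated cqr is passed in.
 */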
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

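/*
 * Called by the interrupt handler when the IRB is an error pointer, i.e.
 * the I/O was killed before a real interrupt arrived.  Put the request
 * back on the queue so the bottom half can retry it.
 */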
static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue,
					     true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

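/*
 * Sense data pattern reported when an ESE volume runs out of extent pool
 * space: equipment check, permanent error, write inhibited and sense
 * byte 25 set to 0x01.
 */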
static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change){
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

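/*
 * Translate the final device-level status of a cqr into the status the
 * upper layer expects (DONE, NEED_ERP or TERMINATED) and invoke its
 * callback.
 */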
*/
1903 if (cqr->status == DASD_CQR_QUEUED ||
1904 cqr->status == DASD_CQR_IN_IO ||
1905 cqr->status == DASD_CQR_CLEAR_PENDING)
1906 continue;
1907 if (cqr->status == DASD_CQR_ERROR) {
1908 __dasd_device_recovery(device, cqr);
1909 }
1910 /* Rechain finished requests to final queue */
1911 list_move_tail(&cqr->devlist, final_queue);
1912 }
1913 }
1914
1915 static void __dasd_process_cqr(struct dasd_device *device,
1916 struct dasd_ccw_req *cqr)
1917 {
1918 char errorstring[ERRORLENGTH];
1919
1920 switch (cqr->status) {
1921 case DASD_CQR_SUCCESS:
1922 cqr->status = DASD_CQR_DONE;
1923 break;
1924 case DASD_CQR_ERROR:
1925 cqr->status = DASD_CQR_NEED_ERP;
1926 break;
1927 case DASD_CQR_CLEARED:
1928 cqr->status = DASD_CQR_TERMINATED;
1929 break;
1930 default:
1931 /* internal error 12 - wrong cqr status */
1932 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
1933 dev_err(&device->cdev->dev,
1934 "An error occurred in the DASD device driver, "
1935 "reason=%s\n", errorstring);
1936 BUG();
1937 }
1938 if (cqr->callback)
1939 cqr->callback(cqr, cqr->callback_data);
1940 }
1941
1942 /*
1943 * The cqrs from the final queue are returned to the upper layer
1944 * by setting their final status and calling the callback function.
1945 */
1946 static void __dasd_device_process_final_queue(struct dasd_device *device,
1947 struct list_head *final_queue)
1948 {
1949 struct list_head *l, *n;
1950 struct dasd_ccw_req *cqr;
1951 struct dasd_block *block;
1952
1953 list_for_each_safe(l, n, final_queue) {
1954 cqr = list_entry(l, struct dasd_ccw_req, devlist);
1955 list_del_init(&cqr->devlist);
1956 block = cqr->block;
1957 if (!block) {
1958 __dasd_process_cqr(device, cqr);
1959 } else {
1960 spin_lock_bh(&block->queue_lock);
1961 __dasd_process_cqr(device, cqr);
1962 spin_unlock_bh(&block->queue_lock);
1963 }
1964 }
1965 }
1966
1967 /*
1968 * Take a look at the first request on the ccw queue and check
1969 * if it reached its expire time. If so, terminate the IO.
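 * A request whose termination fails is given another 5 seconds via the
 * device timer before the timeout handling runs again.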
1970 */ 1971 static void __dasd_device_check_expire(struct dasd_device *device) 1972 { 1973 struct dasd_ccw_req *cqr; 1974 1975 if (list_empty(&device->ccw_queue)) 1976 return; 1977 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1978 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1979 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1980 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1981 /* 1982 * IO in safe offline processing should not 1983 * run out of retries 1984 */ 1985 cqr->retries++; 1986 } 1987 if (device->discipline->term_IO(cqr) != 0) { 1988 /* Hmpf, try again in 5 sec */ 1989 dev_err(&device->cdev->dev, 1990 "cqr %p timed out (%lus) but cannot be " 1991 "ended, retrying in 5 s\n", 1992 cqr, (cqr->expires/HZ)); 1993 cqr->expires += 5*HZ; 1994 dasd_device_set_timer(device, 5*HZ); 1995 } else { 1996 dev_err(&device->cdev->dev, 1997 "cqr %p timed out (%lus), %i retries " 1998 "remaining\n", cqr, (cqr->expires/HZ), 1999 cqr->retries); 2000 } 2001 } 2002 } 2003 2004 /* 2005 * return 1 when device is not eligible for IO 2006 */ 2007 static int __dasd_device_is_unusable(struct dasd_device *device, 2008 struct dasd_ccw_req *cqr) 2009 { 2010 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2011 2012 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2013 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2014 /* 2015 * dasd is being set offline 2016 * but it is no safe offline where we have to allow I/O 2017 */ 2018 return 1; 2019 } 2020 if (device->stopped) { 2021 if (device->stopped & mask) { 2022 /* stopped and CQR will not change that. */ 2023 return 1; 2024 } 2025 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2026 /* CQR is not able to change device to 2027 * operational. */ 2028 return 1; 2029 } 2030 /* CQR required to get device operational. */ 2031 } 2032 return 0; 2033 } 2034 2035 /* 2036 * Take a look at the first request on the ccw queue and check 2037 * if it needs to be started. 
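 * If the device is currently unusable the request is returned to the
 * upper layer with intrc -EAGAIN; a failed start_IO is retried either
 * via the tasklet (-EACCES) or after half a second via the device timer.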
2038 */ 2039 static void __dasd_device_start_head(struct dasd_device *device) 2040 { 2041 struct dasd_ccw_req *cqr; 2042 int rc; 2043 2044 if (list_empty(&device->ccw_queue)) 2045 return; 2046 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2047 if (cqr->status != DASD_CQR_QUEUED) 2048 return; 2049 /* if device is not usable return request to upper layer */ 2050 if (__dasd_device_is_unusable(device, cqr)) { 2051 cqr->intrc = -EAGAIN; 2052 cqr->status = DASD_CQR_CLEARED; 2053 dasd_schedule_device_bh(device); 2054 return; 2055 } 2056 2057 rc = device->discipline->start_IO(cqr); 2058 if (rc == 0) 2059 dasd_device_set_timer(device, cqr->expires); 2060 else if (rc == -EACCES) { 2061 dasd_schedule_device_bh(device); 2062 } else 2063 /* Hmpf, try again in 1/2 sec */ 2064 dasd_device_set_timer(device, 50); 2065 } 2066 2067 static void __dasd_device_check_path_events(struct dasd_device *device) 2068 { 2069 __u8 tbvpm, fcsecpm; 2070 int rc; 2071 2072 tbvpm = dasd_path_get_tbvpm(device); 2073 fcsecpm = dasd_path_get_fcsecpm(device); 2074 2075 if (!tbvpm && !fcsecpm) 2076 return; 2077 2078 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2079 return; 2080 2081 dasd_path_clear_all_verify(device); 2082 dasd_path_clear_all_fcsec(device); 2083 2084 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2085 if (rc) { 2086 dasd_path_add_tbvpm(device, tbvpm); 2087 dasd_path_add_fcsecpm(device, fcsecpm); 2088 dasd_device_set_timer(device, 50); 2089 } 2090 }; 2091 2092 /* 2093 * Go through all request on the dasd_device request queue, 2094 * terminate them on the cdev if necessary, and return them to the 2095 * submitting layer via callback. 2096 * Note: 2097 * Make sure that all 'submitting layers' still exist when 2098 * this function is called!. In other words, when 'device' is a base 2099 * device then all block layer requests must have been removed before 2100 * via dasd_flush_block_queue. 2101 */ 2102 int dasd_flush_device_queue(struct dasd_device *device) 2103 { 2104 struct dasd_ccw_req *cqr, *n; 2105 int rc; 2106 struct list_head flush_queue; 2107 2108 INIT_LIST_HEAD(&flush_queue); 2109 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2110 rc = 0; 2111 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2112 /* Check status and move request to flush_queue */ 2113 switch (cqr->status) { 2114 case DASD_CQR_IN_IO: 2115 rc = device->discipline->term_IO(cqr); 2116 if (rc) { 2117 /* unable to terminate requeust */ 2118 dev_err(&device->cdev->dev, 2119 "Flushing the DASD request queue " 2120 "failed for request %p\n", cqr); 2121 /* stop flush processing */ 2122 goto finished; 2123 } 2124 break; 2125 case DASD_CQR_QUEUED: 2126 cqr->stopclk = get_tod_clock(); 2127 cqr->status = DASD_CQR_CLEARED; 2128 break; 2129 default: /* no need to modify the others */ 2130 break; 2131 } 2132 list_move_tail(&cqr->devlist, &flush_queue); 2133 } 2134 finished: 2135 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2136 /* 2137 * After this point all requests must be in state CLEAR_PENDING, 2138 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2139 * one of the others. 
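 * The interrupt handler moves CLEAR_PENDING requests to CLEARED and
 * wakes up dasd_flush_wq once the clear function has completed.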
2140 */ 2141 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2142 wait_event(dasd_flush_wq, 2143 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2144 /* 2145 * Now set each request back to TERMINATED, DONE or NEED_ERP 2146 * and call the callback function of flushed requests 2147 */ 2148 __dasd_device_process_final_queue(device, &flush_queue); 2149 return rc; 2150 } 2151 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2152 2153 /* 2154 * Acquire the device lock and process queues for the device. 2155 */ 2156 static void dasd_device_tasklet(unsigned long data) 2157 { 2158 struct dasd_device *device = (struct dasd_device *) data; 2159 struct list_head final_queue; 2160 2161 atomic_set (&device->tasklet_scheduled, 0); 2162 INIT_LIST_HEAD(&final_queue); 2163 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2164 /* Check expire time of first request on the ccw queue. */ 2165 __dasd_device_check_expire(device); 2166 /* find final requests on ccw queue */ 2167 __dasd_device_process_ccw_queue(device, &final_queue); 2168 __dasd_device_check_path_events(device); 2169 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2170 /* Now call the callback function of requests with final status */ 2171 __dasd_device_process_final_queue(device, &final_queue); 2172 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2173 /* Now check if the head of the ccw queue needs to be started. */ 2174 __dasd_device_start_head(device); 2175 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2176 if (waitqueue_active(&shutdown_waitq)) 2177 wake_up(&shutdown_waitq); 2178 dasd_put_device(device); 2179 } 2180 2181 /* 2182 * Schedules a call to dasd_tasklet over the device tasklet. 2183 */ 2184 void dasd_schedule_device_bh(struct dasd_device *device) 2185 { 2186 /* Protect against rescheduling. */ 2187 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2188 return; 2189 dasd_get_device(device); 2190 tasklet_hi_schedule(&device->tasklet); 2191 } 2192 EXPORT_SYMBOL(dasd_schedule_device_bh); 2193 2194 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2195 { 2196 device->stopped |= bits; 2197 } 2198 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2199 2200 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2201 { 2202 device->stopped &= ~bits; 2203 if (!device->stopped) 2204 wake_up(&generic_waitq); 2205 } 2206 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2207 2208 /* 2209 * Queue a request to the head of the device ccw_queue. 2210 * Start the I/O if possible. 2211 */ 2212 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2213 { 2214 struct dasd_device *device; 2215 unsigned long flags; 2216 2217 device = cqr->startdev; 2218 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2219 cqr->status = DASD_CQR_QUEUED; 2220 list_add(&cqr->devlist, &device->ccw_queue); 2221 /* let the bh start the request to keep them in order */ 2222 dasd_schedule_device_bh(device); 2223 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2224 } 2225 EXPORT_SYMBOL(dasd_add_request_head); 2226 2227 /* 2228 * Queue a request to the tail of the device ccw_queue. 2229 * Start the I/O if possible. 
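 *
 * A minimal usage sketch (a hedged illustration mirroring what
 * _dasd_sleep_on() below does; the cqr is assumed to have been built
 * by the discipline, e.g. via build_cp, with startdev set):
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));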
2230 */ 2231 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2232 { 2233 struct dasd_device *device; 2234 unsigned long flags; 2235 2236 device = cqr->startdev; 2237 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2238 cqr->status = DASD_CQR_QUEUED; 2239 list_add_tail(&cqr->devlist, &device->ccw_queue); 2240 /* let the bh start the request to keep them in order */ 2241 dasd_schedule_device_bh(device); 2242 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2243 } 2244 EXPORT_SYMBOL(dasd_add_request_tail); 2245 2246 /* 2247 * Wakeup helper for the 'sleep_on' functions. 2248 */ 2249 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2250 { 2251 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2252 cqr->callback_data = DASD_SLEEPON_END_TAG; 2253 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2254 wake_up(&generic_waitq); 2255 } 2256 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2257 2258 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2259 { 2260 struct dasd_device *device; 2261 int rc; 2262 2263 device = cqr->startdev; 2264 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2265 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2266 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2267 return rc; 2268 } 2269 2270 /* 2271 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2272 */ 2273 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2274 { 2275 struct dasd_device *device; 2276 dasd_erp_fn_t erp_fn; 2277 2278 if (cqr->status == DASD_CQR_FILLED) 2279 return 0; 2280 device = cqr->startdev; 2281 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2282 if (cqr->status == DASD_CQR_TERMINATED) { 2283 device->discipline->handle_terminated_request(cqr); 2284 return 1; 2285 } 2286 if (cqr->status == DASD_CQR_NEED_ERP) { 2287 erp_fn = device->discipline->erp_action(cqr); 2288 erp_fn(cqr); 2289 return 1; 2290 } 2291 if (cqr->status == DASD_CQR_FAILED) 2292 dasd_log_sense(cqr, &cqr->irb); 2293 if (cqr->refers) { 2294 __dasd_process_erp(device, cqr); 2295 return 1; 2296 } 2297 } 2298 return 0; 2299 } 2300 2301 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2302 { 2303 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2304 if (cqr->refers) /* erp is not done yet */ 2305 return 1; 2306 return ((cqr->status != DASD_CQR_DONE) && 2307 (cqr->status != DASD_CQR_FAILED)); 2308 } else 2309 return (cqr->status == DASD_CQR_FILLED); 2310 } 2311 2312 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2313 { 2314 struct dasd_device *device; 2315 int rc; 2316 struct list_head ccw_queue; 2317 struct dasd_ccw_req *cqr; 2318 2319 INIT_LIST_HEAD(&ccw_queue); 2320 maincqr->status = DASD_CQR_FILLED; 2321 device = maincqr->startdev; 2322 list_add(&maincqr->blocklist, &ccw_queue); 2323 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2324 cqr = list_first_entry(&ccw_queue, 2325 struct dasd_ccw_req, blocklist)) { 2326 2327 if (__dasd_sleep_on_erp(cqr)) 2328 continue; 2329 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2330 continue; 2331 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2332 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2333 cqr->status = DASD_CQR_FAILED; 2334 cqr->intrc = -EPERM; 2335 continue; 2336 } 2337 /* Non-temporary stop condition will trigger fail fast */ 2338 if (device->stopped & ~DASD_STOPPED_PENDING && 2339 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2340 (!dasd_eer_enabled(device))) { 2341 cqr->status = DASD_CQR_FAILED; 2342 cqr->intrc = -ENOLINK; 
2343 continue; 2344 } 2345 /* 2346 * Don't try to start requests if device is in 2347 * offline processing, it might wait forever 2348 */ 2349 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2350 cqr->status = DASD_CQR_FAILED; 2351 cqr->intrc = -ENODEV; 2352 continue; 2353 } 2354 /* 2355 * Don't try to start requests if device is stopped 2356 * except path verification requests 2357 */ 2358 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2359 if (interruptible) { 2360 rc = wait_event_interruptible( 2361 generic_waitq, !(device->stopped)); 2362 if (rc == -ERESTARTSYS) { 2363 cqr->status = DASD_CQR_FAILED; 2364 maincqr->intrc = rc; 2365 continue; 2366 } 2367 } else 2368 wait_event(generic_waitq, !(device->stopped)); 2369 } 2370 if (!cqr->callback) 2371 cqr->callback = dasd_wakeup_cb; 2372 2373 cqr->callback_data = DASD_SLEEPON_START_TAG; 2374 dasd_add_request_tail(cqr); 2375 if (interruptible) { 2376 rc = wait_event_interruptible( 2377 generic_waitq, _wait_for_wakeup(cqr)); 2378 if (rc == -ERESTARTSYS) { 2379 dasd_cancel_req(cqr); 2380 /* wait (non-interruptible) for final status */ 2381 wait_event(generic_waitq, 2382 _wait_for_wakeup(cqr)); 2383 cqr->status = DASD_CQR_FAILED; 2384 maincqr->intrc = rc; 2385 continue; 2386 } 2387 } else 2388 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2389 } 2390 2391 maincqr->endclk = get_tod_clock(); 2392 if ((maincqr->status != DASD_CQR_DONE) && 2393 (maincqr->intrc != -ERESTARTSYS)) 2394 dasd_log_sense(maincqr, &maincqr->irb); 2395 if (maincqr->status == DASD_CQR_DONE) 2396 rc = 0; 2397 else if (maincqr->intrc) 2398 rc = maincqr->intrc; 2399 else 2400 rc = -EIO; 2401 return rc; 2402 } 2403 2404 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2405 { 2406 struct dasd_ccw_req *cqr; 2407 2408 list_for_each_entry(cqr, ccw_queue, blocklist) { 2409 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2410 return 0; 2411 } 2412 2413 return 1; 2414 } 2415 2416 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2417 { 2418 struct dasd_device *device; 2419 struct dasd_ccw_req *cqr, *n; 2420 u8 *sense = NULL; 2421 int rc; 2422 2423 retry: 2424 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2425 device = cqr->startdev; 2426 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2427 continue; 2428 2429 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2430 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2431 cqr->status = DASD_CQR_FAILED; 2432 cqr->intrc = -EPERM; 2433 continue; 2434 } 2435 /*Non-temporary stop condition will trigger fail fast*/ 2436 if (device->stopped & ~DASD_STOPPED_PENDING && 2437 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2438 !dasd_eer_enabled(device)) { 2439 cqr->status = DASD_CQR_FAILED; 2440 cqr->intrc = -EAGAIN; 2441 continue; 2442 } 2443 2444 /*Don't try to start requests if device is stopped*/ 2445 if (interruptible) { 2446 rc = wait_event_interruptible( 2447 generic_waitq, !device->stopped); 2448 if (rc == -ERESTARTSYS) { 2449 cqr->status = DASD_CQR_FAILED; 2450 cqr->intrc = rc; 2451 continue; 2452 } 2453 } else 2454 wait_event(generic_waitq, !(device->stopped)); 2455 2456 if (!cqr->callback) 2457 cqr->callback = dasd_wakeup_cb; 2458 cqr->callback_data = DASD_SLEEPON_START_TAG; 2459 dasd_add_request_tail(cqr); 2460 } 2461 2462 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2463 2464 rc = 0; 2465 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2466 /* 2467 * In some cases the 'File Protected' or 'Incorrect Length' 2468 * error might be 
expected and error recovery would be
2469 * unnecessary in these cases. Check if the corresponding suppress
2470 * bit is set.
2471 */
2472 sense = dasd_get_sense(&cqr->irb);
2473 if (sense && sense[1] & SNS1_FILE_PROTECTED &&
2474 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags))
2475 continue;
2476 if (scsw_cstat(&cqr->irb.scsw) == 0x40 &&
2477 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags))
2478 continue;
2479
2480 /*
2481 * For alias devices, simplify error recovery and
2482 * return to the upper layer;
2483 * do not skip ERP requests.
2484 */
2485 if (cqr->startdev != cqr->basedev && !cqr->refers &&
2486 (cqr->status == DASD_CQR_TERMINATED ||
2487 cqr->status == DASD_CQR_NEED_ERP))
2488 return -EAGAIN;
2489
2490 /* normal recovery for basedev IO */
2491 if (__dasd_sleep_on_erp(cqr))
2492 /* handle erp first */
2493 goto retry;
2494 }
2495
2496 return 0;
2497 }
2498
2499 /*
2500 * Queue a request to the tail of the device ccw_queue and wait for
2501 * its completion.
2502 */
2503 int dasd_sleep_on(struct dasd_ccw_req *cqr)
2504 {
2505 return _dasd_sleep_on(cqr, 0);
2506 }
2507 EXPORT_SYMBOL(dasd_sleep_on);
2508
2509 /*
2510 * Start requests from a ccw_queue and wait for their completion.
2511 */
2512 int dasd_sleep_on_queue(struct list_head *ccw_queue)
2513 {
2514 return _dasd_sleep_on_queue(ccw_queue, 0);
2515 }
2516 EXPORT_SYMBOL(dasd_sleep_on_queue);
2517
2518 /*
2519 * Start requests from a ccw_queue and wait interruptibly for their completion.
2520 */
2521 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue)
2522 {
2523 return _dasd_sleep_on_queue(ccw_queue, 1);
2524 }
2525 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible);
2526
2527 /*
2528 * Queue a request to the tail of the device ccw_queue and wait
2529 * interruptibly for its completion.
2530 */
2531 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2532 {
2533 return _dasd_sleep_on(cqr, 1);
2534 }
2535 EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2536
2537 /*
2538 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock
2539 * for eckd devices) the currently running request has to be terminated
2540 * and put back to status queued before the special request is added
2541 * to the head of the queue. Then the special request is waited on normally.
2542 */
2543 static inline int _dasd_term_running_cqr(struct dasd_device *device)
2544 {
2545 struct dasd_ccw_req *cqr;
2546 int rc;
2547
2548 if (list_empty(&device->ccw_queue))
2549 return 0;
2550 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2551 rc = device->discipline->term_IO(cqr);
2552 if (!rc)
2553 /*
2554 * CQR terminated because a more important request is pending.
2555 * Undo decreasing of retry counter because this is
2556 * not an error case.
2557 */ 2558 cqr->retries++; 2559 return rc; 2560 } 2561 2562 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2563 { 2564 struct dasd_device *device; 2565 int rc; 2566 2567 device = cqr->startdev; 2568 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2569 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2570 cqr->status = DASD_CQR_FAILED; 2571 cqr->intrc = -EPERM; 2572 return -EIO; 2573 } 2574 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2575 rc = _dasd_term_running_cqr(device); 2576 if (rc) { 2577 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2578 return rc; 2579 } 2580 cqr->callback = dasd_wakeup_cb; 2581 cqr->callback_data = DASD_SLEEPON_START_TAG; 2582 cqr->status = DASD_CQR_QUEUED; 2583 /* 2584 * add new request as second 2585 * first the terminated cqr needs to be finished 2586 */ 2587 list_add(&cqr->devlist, device->ccw_queue.next); 2588 2589 /* let the bh start the request to keep them in order */ 2590 dasd_schedule_device_bh(device); 2591 2592 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2593 2594 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2595 2596 if (cqr->status == DASD_CQR_DONE) 2597 rc = 0; 2598 else if (cqr->intrc) 2599 rc = cqr->intrc; 2600 else 2601 rc = -EIO; 2602 2603 /* kick tasklets */ 2604 dasd_schedule_device_bh(device); 2605 if (device->block) 2606 dasd_schedule_block_bh(device->block); 2607 2608 return rc; 2609 } 2610 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2611 2612 /* 2613 * Cancels a request that was started with dasd_sleep_on_req. 2614 * This is useful to timeout requests. The request will be 2615 * terminated if it is currently in i/o. 2616 * Returns 0 if request termination was successful 2617 * negative error code if termination failed 2618 * Cancellation of a request is an asynchronous operation! The calling 2619 * function has to wait until the request is properly returned via callback. 2620 */ 2621 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2622 { 2623 struct dasd_device *device = cqr->startdev; 2624 int rc = 0; 2625 2626 switch (cqr->status) { 2627 case DASD_CQR_QUEUED: 2628 /* request was not started - just set to cleared */ 2629 cqr->status = DASD_CQR_CLEARED; 2630 break; 2631 case DASD_CQR_IN_IO: 2632 /* request in IO - terminate IO and release again */ 2633 rc = device->discipline->term_IO(cqr); 2634 if (rc) { 2635 dev_err(&device->cdev->dev, 2636 "Cancelling request %p failed with rc=%d\n", 2637 cqr, rc); 2638 } else { 2639 cqr->stopclk = get_tod_clock(); 2640 } 2641 break; 2642 default: /* already finished or clear pending - do nothing */ 2643 break; 2644 } 2645 dasd_schedule_device_bh(device); 2646 return rc; 2647 } 2648 2649 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2650 { 2651 struct dasd_device *device = cqr->startdev; 2652 unsigned long flags; 2653 int rc; 2654 2655 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2656 rc = __dasd_cancel_req(cqr); 2657 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2658 return rc; 2659 } 2660 2661 /* 2662 * SECTION: Operations of the dasd_block layer. 2663 */ 2664 2665 /* 2666 * Timeout function for dasd_block. This is used when the block layer 2667 * is waiting for something that may not come reliably, (e.g. 
a state 2668 * change interrupt) 2669 */ 2670 static void dasd_block_timeout(struct timer_list *t) 2671 { 2672 unsigned long flags; 2673 struct dasd_block *block; 2674 2675 block = from_timer(block, t, timer); 2676 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2677 /* re-activate request queue */ 2678 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2679 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2680 dasd_schedule_block_bh(block); 2681 blk_mq_run_hw_queues(block->request_queue, true); 2682 } 2683 2684 /* 2685 * Setup timeout for a dasd_block in jiffies. 2686 */ 2687 void dasd_block_set_timer(struct dasd_block *block, int expires) 2688 { 2689 if (expires == 0) 2690 del_timer(&block->timer); 2691 else 2692 mod_timer(&block->timer, jiffies + expires); 2693 } 2694 EXPORT_SYMBOL(dasd_block_set_timer); 2695 2696 /* 2697 * Clear timeout for a dasd_block. 2698 */ 2699 void dasd_block_clear_timer(struct dasd_block *block) 2700 { 2701 del_timer(&block->timer); 2702 } 2703 EXPORT_SYMBOL(dasd_block_clear_timer); 2704 2705 /* 2706 * Process finished error recovery ccw. 2707 */ 2708 static void __dasd_process_erp(struct dasd_device *device, 2709 struct dasd_ccw_req *cqr) 2710 { 2711 dasd_erp_fn_t erp_fn; 2712 2713 if (cqr->status == DASD_CQR_DONE) 2714 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2715 else 2716 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2717 erp_fn = device->discipline->erp_postaction(cqr); 2718 erp_fn(cqr); 2719 } 2720 2721 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2722 { 2723 struct request *req; 2724 blk_status_t error = BLK_STS_OK; 2725 unsigned int proc_bytes; 2726 int status; 2727 2728 req = (struct request *) cqr->callback_data; 2729 dasd_profile_end(cqr->block, cqr, req); 2730 2731 proc_bytes = cqr->proc_bytes; 2732 status = cqr->block->base->discipline->free_cp(cqr, req); 2733 if (status < 0) 2734 error = errno_to_blk_status(status); 2735 else if (status == 0) { 2736 switch (cqr->intrc) { 2737 case -EPERM: 2738 error = BLK_STS_NEXUS; 2739 break; 2740 case -ENOLINK: 2741 error = BLK_STS_TRANSPORT; 2742 break; 2743 case -ETIMEDOUT: 2744 error = BLK_STS_TIMEOUT; 2745 break; 2746 default: 2747 error = BLK_STS_IOERR; 2748 break; 2749 } 2750 } 2751 2752 /* 2753 * We need to take care for ETIMEDOUT errors here since the 2754 * complete callback does not get called in this case. 2755 * Take care of all errors here and avoid additional code to 2756 * transfer the error value to the complete callback. 2757 */ 2758 if (error) { 2759 blk_mq_end_request(req, error); 2760 blk_mq_run_hw_queues(req->q, true); 2761 } else { 2762 /* 2763 * Partial completed requests can happen with ESE devices. 2764 * During read we might have gotten a NRF error and have to 2765 * complete a request partially. 2766 */ 2767 if (proc_bytes) { 2768 blk_update_request(req, BLK_STS_OK, 2769 blk_rq_bytes(req) - proc_bytes); 2770 blk_mq_requeue_request(req, true); 2771 } else if (likely(!blk_should_fake_timeout(req->q))) { 2772 blk_mq_complete_request(req); 2773 } 2774 } 2775 } 2776 2777 /* 2778 * Process ccw request queue. 2779 */ 2780 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2781 struct list_head *final_queue) 2782 { 2783 struct list_head *l, *n; 2784 struct dasd_ccw_req *cqr; 2785 dasd_erp_fn_t erp_fn; 2786 unsigned long flags; 2787 struct dasd_device *base = block->base; 2788 2789 restart: 2790 /* Process request with final status. 
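 * The list is rescanned from the restart label whenever an ERP step may
 * have modified it.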
*/ 2791 list_for_each_safe(l, n, &block->ccw_queue) { 2792 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2793 if (cqr->status != DASD_CQR_DONE && 2794 cqr->status != DASD_CQR_FAILED && 2795 cqr->status != DASD_CQR_NEED_ERP && 2796 cqr->status != DASD_CQR_TERMINATED) 2797 continue; 2798 2799 if (cqr->status == DASD_CQR_TERMINATED) { 2800 base->discipline->handle_terminated_request(cqr); 2801 goto restart; 2802 } 2803 2804 /* Process requests that may be recovered */ 2805 if (cqr->status == DASD_CQR_NEED_ERP) { 2806 erp_fn = base->discipline->erp_action(cqr); 2807 if (IS_ERR(erp_fn(cqr))) 2808 continue; 2809 goto restart; 2810 } 2811 2812 /* log sense for fatal error */ 2813 if (cqr->status == DASD_CQR_FAILED) { 2814 dasd_log_sense(cqr, &cqr->irb); 2815 } 2816 2817 /* First of all call extended error reporting. */ 2818 if (dasd_eer_enabled(base) && 2819 cqr->status == DASD_CQR_FAILED) { 2820 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2821 2822 /* restart request */ 2823 cqr->status = DASD_CQR_FILLED; 2824 cqr->retries = 255; 2825 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2826 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2827 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2828 flags); 2829 goto restart; 2830 } 2831 2832 /* Process finished ERP request. */ 2833 if (cqr->refers) { 2834 __dasd_process_erp(base, cqr); 2835 goto restart; 2836 } 2837 2838 /* Rechain finished requests to final queue */ 2839 cqr->endclk = get_tod_clock(); 2840 list_move_tail(&cqr->blocklist, final_queue); 2841 } 2842 } 2843 2844 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2845 { 2846 dasd_schedule_block_bh(cqr->block); 2847 } 2848 2849 static void __dasd_block_start_head(struct dasd_block *block) 2850 { 2851 struct dasd_ccw_req *cqr; 2852 2853 if (list_empty(&block->ccw_queue)) 2854 return; 2855 /* We allways begin with the first requests on the queue, as some 2856 * of previously started requests have to be enqueued on a 2857 * dasd_device again for error recovery. 2858 */ 2859 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2860 if (cqr->status != DASD_CQR_FILLED) 2861 continue; 2862 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2863 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2864 cqr->status = DASD_CQR_FAILED; 2865 cqr->intrc = -EPERM; 2866 dasd_schedule_block_bh(block); 2867 continue; 2868 } 2869 /* Non-temporary stop condition will trigger fail fast */ 2870 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2871 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2872 (!dasd_eer_enabled(block->base))) { 2873 cqr->status = DASD_CQR_FAILED; 2874 cqr->intrc = -ENOLINK; 2875 dasd_schedule_block_bh(block); 2876 continue; 2877 } 2878 /* Don't try to start requests if device is stopped */ 2879 if (block->base->stopped) 2880 return; 2881 2882 /* just a fail safe check, should not happen */ 2883 if (!cqr->startdev) 2884 cqr->startdev = block->base; 2885 2886 /* make sure that the requests we submit find their way back */ 2887 cqr->callback = dasd_return_cqr_cb; 2888 2889 dasd_add_request_tail(cqr); 2890 } 2891 } 2892 2893 /* 2894 * Central dasd_block layer routine. Takes requests from the generic 2895 * block layer request queue, creates ccw requests, enqueues them on 2896 * a dasd_device and processes ccw requests that have been returned. 
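 * (With blk-mq the ccw requests themselves are built in do_dasd_request();
 * this tasklet finishes returned requests and starts queued ones.)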
2897 */ 2898 static void dasd_block_tasklet(unsigned long data) 2899 { 2900 struct dasd_block *block = (struct dasd_block *) data; 2901 struct list_head final_queue; 2902 struct list_head *l, *n; 2903 struct dasd_ccw_req *cqr; 2904 struct dasd_queue *dq; 2905 2906 atomic_set(&block->tasklet_scheduled, 0); 2907 INIT_LIST_HEAD(&final_queue); 2908 spin_lock_irq(&block->queue_lock); 2909 /* Finish off requests on ccw queue */ 2910 __dasd_process_block_ccw_queue(block, &final_queue); 2911 spin_unlock_irq(&block->queue_lock); 2912 2913 /* Now call the callback function of requests with final status */ 2914 list_for_each_safe(l, n, &final_queue) { 2915 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2916 dq = cqr->dq; 2917 spin_lock_irq(&dq->lock); 2918 list_del_init(&cqr->blocklist); 2919 __dasd_cleanup_cqr(cqr); 2920 spin_unlock_irq(&dq->lock); 2921 } 2922 2923 spin_lock_irq(&block->queue_lock); 2924 /* Now check if the head of the ccw queue needs to be started. */ 2925 __dasd_block_start_head(block); 2926 spin_unlock_irq(&block->queue_lock); 2927 2928 if (waitqueue_active(&shutdown_waitq)) 2929 wake_up(&shutdown_waitq); 2930 dasd_put_device(block->base); 2931 } 2932 2933 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2934 { 2935 wake_up(&dasd_flush_wq); 2936 } 2937 2938 /* 2939 * Requeue a request back to the block request queue 2940 * only works for block requests 2941 */ 2942 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2943 { 2944 struct dasd_block *block = cqr->block; 2945 struct request *req; 2946 2947 if (!block) 2948 return -EINVAL; 2949 /* 2950 * If the request is an ERP request there is nothing to requeue. 2951 * This will be done with the remaining original request. 2952 */ 2953 if (cqr->refers) 2954 return 0; 2955 spin_lock_irq(&cqr->dq->lock); 2956 req = (struct request *) cqr->callback_data; 2957 blk_mq_requeue_request(req, false); 2958 spin_unlock_irq(&cqr->dq->lock); 2959 2960 return 0; 2961 } 2962 2963 /* 2964 * Go through all request on the dasd_block request queue, cancel them 2965 * on the respective dasd_device, and return them to the generic 2966 * block layer. 2967 */ 2968 static int dasd_flush_block_queue(struct dasd_block *block) 2969 { 2970 struct dasd_ccw_req *cqr, *n; 2971 int rc, i; 2972 struct list_head flush_queue; 2973 unsigned long flags; 2974 2975 INIT_LIST_HEAD(&flush_queue); 2976 spin_lock_bh(&block->queue_lock); 2977 rc = 0; 2978 restart: 2979 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2980 /* if this request currently owned by a dasd_device cancel it */ 2981 if (cqr->status >= DASD_CQR_QUEUED) 2982 rc = dasd_cancel_req(cqr); 2983 if (rc < 0) 2984 break; 2985 /* Rechain request (including erp chain) so it won't be 2986 * touched by the dasd_block_tasklet anymore. 2987 * Replace the callback so we notice when the request 2988 * is returned from the dasd_device layer. 2989 */ 2990 cqr->callback = _dasd_wake_block_flush_cb; 2991 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2992 list_move_tail(&cqr->blocklist, &flush_queue); 2993 if (i > 1) 2994 /* moved more than one request - need to restart */ 2995 goto restart; 2996 } 2997 spin_unlock_bh(&block->queue_lock); 2998 /* Now call the callback function of flushed requests */ 2999 restart_cb: 3000 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3001 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3002 /* Process finished ERP request. 
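 * A request with cqr->refers set is an ERP request; it points to the
 * original request it was created to recover.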
*/ 3003 if (cqr->refers) { 3004 spin_lock_bh(&block->queue_lock); 3005 __dasd_process_erp(block->base, cqr); 3006 spin_unlock_bh(&block->queue_lock); 3007 /* restart list_for_xx loop since dasd_process_erp 3008 * might remove multiple elements */ 3009 goto restart_cb; 3010 } 3011 /* call the callback function */ 3012 spin_lock_irqsave(&cqr->dq->lock, flags); 3013 cqr->endclk = get_tod_clock(); 3014 list_del_init(&cqr->blocklist); 3015 __dasd_cleanup_cqr(cqr); 3016 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3017 } 3018 return rc; 3019 } 3020 3021 /* 3022 * Schedules a call to dasd_tasklet over the device tasklet. 3023 */ 3024 void dasd_schedule_block_bh(struct dasd_block *block) 3025 { 3026 /* Protect against rescheduling. */ 3027 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3028 return; 3029 /* life cycle of block is bound to it's base device */ 3030 dasd_get_device(block->base); 3031 tasklet_hi_schedule(&block->tasklet); 3032 } 3033 EXPORT_SYMBOL(dasd_schedule_block_bh); 3034 3035 3036 /* 3037 * SECTION: external block device operations 3038 * (request queue handling, open, release, etc.) 3039 */ 3040 3041 /* 3042 * Dasd request queue function. Called from ll_rw_blk.c 3043 */ 3044 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3045 const struct blk_mq_queue_data *qd) 3046 { 3047 struct dasd_block *block = hctx->queue->queuedata; 3048 struct dasd_queue *dq = hctx->driver_data; 3049 struct request *req = qd->rq; 3050 struct dasd_device *basedev; 3051 struct dasd_ccw_req *cqr; 3052 blk_status_t rc = BLK_STS_OK; 3053 3054 basedev = block->base; 3055 spin_lock_irq(&dq->lock); 3056 if (basedev->state < DASD_STATE_READY || 3057 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3058 DBF_DEV_EVENT(DBF_ERR, basedev, 3059 "device not ready for request %p", req); 3060 rc = BLK_STS_IOERR; 3061 goto out; 3062 } 3063 3064 /* 3065 * if device is stopped do not fetch new requests 3066 * except failfast is active which will let requests fail 3067 * immediately in __dasd_block_start_head() 3068 */ 3069 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3070 DBF_DEV_EVENT(DBF_ERR, basedev, 3071 "device stopped request %p", req); 3072 rc = BLK_STS_RESOURCE; 3073 goto out; 3074 } 3075 3076 if (basedev->features & DASD_FEATURE_READONLY && 3077 rq_data_dir(req) == WRITE) { 3078 DBF_DEV_EVENT(DBF_ERR, basedev, 3079 "Rejecting write request %p", req); 3080 rc = BLK_STS_IOERR; 3081 goto out; 3082 } 3083 3084 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3085 (basedev->features & DASD_FEATURE_FAILFAST || 3086 blk_noretry_request(req))) { 3087 DBF_DEV_EVENT(DBF_ERR, basedev, 3088 "Rejecting failfast request %p", req); 3089 rc = BLK_STS_IOERR; 3090 goto out; 3091 } 3092 3093 cqr = basedev->discipline->build_cp(basedev, block, req); 3094 if (IS_ERR(cqr)) { 3095 if (PTR_ERR(cqr) == -EBUSY || 3096 PTR_ERR(cqr) == -ENOMEM || 3097 PTR_ERR(cqr) == -EAGAIN) { 3098 rc = BLK_STS_RESOURCE; 3099 goto out; 3100 } 3101 DBF_DEV_EVENT(DBF_ERR, basedev, 3102 "CCW creation failed (rc=%ld) on request %p", 3103 PTR_ERR(cqr), req); 3104 rc = BLK_STS_IOERR; 3105 goto out; 3106 } 3107 /* 3108 * Note: callback is set to dasd_return_cqr_cb in 3109 * __dasd_block_start_head to cover erp requests as well 3110 */ 3111 cqr->callback_data = req; 3112 cqr->status = DASD_CQR_FILLED; 3113 cqr->dq = dq; 3114 3115 blk_mq_start_request(req); 3116 spin_lock(&block->queue_lock); 3117 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3118 INIT_LIST_HEAD(&cqr->devlist); 3119 
dasd_profile_start(block, cqr, req); 3120 dasd_schedule_block_bh(block); 3121 spin_unlock(&block->queue_lock); 3122 3123 out: 3124 spin_unlock_irq(&dq->lock); 3125 return rc; 3126 } 3127 3128 /* 3129 * Block timeout callback, called from the block layer 3130 * 3131 * Return values: 3132 * BLK_EH_RESET_TIMER if the request should be left running 3133 * BLK_EH_DONE if the request is handled or terminated 3134 * by the driver. 3135 */ 3136 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3137 { 3138 struct dasd_block *block = req->q->queuedata; 3139 struct dasd_device *device; 3140 struct dasd_ccw_req *cqr; 3141 unsigned long flags; 3142 int rc = 0; 3143 3144 cqr = blk_mq_rq_to_pdu(req); 3145 if (!cqr) 3146 return BLK_EH_DONE; 3147 3148 spin_lock_irqsave(&cqr->dq->lock, flags); 3149 device = cqr->startdev ? cqr->startdev : block->base; 3150 if (!device->blk_timeout) { 3151 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3152 return BLK_EH_RESET_TIMER; 3153 } 3154 DBF_DEV_EVENT(DBF_WARNING, device, 3155 " dasd_times_out cqr %p status %x", 3156 cqr, cqr->status); 3157 3158 spin_lock(&block->queue_lock); 3159 spin_lock(get_ccwdev_lock(device->cdev)); 3160 cqr->retries = -1; 3161 cqr->intrc = -ETIMEDOUT; 3162 if (cqr->status >= DASD_CQR_QUEUED) { 3163 rc = __dasd_cancel_req(cqr); 3164 } else if (cqr->status == DASD_CQR_FILLED || 3165 cqr->status == DASD_CQR_NEED_ERP) { 3166 cqr->status = DASD_CQR_TERMINATED; 3167 } else if (cqr->status == DASD_CQR_IN_ERP) { 3168 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3169 3170 list_for_each_entry_safe(searchcqr, nextcqr, 3171 &block->ccw_queue, blocklist) { 3172 tmpcqr = searchcqr; 3173 while (tmpcqr->refers) 3174 tmpcqr = tmpcqr->refers; 3175 if (tmpcqr != cqr) 3176 continue; 3177 /* searchcqr is an ERP request for cqr */ 3178 searchcqr->retries = -1; 3179 searchcqr->intrc = -ETIMEDOUT; 3180 if (searchcqr->status >= DASD_CQR_QUEUED) { 3181 rc = __dasd_cancel_req(searchcqr); 3182 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3183 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3184 searchcqr->status = DASD_CQR_TERMINATED; 3185 rc = 0; 3186 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3187 /* 3188 * Shouldn't happen; most recent ERP 3189 * request is at the front of queue 3190 */ 3191 continue; 3192 } 3193 break; 3194 } 3195 } 3196 spin_unlock(get_ccwdev_lock(device->cdev)); 3197 dasd_schedule_block_bh(block); 3198 spin_unlock(&block->queue_lock); 3199 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3200 3201 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3202 } 3203 3204 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3205 unsigned int idx) 3206 { 3207 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3208 3209 if (!dq) 3210 return -ENOMEM; 3211 3212 spin_lock_init(&dq->lock); 3213 hctx->driver_data = dq; 3214 3215 return 0; 3216 } 3217 3218 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3219 { 3220 kfree(hctx->driver_data); 3221 hctx->driver_data = NULL; 3222 } 3223 3224 static void dasd_request_done(struct request *req) 3225 { 3226 blk_mq_end_request(req, 0); 3227 blk_mq_run_hw_queues(req->q, true); 3228 } 3229 3230 static struct blk_mq_ops dasd_mq_ops = { 3231 .queue_rq = do_dasd_request, 3232 .complete = dasd_request_done, 3233 .timeout = dasd_times_out, 3234 .init_hctx = dasd_init_hctx, 3235 .exit_hctx = dasd_exit_hctx, 3236 }; 3237 3238 /* 3239 * Allocate and initialize request queue and default I/O scheduler. 
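 * The blk-mq tag set uses the module parameters queue_depth and
 * nr_hw_queues as defaults, and each request embeds a dasd_ccw_req as
 * its pdu (see blk_mq_rq_to_pdu() in dasd_times_out()).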
3240 */ 3241 static int dasd_alloc_queue(struct dasd_block *block) 3242 { 3243 int rc; 3244 3245 block->tag_set.ops = &dasd_mq_ops; 3246 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3247 block->tag_set.nr_hw_queues = nr_hw_queues; 3248 block->tag_set.queue_depth = queue_depth; 3249 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3250 block->tag_set.numa_node = NUMA_NO_NODE; 3251 3252 rc = blk_mq_alloc_tag_set(&block->tag_set); 3253 if (rc) 3254 return rc; 3255 3256 block->request_queue = blk_mq_init_queue(&block->tag_set); 3257 if (IS_ERR(block->request_queue)) 3258 return PTR_ERR(block->request_queue); 3259 3260 block->request_queue->queuedata = block; 3261 3262 return 0; 3263 } 3264 3265 /* 3266 * Deactivate and free request queue. 3267 */ 3268 static void dasd_free_queue(struct dasd_block *block) 3269 { 3270 if (block->request_queue) { 3271 blk_cleanup_queue(block->request_queue); 3272 blk_mq_free_tag_set(&block->tag_set); 3273 block->request_queue = NULL; 3274 } 3275 } 3276 3277 static int dasd_open(struct block_device *bdev, fmode_t mode) 3278 { 3279 struct dasd_device *base; 3280 int rc; 3281 3282 base = dasd_device_from_gendisk(bdev->bd_disk); 3283 if (!base) 3284 return -ENODEV; 3285 3286 atomic_inc(&base->block->open_count); 3287 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3288 rc = -ENODEV; 3289 goto unlock; 3290 } 3291 3292 if (!try_module_get(base->discipline->owner)) { 3293 rc = -EINVAL; 3294 goto unlock; 3295 } 3296 3297 if (dasd_probeonly) { 3298 dev_info(&base->cdev->dev, 3299 "Accessing the DASD failed because it is in " 3300 "probeonly mode\n"); 3301 rc = -EPERM; 3302 goto out; 3303 } 3304 3305 if (base->state <= DASD_STATE_BASIC) { 3306 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3307 " Cannot open unrecognized device"); 3308 rc = -ENODEV; 3309 goto out; 3310 } 3311 3312 if ((mode & FMODE_WRITE) && 3313 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3314 (base->features & DASD_FEATURE_READONLY))) { 3315 rc = -EROFS; 3316 goto out; 3317 } 3318 3319 dasd_put_device(base); 3320 return 0; 3321 3322 out: 3323 module_put(base->discipline->owner); 3324 unlock: 3325 atomic_dec(&base->block->open_count); 3326 dasd_put_device(base); 3327 return rc; 3328 } 3329 3330 static void dasd_release(struct gendisk *disk, fmode_t mode) 3331 { 3332 struct dasd_device *base = dasd_device_from_gendisk(disk); 3333 if (base) { 3334 atomic_dec(&base->block->open_count); 3335 module_put(base->discipline->owner); 3336 dasd_put_device(base); 3337 } 3338 } 3339 3340 /* 3341 * Return disk geometry. 
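 * The partition offset reported in geo->start is converted from
 * 512-byte sectors to device blocks via the block's s2b_shift.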
3342 */ 3343 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3344 { 3345 struct dasd_device *base; 3346 3347 base = dasd_device_from_gendisk(bdev->bd_disk); 3348 if (!base) 3349 return -ENODEV; 3350 3351 if (!base->discipline || 3352 !base->discipline->fill_geometry) { 3353 dasd_put_device(base); 3354 return -EINVAL; 3355 } 3356 base->discipline->fill_geometry(base->block, geo); 3357 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3358 dasd_put_device(base); 3359 return 0; 3360 } 3361 3362 const struct block_device_operations 3363 dasd_device_operations = { 3364 .owner = THIS_MODULE, 3365 .open = dasd_open, 3366 .release = dasd_release, 3367 .ioctl = dasd_ioctl, 3368 .compat_ioctl = dasd_ioctl, 3369 .getgeo = dasd_getgeo, 3370 .set_read_only = dasd_set_read_only, 3371 }; 3372 3373 /******************************************************************************* 3374 * end of block device operations 3375 */ 3376 3377 static void 3378 dasd_exit(void) 3379 { 3380 #ifdef CONFIG_PROC_FS 3381 dasd_proc_exit(); 3382 #endif 3383 dasd_eer_exit(); 3384 kmem_cache_destroy(dasd_page_cache); 3385 dasd_page_cache = NULL; 3386 dasd_gendisk_exit(); 3387 dasd_devmap_exit(); 3388 if (dasd_debug_area != NULL) { 3389 debug_unregister(dasd_debug_area); 3390 dasd_debug_area = NULL; 3391 } 3392 dasd_statistics_removeroot(); 3393 } 3394 3395 /* 3396 * SECTION: common functions for ccw_driver use 3397 */ 3398 3399 /* 3400 * Is the device read-only? 3401 * Note that this function does not report the setting of the 3402 * readonly device attribute, but how it is configured in z/VM. 3403 */ 3404 int dasd_device_is_ro(struct dasd_device *device) 3405 { 3406 struct ccw_dev_id dev_id; 3407 struct diag210 diag_data; 3408 int rc; 3409 3410 if (!MACHINE_IS_VM) 3411 return 0; 3412 ccw_device_get_id(device->cdev, &dev_id); 3413 memset(&diag_data, 0, sizeof(diag_data)); 3414 diag_data.vrdcdvno = dev_id.devno; 3415 diag_data.vrdclen = sizeof(diag_data); 3416 rc = diag210(&diag_data); 3417 if (rc == 0 || rc == 2) { 3418 return diag_data.vrdcvfla & 0x80; 3419 } else { 3420 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3421 dev_id.devno, rc); 3422 return 0; 3423 } 3424 } 3425 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3426 3427 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3428 { 3429 struct ccw_device *cdev = data; 3430 int ret; 3431 3432 ret = ccw_device_set_online(cdev); 3433 if (ret) 3434 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3435 dev_name(&cdev->dev), ret); 3436 } 3437 3438 /* 3439 * Initial attempt at a probe function. this can be simplified once 3440 * the other detection code is gone. 3441 */ 3442 int dasd_generic_probe(struct ccw_device *cdev) 3443 { 3444 cdev->handler = &dasd_int_handler; 3445 3446 /* 3447 * Automatically online either all dasd devices (dasd_autodetect) 3448 * or all devices specified with dasd= parameters during 3449 * initial probe. 3450 */ 3451 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3452 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3453 async_schedule(dasd_generic_auto_online, cdev); 3454 return 0; 3455 } 3456 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3457 3458 void dasd_generic_free_discipline(struct dasd_device *device) 3459 { 3460 /* Forget the discipline information. 
*/ 3461 if (device->discipline) { 3462 if (device->discipline->uncheck_device) 3463 device->discipline->uncheck_device(device); 3464 module_put(device->discipline->owner); 3465 device->discipline = NULL; 3466 } 3467 if (device->base_discipline) { 3468 module_put(device->base_discipline->owner); 3469 device->base_discipline = NULL; 3470 } 3471 } 3472 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3473 3474 /* 3475 * This will one day be called from a global not_oper handler. 3476 * It is also used by driver_unregister during module unload. 3477 */ 3478 void dasd_generic_remove(struct ccw_device *cdev) 3479 { 3480 struct dasd_device *device; 3481 struct dasd_block *block; 3482 3483 device = dasd_device_from_cdev(cdev); 3484 if (IS_ERR(device)) 3485 return; 3486 3487 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3488 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3489 /* Already doing offline processing */ 3490 dasd_put_device(device); 3491 return; 3492 } 3493 /* 3494 * This device is removed unconditionally. Set offline 3495 * flag to prevent dasd_open from opening it while it is 3496 * no quite down yet. 3497 */ 3498 dasd_set_target_state(device, DASD_STATE_NEW); 3499 cdev->handler = NULL; 3500 /* dasd_delete_device destroys the device reference. */ 3501 block = device->block; 3502 dasd_delete_device(device); 3503 /* 3504 * life cycle of block is bound to device, so delete it after 3505 * device was safely removed 3506 */ 3507 if (block) 3508 dasd_free_block(block); 3509 } 3510 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3511 3512 /* 3513 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3514 * the device is detected for the first time and is supposed to be used 3515 * or the user has started activation through sysfs. 3516 */ 3517 int dasd_generic_set_online(struct ccw_device *cdev, 3518 struct dasd_discipline *base_discipline) 3519 { 3520 struct dasd_discipline *discipline; 3521 struct dasd_device *device; 3522 int rc; 3523 3524 /* first online clears initial online feature flag */ 3525 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3526 device = dasd_create_device(cdev); 3527 if (IS_ERR(device)) 3528 return PTR_ERR(device); 3529 3530 discipline = base_discipline; 3531 if (device->features & DASD_FEATURE_USEDIAG) { 3532 if (!dasd_diag_discipline_pointer) { 3533 /* Try to load the required module. */ 3534 rc = request_module(DASD_DIAG_MOD); 3535 if (rc) { 3536 pr_warn("%s Setting the DASD online failed " 3537 "because the required module %s " 3538 "could not be loaded (rc=%d)\n", 3539 dev_name(&cdev->dev), DASD_DIAG_MOD, 3540 rc); 3541 dasd_delete_device(device); 3542 return -ENODEV; 3543 } 3544 } 3545 /* Module init could have failed, so check again here after 3546 * request_module(). 
*/ 3547 if (!dasd_diag_discipline_pointer) { 3548 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3549 dev_name(&cdev->dev)); 3550 dasd_delete_device(device); 3551 return -ENODEV; 3552 } 3553 discipline = dasd_diag_discipline_pointer; 3554 } 3555 if (!try_module_get(base_discipline->owner)) { 3556 dasd_delete_device(device); 3557 return -EINVAL; 3558 } 3559 if (!try_module_get(discipline->owner)) { 3560 module_put(base_discipline->owner); 3561 dasd_delete_device(device); 3562 return -EINVAL; 3563 } 3564 device->base_discipline = base_discipline; 3565 device->discipline = discipline; 3566 3567 /* check_device will allocate block device if necessary */ 3568 rc = discipline->check_device(device); 3569 if (rc) { 3570 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3571 dev_name(&cdev->dev), discipline->name, rc); 3572 module_put(discipline->owner); 3573 module_put(base_discipline->owner); 3574 dasd_delete_device(device); 3575 return rc; 3576 } 3577 3578 dasd_set_target_state(device, DASD_STATE_ONLINE); 3579 if (device->state <= DASD_STATE_KNOWN) { 3580 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3581 dev_name(&cdev->dev)); 3582 rc = -ENODEV; 3583 dasd_set_target_state(device, DASD_STATE_NEW); 3584 if (device->block) 3585 dasd_free_block(device->block); 3586 dasd_delete_device(device); 3587 } else 3588 pr_debug("dasd_generic device %s found\n", 3589 dev_name(&cdev->dev)); 3590 3591 wait_event(dasd_init_waitq, _wait_for_device(device)); 3592 3593 dasd_put_device(device); 3594 return rc; 3595 } 3596 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3597 3598 int dasd_generic_set_offline(struct ccw_device *cdev) 3599 { 3600 struct dasd_device *device; 3601 struct dasd_block *block; 3602 int max_count, open_count, rc; 3603 unsigned long flags; 3604 3605 rc = 0; 3606 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3607 device = dasd_device_from_cdev_locked(cdev); 3608 if (IS_ERR(device)) { 3609 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3610 return PTR_ERR(device); 3611 } 3612 3613 /* 3614 * We must make sure that this device is currently not in use. 3615 * The open_count is increased for every opener, that includes 3616 * the blkdev_get in dasd_scan_partitions. We are only interested 3617 * in the other openers. 3618 */ 3619 if (device->block) { 3620 max_count = device->block->bdev ? 0 : -1; 3621 open_count = atomic_read(&device->block->open_count); 3622 if (open_count > max_count) { 3623 if (open_count > 0) 3624 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3625 dev_name(&cdev->dev), open_count); 3626 else 3627 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3628 dev_name(&cdev->dev)); 3629 rc = -EBUSY; 3630 goto out_err; 3631 } 3632 } 3633 3634 /* 3635 * Test if the offline processing is already running and exit if so. 
3636 * If a safe offline is being processed this could only be a normal 3637 * offline that should be able to overtake the safe offline and 3638 * cancel any I/O we do not want to wait for any longer 3639 */ 3640 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3641 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3642 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3643 &device->flags); 3644 } else { 3645 rc = -EBUSY; 3646 goto out_err; 3647 } 3648 } 3649 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3650 3651 /* 3652 * if safe_offline is called set safe_offline_running flag and 3653 * clear safe_offline so that a call to normal offline 3654 * can overrun safe_offline processing 3655 */ 3656 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3657 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3658 /* need to unlock here to wait for outstanding I/O */ 3659 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3660 /* 3661 * If we want to set the device safe offline all IO operations 3662 * should be finished before continuing the offline process 3663 * so sync bdev first and then wait for our queues to become 3664 * empty 3665 */ 3666 if (device->block) { 3667 rc = fsync_bdev(device->block->bdev); 3668 if (rc != 0) 3669 goto interrupted; 3670 } 3671 dasd_schedule_device_bh(device); 3672 rc = wait_event_interruptible(shutdown_waitq, 3673 _wait_for_empty_queues(device)); 3674 if (rc != 0) 3675 goto interrupted; 3676 3677 /* 3678 * check if a normal offline process overtook the offline 3679 * processing in this case simply do nothing beside returning 3680 * that we got interrupted 3681 * otherwise mark safe offline as not running any longer and 3682 * continue with normal offline 3683 */ 3684 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3685 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3686 rc = -ERESTARTSYS; 3687 goto out_err; 3688 } 3689 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3690 } 3691 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3692 3693 dasd_set_target_state(device, DASD_STATE_NEW); 3694 /* dasd_delete_device destroys the device reference. */ 3695 block = device->block; 3696 dasd_delete_device(device); 3697 /* 3698 * life cycle of block is bound to device, so delete it after 3699 * device was safely removed 3700 */ 3701 if (block) 3702 dasd_free_block(block); 3703 3704 return 0; 3705 3706 interrupted: 3707 /* interrupted by signal */ 3708 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3709 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3710 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3711 out_err: 3712 dasd_put_device(device); 3713 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3714 return rc; 3715 } 3716 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3717 3718 int dasd_generic_last_path_gone(struct dasd_device *device) 3719 { 3720 struct dasd_ccw_req *cqr; 3721 3722 dev_warn(&device->cdev->dev, "No operational channel path is left " 3723 "for the device\n"); 3724 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3725 /* First of all call extended error reporting. */ 3726 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3727 3728 if (device->state < DASD_STATE_BASIC) 3729 return 0; 3730 /* Device is active. We want to keep it. 
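 * Requeue any requests that were caught in flight and stop the device
 * until a path becomes operational again.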
*/ 3731 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3732 if ((cqr->status == DASD_CQR_IN_IO) || 3733 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3734 cqr->status = DASD_CQR_QUEUED; 3735 cqr->retries++; 3736 } 3737 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3738 dasd_device_clear_timer(device); 3739 dasd_schedule_device_bh(device); 3740 return 1; 3741 } 3742 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3743 3744 int dasd_generic_path_operational(struct dasd_device *device) 3745 { 3746 dev_info(&device->cdev->dev, "A channel path to the device has become " 3747 "operational\n"); 3748 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3749 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3750 dasd_schedule_device_bh(device); 3751 if (device->block) { 3752 dasd_schedule_block_bh(device->block); 3753 if (device->block->request_queue) 3754 blk_mq_run_hw_queues(device->block->request_queue, 3755 true); 3756 } 3757 3758 if (!device->stopped) 3759 wake_up(&generic_waitq); 3760 3761 return 1; 3762 } 3763 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3764 3765 int dasd_generic_notify(struct ccw_device *cdev, int event) 3766 { 3767 struct dasd_device *device; 3768 int ret; 3769 3770 device = dasd_device_from_cdev_locked(cdev); 3771 if (IS_ERR(device)) 3772 return 0; 3773 ret = 0; 3774 switch (event) { 3775 case CIO_GONE: 3776 case CIO_BOXED: 3777 case CIO_NO_PATH: 3778 dasd_path_no_path(device); 3779 ret = dasd_generic_last_path_gone(device); 3780 break; 3781 case CIO_OPER: 3782 ret = 1; 3783 if (dasd_path_get_opm(device)) 3784 ret = dasd_generic_path_operational(device); 3785 break; 3786 } 3787 dasd_put_device(device); 3788 return ret; 3789 } 3790 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3791 3792 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3793 { 3794 struct dasd_device *device; 3795 int chp, oldopm, hpfpm, ifccpm; 3796 3797 device = dasd_device_from_cdev_locked(cdev); 3798 if (IS_ERR(device)) 3799 return; 3800 3801 oldopm = dasd_path_get_opm(device); 3802 for (chp = 0; chp < 8; chp++) { 3803 if (path_event[chp] & PE_PATH_GONE) { 3804 dasd_path_notoper(device, chp); 3805 } 3806 if (path_event[chp] & PE_PATH_AVAILABLE) { 3807 dasd_path_available(device, chp); 3808 dasd_schedule_device_bh(device); 3809 } 3810 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3811 if (!dasd_path_is_operational(device, chp) && 3812 !dasd_path_need_verify(device, chp)) { 3813 /* 3814 * we can not establish a pathgroup on an 3815 * unavailable path, so trigger a path 3816 * verification first 3817 */ 3818 dasd_path_available(device, chp); 3819 dasd_schedule_device_bh(device); 3820 } 3821 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3822 "Pathgroup re-established\n"); 3823 if (device->discipline->kick_validate) 3824 device->discipline->kick_validate(device); 3825 } 3826 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3827 dasd_path_fcsec_update(device, chp); 3828 dasd_schedule_device_bh(device); 3829 } 3830 } 3831 hpfpm = dasd_path_get_hpfpm(device); 3832 ifccpm = dasd_path_get_ifccpm(device); 3833 if (!dasd_path_get_opm(device) && hpfpm) { 3834 /* 3835 * device has no operational paths but at least one path is 3836 * disabled due to HPF errors 3837 * disable HPF at all and use the path(s) again 3838 */ 3839 if (device->discipline->disable_hpf) 3840 device->discipline->disable_hpf(device); 3841 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3842 dasd_path_set_tbvpm(device, hpfpm); 3843 dasd_schedule_device_bh(device); 3844 
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors; trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to requeue_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing requests to the block layer only works for
		 * block device requests
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again.
		 * All ERP requests (cqr->refers) have a cqr->block
		 * pointer copied from the original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, they are internal requests and go back
	 * to the device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* move requeue_queue to the end of the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * In command mode and transport mode we need to look for sense
 * data in different places.
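 * In command mode concurrent sense data is found in the ecw area of the
 * irb, in transport mode it is part of the transport status block (tsb).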
 * The sense data itself is always an array of 32 bytes, so we can
 * unify the sense data access for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;

failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);