1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * Copyright IBM Corp. 1999, 2009 9 */ 10 11 #define KMSG_COMPONENT "dasd" 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/kmod.h> 15 #include <linux/init.h> 16 #include <linux/interrupt.h> 17 #include <linux/ctype.h> 18 #include <linux/major.h> 19 #include <linux/slab.h> 20 #include <linux/hdreg.h> 21 #include <linux/async.h> 22 #include <linux/mutex.h> 23 #include <linux/debugfs.h> 24 #include <linux/seq_file.h> 25 #include <linux/vmalloc.h> 26 27 #include <asm/ccwdev.h> 28 #include <asm/ebcdic.h> 29 #include <asm/idals.h> 30 #include <asm/itcw.h> 31 #include <asm/diag.h> 32 33 /* This is ugly... */ 34 #define PRINTK_HEADER "dasd:" 35 36 #include "dasd_int.h" 37 /* 38 * SECTION: Constant definitions to be used within this file 39 */ 40 #define DASD_CHANQ_MAX_SIZE 4 41 42 #define DASD_DIAG_MOD "dasd_diag_mod" 43 44 static unsigned int queue_depth = 32; 45 static unsigned int nr_hw_queues = 4; 46 47 module_param(queue_depth, uint, 0444); 48 MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices"); 49 50 module_param(nr_hw_queues, uint, 0444); 51 MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices"); 52 53 /* 54 * SECTION: exported variables of dasd.c 55 */ 56 debug_info_t *dasd_debug_area; 57 EXPORT_SYMBOL(dasd_debug_area); 58 static struct dentry *dasd_debugfs_root_entry; 59 struct dasd_discipline *dasd_diag_discipline_pointer; 60 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 61 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 62 63 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 64 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 65 " Copyright IBM Corp. 2000"); 66 MODULE_LICENSE("GPL"); 67 68 /* 69 * SECTION: prototypes for static functions of dasd.c 70 */ 71 static int dasd_alloc_queue(struct dasd_block *); 72 static void dasd_free_queue(struct dasd_block *); 73 static int dasd_flush_block_queue(struct dasd_block *); 74 static void dasd_device_tasklet(unsigned long); 75 static void dasd_block_tasklet(unsigned long); 76 static void do_kick_device(struct work_struct *); 77 static void do_reload_device(struct work_struct *); 78 static void do_requeue_requests(struct work_struct *); 79 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 80 static void dasd_device_timeout(struct timer_list *); 81 static void dasd_block_timeout(struct timer_list *); 82 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 83 static void dasd_profile_init(struct dasd_profile *, struct dentry *); 84 static void dasd_profile_exit(struct dasd_profile *); 85 static void dasd_hosts_init(struct dentry *, struct dasd_device *); 86 static void dasd_hosts_exit(struct dasd_device *); 87 88 /* 89 * SECTION: Operations on the device structure. 90 */ 91 static wait_queue_head_t dasd_init_waitq; 92 static wait_queue_head_t dasd_flush_wq; 93 static wait_queue_head_t generic_waitq; 94 static wait_queue_head_t shutdown_waitq; 95 96 /* 97 * Allocate memory for a new device structure. 98 */ 99 struct dasd_device *dasd_alloc_device(void) 100 { 101 struct dasd_device *device; 102 103 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 104 if (!device) 105 return ERR_PTR(-ENOMEM); 106 107 /* Get two pages for normal block device operations. */ 108 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 109 if (!device->ccw_mem) { 110 kfree(device); 111 return ERR_PTR(-ENOMEM); 112 } 113 /* Get one page for error recovery. */ 114 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 115 if (!device->erp_mem) { 116 free_pages((unsigned long) device->ccw_mem, 1); 117 kfree(device); 118 return ERR_PTR(-ENOMEM); 119 } 120 /* Get two pages for ese format. */ 121 device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 122 if (!device->ese_mem) { 123 free_page((unsigned long) device->erp_mem); 124 free_pages((unsigned long) device->ccw_mem, 1); 125 kfree(device); 126 return ERR_PTR(-ENOMEM); 127 } 128 129 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 130 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 131 dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2); 132 spin_lock_init(&device->mem_lock); 133 atomic_set(&device->tasklet_scheduled, 0); 134 tasklet_init(&device->tasklet, dasd_device_tasklet, 135 (unsigned long) device); 136 INIT_LIST_HEAD(&device->ccw_queue); 137 timer_setup(&device->timer, dasd_device_timeout, 0); 138 INIT_WORK(&device->kick_work, do_kick_device); 139 INIT_WORK(&device->reload_device, do_reload_device); 140 INIT_WORK(&device->requeue_requests, do_requeue_requests); 141 device->state = DASD_STATE_NEW; 142 device->target = DASD_STATE_NEW; 143 mutex_init(&device->state_mutex); 144 spin_lock_init(&device->profile.lock); 145 return device; 146 } 147 148 /* 149 * Free memory of a device structure. 150 */ 151 void dasd_free_device(struct dasd_device *device) 152 { 153 kfree(device->private); 154 free_pages((unsigned long) device->ese_mem, 1); 155 free_page((unsigned long) device->erp_mem); 156 free_pages((unsigned long) device->ccw_mem, 1); 157 kfree(device); 158 } 159 160 /* 161 * Allocate memory for a new device structure. 162 */ 163 struct dasd_block *dasd_alloc_block(void) 164 { 165 struct dasd_block *block; 166 167 block = kzalloc(sizeof(*block), GFP_ATOMIC); 168 if (!block) 169 return ERR_PTR(-ENOMEM); 170 /* open_count = 0 means device online but not in use */ 171 atomic_set(&block->open_count, -1); 172 173 atomic_set(&block->tasklet_scheduled, 0); 174 tasklet_init(&block->tasklet, dasd_block_tasklet, 175 (unsigned long) block); 176 INIT_LIST_HEAD(&block->ccw_queue); 177 spin_lock_init(&block->queue_lock); 178 INIT_LIST_HEAD(&block->format_list); 179 spin_lock_init(&block->format_lock); 180 timer_setup(&block->timer, dasd_block_timeout, 0); 181 spin_lock_init(&block->profile.lock); 182 183 return block; 184 } 185 EXPORT_SYMBOL_GPL(dasd_alloc_block); 186 187 /* 188 * Free memory of a device structure. 189 */ 190 void dasd_free_block(struct dasd_block *block) 191 { 192 kfree(block); 193 } 194 EXPORT_SYMBOL_GPL(dasd_free_block); 195 196 /* 197 * Make a new device known to the system. 198 */ 199 static int dasd_state_new_to_known(struct dasd_device *device) 200 { 201 int rc; 202 203 /* 204 * As long as the device is not in state DASD_STATE_NEW we want to 205 * keep the reference count > 0. 206 */ 207 dasd_get_device(device); 208 209 if (device->block) { 210 rc = dasd_alloc_queue(device->block); 211 if (rc) { 212 dasd_put_device(device); 213 return rc; 214 } 215 } 216 device->state = DASD_STATE_KNOWN; 217 return 0; 218 } 219 220 /* 221 * Let the system forget about a device. 222 */ 223 static int dasd_state_known_to_new(struct dasd_device *device) 224 { 225 /* Disable extended error reporting for this device. */ 226 dasd_eer_disable(device); 227 device->state = DASD_STATE_NEW; 228 229 if (device->block) 230 dasd_free_queue(device->block); 231 232 /* Give up reference we took in dasd_state_new_to_known. */ 233 dasd_put_device(device); 234 return 0; 235 } 236 237 static struct dentry *dasd_debugfs_setup(const char *name, 238 struct dentry *base_dentry) 239 { 240 struct dentry *pde; 241 242 if (!base_dentry) 243 return NULL; 244 pde = debugfs_create_dir(name, base_dentry); 245 if (!pde || IS_ERR(pde)) 246 return NULL; 247 return pde; 248 } 249 250 /* 251 * Request the irq line for the device. 252 */ 253 static int dasd_state_known_to_basic(struct dasd_device *device) 254 { 255 struct dasd_block *block = device->block; 256 int rc = 0; 257 258 /* Allocate and register gendisk structure. */ 259 if (block) { 260 rc = dasd_gendisk_alloc(block); 261 if (rc) 262 return rc; 263 block->debugfs_dentry = 264 dasd_debugfs_setup(block->gdp->disk_name, 265 dasd_debugfs_root_entry); 266 dasd_profile_init(&block->profile, block->debugfs_dentry); 267 if (dasd_global_profile_level == DASD_PROFILE_ON) 268 dasd_profile_on(&device->block->profile); 269 } 270 device->debugfs_dentry = 271 dasd_debugfs_setup(dev_name(&device->cdev->dev), 272 dasd_debugfs_root_entry); 273 dasd_profile_init(&device->profile, device->debugfs_dentry); 274 dasd_hosts_init(device->debugfs_dentry, device); 275 276 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 277 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 278 8 * sizeof(long)); 279 debug_register_view(device->debug_area, &debug_sprintf_view); 280 debug_set_level(device->debug_area, DBF_WARNING); 281 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 282 283 device->state = DASD_STATE_BASIC; 284 285 return rc; 286 } 287 288 /* 289 * Release the irq line for the device. Terminate any running i/o. 290 */ 291 static int dasd_state_basic_to_known(struct dasd_device *device) 292 { 293 int rc; 294 295 if (device->discipline->basic_to_known) { 296 rc = device->discipline->basic_to_known(device); 297 if (rc) 298 return rc; 299 } 300 301 if (device->block) { 302 dasd_profile_exit(&device->block->profile); 303 debugfs_remove(device->block->debugfs_dentry); 304 dasd_gendisk_free(device->block); 305 dasd_block_clear_timer(device->block); 306 } 307 rc = dasd_flush_device_queue(device); 308 if (rc) 309 return rc; 310 dasd_device_clear_timer(device); 311 dasd_profile_exit(&device->profile); 312 dasd_hosts_exit(device); 313 debugfs_remove(device->debugfs_dentry); 314 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 315 if (device->debug_area != NULL) { 316 debug_unregister(device->debug_area); 317 device->debug_area = NULL; 318 } 319 device->state = DASD_STATE_KNOWN; 320 return 0; 321 } 322 323 /* 324 * Do the initial analysis. The do_analysis function may return 325 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 326 * until the discipline decides to continue the startup sequence 327 * by calling the function dasd_change_state. The eckd disciplines 328 * uses this to start a ccw that detects the format. The completion 329 * interrupt for this detection ccw uses the kernel event daemon to 330 * trigger the call to dasd_change_state. All this is done in the 331 * discipline code, see dasd_eckd.c. 332 * After the analysis ccw is done (do_analysis returned 0) the block 333 * device is setup. 334 * In case the analysis returns an error, the device setup is stopped 335 * (a fake disk was already added to allow formatting). 336 */ 337 static int dasd_state_basic_to_ready(struct dasd_device *device) 338 { 339 int rc; 340 struct dasd_block *block; 341 struct gendisk *disk; 342 343 rc = 0; 344 block = device->block; 345 /* make disk known with correct capacity */ 346 if (block) { 347 if (block->base->discipline->do_analysis != NULL) 348 rc = block->base->discipline->do_analysis(block); 349 if (rc) { 350 if (rc != -EAGAIN) { 351 device->state = DASD_STATE_UNFMT; 352 disk = device->block->gdp; 353 kobject_uevent(&disk_to_dev(disk)->kobj, 354 KOBJ_CHANGE); 355 goto out; 356 } 357 return rc; 358 } 359 if (device->discipline->setup_blk_queue) 360 device->discipline->setup_blk_queue(block); 361 set_capacity(block->gdp, 362 block->blocks << block->s2b_shift); 363 device->state = DASD_STATE_READY; 364 rc = dasd_scan_partitions(block); 365 if (rc) { 366 device->state = DASD_STATE_BASIC; 367 return rc; 368 } 369 } else { 370 device->state = DASD_STATE_READY; 371 } 372 out: 373 if (device->discipline->basic_to_ready) 374 rc = device->discipline->basic_to_ready(device); 375 return rc; 376 } 377 378 static inline 379 int _wait_for_empty_queues(struct dasd_device *device) 380 { 381 if (device->block) 382 return list_empty(&device->ccw_queue) && 383 list_empty(&device->block->ccw_queue); 384 else 385 return list_empty(&device->ccw_queue); 386 } 387 388 /* 389 * Remove device from block device layer. Destroy dirty buffers. 390 * Forget format information. Check if the target level is basic 391 * and if it is create fake disk for formatting. 392 */ 393 static int dasd_state_ready_to_basic(struct dasd_device *device) 394 { 395 int rc; 396 397 device->state = DASD_STATE_BASIC; 398 if (device->block) { 399 struct dasd_block *block = device->block; 400 rc = dasd_flush_block_queue(block); 401 if (rc) { 402 device->state = DASD_STATE_READY; 403 return rc; 404 } 405 dasd_destroy_partitions(block); 406 block->blocks = 0; 407 block->bp_block = 0; 408 block->s2b_shift = 0; 409 } 410 return 0; 411 } 412 413 /* 414 * Back to basic. 415 */ 416 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 417 { 418 device->state = DASD_STATE_BASIC; 419 return 0; 420 } 421 422 /* 423 * Make the device online and schedule the bottom half to start 424 * the requeueing of requests from the linux request queue to the 425 * ccw queue. 426 */ 427 static int 428 dasd_state_ready_to_online(struct dasd_device * device) 429 { 430 device->state = DASD_STATE_ONLINE; 431 if (device->block) { 432 dasd_schedule_block_bh(device->block); 433 if ((device->features & DASD_FEATURE_USERAW)) { 434 kobject_uevent(&disk_to_dev(device->block->gdp)->kobj, 435 KOBJ_CHANGE); 436 return 0; 437 } 438 disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); 439 } 440 return 0; 441 } 442 443 /* 444 * Stop the requeueing of requests again. 445 */ 446 static int dasd_state_online_to_ready(struct dasd_device *device) 447 { 448 int rc; 449 450 if (device->discipline->online_to_ready) { 451 rc = device->discipline->online_to_ready(device); 452 if (rc) 453 return rc; 454 } 455 456 device->state = DASD_STATE_READY; 457 if (device->block && !(device->features & DASD_FEATURE_USERAW)) 458 disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE); 459 return 0; 460 } 461 462 /* 463 * Device startup state changes. 464 */ 465 static int dasd_increase_state(struct dasd_device *device) 466 { 467 int rc; 468 469 rc = 0; 470 if (device->state == DASD_STATE_NEW && 471 device->target >= DASD_STATE_KNOWN) 472 rc = dasd_state_new_to_known(device); 473 474 if (!rc && 475 device->state == DASD_STATE_KNOWN && 476 device->target >= DASD_STATE_BASIC) 477 rc = dasd_state_known_to_basic(device); 478 479 if (!rc && 480 device->state == DASD_STATE_BASIC && 481 device->target >= DASD_STATE_READY) 482 rc = dasd_state_basic_to_ready(device); 483 484 if (!rc && 485 device->state == DASD_STATE_UNFMT && 486 device->target > DASD_STATE_UNFMT) 487 rc = -EPERM; 488 489 if (!rc && 490 device->state == DASD_STATE_READY && 491 device->target >= DASD_STATE_ONLINE) 492 rc = dasd_state_ready_to_online(device); 493 494 return rc; 495 } 496 497 /* 498 * Device shutdown state changes. 499 */ 500 static int dasd_decrease_state(struct dasd_device *device) 501 { 502 int rc; 503 504 rc = 0; 505 if (device->state == DASD_STATE_ONLINE && 506 device->target <= DASD_STATE_READY) 507 rc = dasd_state_online_to_ready(device); 508 509 if (!rc && 510 device->state == DASD_STATE_READY && 511 device->target <= DASD_STATE_BASIC) 512 rc = dasd_state_ready_to_basic(device); 513 514 if (!rc && 515 device->state == DASD_STATE_UNFMT && 516 device->target <= DASD_STATE_BASIC) 517 rc = dasd_state_unfmt_to_basic(device); 518 519 if (!rc && 520 device->state == DASD_STATE_BASIC && 521 device->target <= DASD_STATE_KNOWN) 522 rc = dasd_state_basic_to_known(device); 523 524 if (!rc && 525 device->state == DASD_STATE_KNOWN && 526 device->target <= DASD_STATE_NEW) 527 rc = dasd_state_known_to_new(device); 528 529 return rc; 530 } 531 532 /* 533 * This is the main startup/shutdown routine. 534 */ 535 static void dasd_change_state(struct dasd_device *device) 536 { 537 int rc; 538 539 if (device->state == device->target) 540 /* Already where we want to go today... */ 541 return; 542 if (device->state < device->target) 543 rc = dasd_increase_state(device); 544 else 545 rc = dasd_decrease_state(device); 546 if (rc == -EAGAIN) 547 return; 548 if (rc) 549 device->target = device->state; 550 551 /* let user-space know that the device status changed */ 552 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 553 554 if (device->state == device->target) 555 wake_up(&dasd_init_waitq); 556 } 557 558 /* 559 * Kick starter for devices that did not complete the startup/shutdown 560 * procedure or were sleeping because of a pending state. 561 * dasd_kick_device will schedule a call do do_kick_device to the kernel 562 * event daemon. 563 */ 564 static void do_kick_device(struct work_struct *work) 565 { 566 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 567 mutex_lock(&device->state_mutex); 568 dasd_change_state(device); 569 mutex_unlock(&device->state_mutex); 570 dasd_schedule_device_bh(device); 571 dasd_put_device(device); 572 } 573 574 void dasd_kick_device(struct dasd_device *device) 575 { 576 dasd_get_device(device); 577 /* queue call to dasd_kick_device to the kernel event daemon. */ 578 if (!schedule_work(&device->kick_work)) 579 dasd_put_device(device); 580 } 581 EXPORT_SYMBOL(dasd_kick_device); 582 583 /* 584 * dasd_reload_device will schedule a call do do_reload_device to the kernel 585 * event daemon. 586 */ 587 static void do_reload_device(struct work_struct *work) 588 { 589 struct dasd_device *device = container_of(work, struct dasd_device, 590 reload_device); 591 device->discipline->reload(device); 592 dasd_put_device(device); 593 } 594 595 void dasd_reload_device(struct dasd_device *device) 596 { 597 dasd_get_device(device); 598 /* queue call to dasd_reload_device to the kernel event daemon. */ 599 if (!schedule_work(&device->reload_device)) 600 dasd_put_device(device); 601 } 602 EXPORT_SYMBOL(dasd_reload_device); 603 604 /* 605 * Set the target state for a device and starts the state change. 606 */ 607 void dasd_set_target_state(struct dasd_device *device, int target) 608 { 609 dasd_get_device(device); 610 mutex_lock(&device->state_mutex); 611 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 612 if (dasd_probeonly && target > DASD_STATE_READY) 613 target = DASD_STATE_READY; 614 if (device->target != target) { 615 if (device->state == target) 616 wake_up(&dasd_init_waitq); 617 device->target = target; 618 } 619 if (device->state != device->target) 620 dasd_change_state(device); 621 mutex_unlock(&device->state_mutex); 622 dasd_put_device(device); 623 } 624 EXPORT_SYMBOL(dasd_set_target_state); 625 626 /* 627 * Enable devices with device numbers in [from..to]. 628 */ 629 static inline int _wait_for_device(struct dasd_device *device) 630 { 631 return (device->state == device->target); 632 } 633 634 void dasd_enable_device(struct dasd_device *device) 635 { 636 dasd_set_target_state(device, DASD_STATE_ONLINE); 637 if (device->state <= DASD_STATE_KNOWN) 638 /* No discipline for device found. */ 639 dasd_set_target_state(device, DASD_STATE_NEW); 640 /* Now wait for the devices to come up. */ 641 wait_event(dasd_init_waitq, _wait_for_device(device)); 642 643 dasd_reload_device(device); 644 if (device->discipline->kick_validate) 645 device->discipline->kick_validate(device); 646 } 647 EXPORT_SYMBOL(dasd_enable_device); 648 649 /* 650 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 651 */ 652 653 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 654 655 #ifdef CONFIG_DASD_PROFILE 656 struct dasd_profile dasd_global_profile = { 657 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 658 }; 659 static struct dentry *dasd_debugfs_global_entry; 660 661 /* 662 * Add profiling information for cqr before execution. 663 */ 664 static void dasd_profile_start(struct dasd_block *block, 665 struct dasd_ccw_req *cqr, 666 struct request *req) 667 { 668 struct list_head *l; 669 unsigned int counter; 670 struct dasd_device *device; 671 672 /* count the length of the chanq for statistics */ 673 counter = 0; 674 if (dasd_global_profile_level || block->profile.data) 675 list_for_each(l, &block->ccw_queue) 676 if (++counter >= 31) 677 break; 678 679 spin_lock(&dasd_global_profile.lock); 680 if (dasd_global_profile.data) { 681 dasd_global_profile.data->dasd_io_nr_req[counter]++; 682 if (rq_data_dir(req) == READ) 683 dasd_global_profile.data->dasd_read_nr_req[counter]++; 684 } 685 spin_unlock(&dasd_global_profile.lock); 686 687 spin_lock(&block->profile.lock); 688 if (block->profile.data) { 689 block->profile.data->dasd_io_nr_req[counter]++; 690 if (rq_data_dir(req) == READ) 691 block->profile.data->dasd_read_nr_req[counter]++; 692 } 693 spin_unlock(&block->profile.lock); 694 695 /* 696 * We count the request for the start device, even though it may run on 697 * some other device due to error recovery. This way we make sure that 698 * we count each request only once. 699 */ 700 device = cqr->startdev; 701 if (device->profile.data) { 702 counter = 1; /* request is not yet queued on the start device */ 703 list_for_each(l, &device->ccw_queue) 704 if (++counter >= 31) 705 break; 706 } 707 spin_lock(&device->profile.lock); 708 if (device->profile.data) { 709 device->profile.data->dasd_io_nr_req[counter]++; 710 if (rq_data_dir(req) == READ) 711 device->profile.data->dasd_read_nr_req[counter]++; 712 } 713 spin_unlock(&device->profile.lock); 714 } 715 716 /* 717 * Add profiling information for cqr after execution. 718 */ 719 720 #define dasd_profile_counter(value, index) \ 721 { \ 722 for (index = 0; index < 31 && value >> (2+index); index++) \ 723 ; \ 724 } 725 726 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 727 int is_alias, 728 int is_tpm, 729 int is_read, 730 long sectors, 731 int sectors_ind, 732 int tottime_ind, 733 int tottimeps_ind, 734 int strtime_ind, 735 int irqtime_ind, 736 int irqtimeps_ind, 737 int endtime_ind) 738 { 739 /* in case of an overflow, reset the whole profile */ 740 if (data->dasd_io_reqs == UINT_MAX) { 741 memset(data, 0, sizeof(*data)); 742 ktime_get_real_ts64(&data->starttod); 743 } 744 data->dasd_io_reqs++; 745 data->dasd_io_sects += sectors; 746 if (is_alias) 747 data->dasd_io_alias++; 748 if (is_tpm) 749 data->dasd_io_tpm++; 750 751 data->dasd_io_secs[sectors_ind]++; 752 data->dasd_io_times[tottime_ind]++; 753 data->dasd_io_timps[tottimeps_ind]++; 754 data->dasd_io_time1[strtime_ind]++; 755 data->dasd_io_time2[irqtime_ind]++; 756 data->dasd_io_time2ps[irqtimeps_ind]++; 757 data->dasd_io_time3[endtime_ind]++; 758 759 if (is_read) { 760 data->dasd_read_reqs++; 761 data->dasd_read_sects += sectors; 762 if (is_alias) 763 data->dasd_read_alias++; 764 if (is_tpm) 765 data->dasd_read_tpm++; 766 data->dasd_read_secs[sectors_ind]++; 767 data->dasd_read_times[tottime_ind]++; 768 data->dasd_read_time1[strtime_ind]++; 769 data->dasd_read_time2[irqtime_ind]++; 770 data->dasd_read_time3[endtime_ind]++; 771 } 772 } 773 774 static void dasd_profile_end(struct dasd_block *block, 775 struct dasd_ccw_req *cqr, 776 struct request *req) 777 { 778 unsigned long strtime, irqtime, endtime, tottime; 779 unsigned long tottimeps, sectors; 780 struct dasd_device *device; 781 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 782 int irqtime_ind, irqtimeps_ind, endtime_ind; 783 struct dasd_profile_info *data; 784 785 device = cqr->startdev; 786 if (!(dasd_global_profile_level || 787 block->profile.data || 788 device->profile.data)) 789 return; 790 791 sectors = blk_rq_sectors(req); 792 if (!cqr->buildclk || !cqr->startclk || 793 !cqr->stopclk || !cqr->endclk || 794 !sectors) 795 return; 796 797 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 798 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 799 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 800 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 801 tottimeps = tottime / sectors; 802 803 dasd_profile_counter(sectors, sectors_ind); 804 dasd_profile_counter(tottime, tottime_ind); 805 dasd_profile_counter(tottimeps, tottimeps_ind); 806 dasd_profile_counter(strtime, strtime_ind); 807 dasd_profile_counter(irqtime, irqtime_ind); 808 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 809 dasd_profile_counter(endtime, endtime_ind); 810 811 spin_lock(&dasd_global_profile.lock); 812 if (dasd_global_profile.data) { 813 data = dasd_global_profile.data; 814 data->dasd_sum_times += tottime; 815 data->dasd_sum_time_str += strtime; 816 data->dasd_sum_time_irq += irqtime; 817 data->dasd_sum_time_end += endtime; 818 dasd_profile_end_add_data(dasd_global_profile.data, 819 cqr->startdev != block->base, 820 cqr->cpmode == 1, 821 rq_data_dir(req) == READ, 822 sectors, sectors_ind, tottime_ind, 823 tottimeps_ind, strtime_ind, 824 irqtime_ind, irqtimeps_ind, 825 endtime_ind); 826 } 827 spin_unlock(&dasd_global_profile.lock); 828 829 spin_lock(&block->profile.lock); 830 if (block->profile.data) { 831 data = block->profile.data; 832 data->dasd_sum_times += tottime; 833 data->dasd_sum_time_str += strtime; 834 data->dasd_sum_time_irq += irqtime; 835 data->dasd_sum_time_end += endtime; 836 dasd_profile_end_add_data(block->profile.data, 837 cqr->startdev != block->base, 838 cqr->cpmode == 1, 839 rq_data_dir(req) == READ, 840 sectors, sectors_ind, tottime_ind, 841 tottimeps_ind, strtime_ind, 842 irqtime_ind, irqtimeps_ind, 843 endtime_ind); 844 } 845 spin_unlock(&block->profile.lock); 846 847 spin_lock(&device->profile.lock); 848 if (device->profile.data) { 849 data = device->profile.data; 850 data->dasd_sum_times += tottime; 851 data->dasd_sum_time_str += strtime; 852 data->dasd_sum_time_irq += irqtime; 853 data->dasd_sum_time_end += endtime; 854 dasd_profile_end_add_data(device->profile.data, 855 cqr->startdev != block->base, 856 cqr->cpmode == 1, 857 rq_data_dir(req) == READ, 858 sectors, sectors_ind, tottime_ind, 859 tottimeps_ind, strtime_ind, 860 irqtime_ind, irqtimeps_ind, 861 endtime_ind); 862 } 863 spin_unlock(&device->profile.lock); 864 } 865 866 void dasd_profile_reset(struct dasd_profile *profile) 867 { 868 struct dasd_profile_info *data; 869 870 spin_lock_bh(&profile->lock); 871 data = profile->data; 872 if (!data) { 873 spin_unlock_bh(&profile->lock); 874 return; 875 } 876 memset(data, 0, sizeof(*data)); 877 ktime_get_real_ts64(&data->starttod); 878 spin_unlock_bh(&profile->lock); 879 } 880 881 int dasd_profile_on(struct dasd_profile *profile) 882 { 883 struct dasd_profile_info *data; 884 885 data = kzalloc(sizeof(*data), GFP_KERNEL); 886 if (!data) 887 return -ENOMEM; 888 spin_lock_bh(&profile->lock); 889 if (profile->data) { 890 spin_unlock_bh(&profile->lock); 891 kfree(data); 892 return 0; 893 } 894 ktime_get_real_ts64(&data->starttod); 895 profile->data = data; 896 spin_unlock_bh(&profile->lock); 897 return 0; 898 } 899 900 void dasd_profile_off(struct dasd_profile *profile) 901 { 902 spin_lock_bh(&profile->lock); 903 kfree(profile->data); 904 profile->data = NULL; 905 spin_unlock_bh(&profile->lock); 906 } 907 908 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 909 { 910 char *buffer; 911 912 buffer = vmalloc(user_len + 1); 913 if (buffer == NULL) 914 return ERR_PTR(-ENOMEM); 915 if (copy_from_user(buffer, user_buf, user_len) != 0) { 916 vfree(buffer); 917 return ERR_PTR(-EFAULT); 918 } 919 /* got the string, now strip linefeed. */ 920 if (buffer[user_len - 1] == '\n') 921 buffer[user_len - 1] = 0; 922 else 923 buffer[user_len] = 0; 924 return buffer; 925 } 926 927 static ssize_t dasd_stats_write(struct file *file, 928 const char __user *user_buf, 929 size_t user_len, loff_t *pos) 930 { 931 char *buffer, *str; 932 int rc; 933 struct seq_file *m = (struct seq_file *)file->private_data; 934 struct dasd_profile *prof = m->private; 935 936 if (user_len > 65536) 937 user_len = 65536; 938 buffer = dasd_get_user_string(user_buf, user_len); 939 if (IS_ERR(buffer)) 940 return PTR_ERR(buffer); 941 942 str = skip_spaces(buffer); 943 rc = user_len; 944 if (strncmp(str, "reset", 5) == 0) { 945 dasd_profile_reset(prof); 946 } else if (strncmp(str, "on", 2) == 0) { 947 rc = dasd_profile_on(prof); 948 if (rc) 949 goto out; 950 rc = user_len; 951 if (prof == &dasd_global_profile) { 952 dasd_profile_reset(prof); 953 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 954 } 955 } else if (strncmp(str, "off", 3) == 0) { 956 if (prof == &dasd_global_profile) 957 dasd_global_profile_level = DASD_PROFILE_OFF; 958 dasd_profile_off(prof); 959 } else 960 rc = -EINVAL; 961 out: 962 vfree(buffer); 963 return rc; 964 } 965 966 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 967 { 968 int i; 969 970 for (i = 0; i < 32; i++) 971 seq_printf(m, "%u ", array[i]); 972 seq_putc(m, '\n'); 973 } 974 975 static void dasd_stats_seq_print(struct seq_file *m, 976 struct dasd_profile_info *data) 977 { 978 seq_printf(m, "start_time %lld.%09ld\n", 979 (s64)data->starttod.tv_sec, data->starttod.tv_nsec); 980 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 981 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 982 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 983 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 984 seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ? 985 data->dasd_sum_times / data->dasd_io_reqs : 0UL); 986 seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ? 987 data->dasd_sum_time_str / data->dasd_io_reqs : 0UL); 988 seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ? 989 data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL); 990 seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ? 991 data->dasd_sum_time_end / data->dasd_io_reqs : 0UL); 992 seq_puts(m, "histogram_sectors "); 993 dasd_stats_array(m, data->dasd_io_secs); 994 seq_puts(m, "histogram_io_times "); 995 dasd_stats_array(m, data->dasd_io_times); 996 seq_puts(m, "histogram_io_times_weighted "); 997 dasd_stats_array(m, data->dasd_io_timps); 998 seq_puts(m, "histogram_time_build_to_ssch "); 999 dasd_stats_array(m, data->dasd_io_time1); 1000 seq_puts(m, "histogram_time_ssch_to_irq "); 1001 dasd_stats_array(m, data->dasd_io_time2); 1002 seq_puts(m, "histogram_time_ssch_to_irq_weighted "); 1003 dasd_stats_array(m, data->dasd_io_time2ps); 1004 seq_puts(m, "histogram_time_irq_to_end "); 1005 dasd_stats_array(m, data->dasd_io_time3); 1006 seq_puts(m, "histogram_ccw_queue_length "); 1007 dasd_stats_array(m, data->dasd_io_nr_req); 1008 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 1009 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 1010 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 1011 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 1012 seq_puts(m, "histogram_read_sectors "); 1013 dasd_stats_array(m, data->dasd_read_secs); 1014 seq_puts(m, "histogram_read_times "); 1015 dasd_stats_array(m, data->dasd_read_times); 1016 seq_puts(m, "histogram_read_time_build_to_ssch "); 1017 dasd_stats_array(m, data->dasd_read_time1); 1018 seq_puts(m, "histogram_read_time_ssch_to_irq "); 1019 dasd_stats_array(m, data->dasd_read_time2); 1020 seq_puts(m, "histogram_read_time_irq_to_end "); 1021 dasd_stats_array(m, data->dasd_read_time3); 1022 seq_puts(m, "histogram_read_ccw_queue_length "); 1023 dasd_stats_array(m, data->dasd_read_nr_req); 1024 } 1025 1026 static int dasd_stats_show(struct seq_file *m, void *v) 1027 { 1028 struct dasd_profile *profile; 1029 struct dasd_profile_info *data; 1030 1031 profile = m->private; 1032 spin_lock_bh(&profile->lock); 1033 data = profile->data; 1034 if (!data) { 1035 spin_unlock_bh(&profile->lock); 1036 seq_puts(m, "disabled\n"); 1037 return 0; 1038 } 1039 dasd_stats_seq_print(m, data); 1040 spin_unlock_bh(&profile->lock); 1041 return 0; 1042 } 1043 1044 static int dasd_stats_open(struct inode *inode, struct file *file) 1045 { 1046 struct dasd_profile *profile = inode->i_private; 1047 return single_open(file, dasd_stats_show, profile); 1048 } 1049 1050 static const struct file_operations dasd_stats_raw_fops = { 1051 .owner = THIS_MODULE, 1052 .open = dasd_stats_open, 1053 .read = seq_read, 1054 .llseek = seq_lseek, 1055 .release = single_release, 1056 .write = dasd_stats_write, 1057 }; 1058 1059 static void dasd_profile_init(struct dasd_profile *profile, 1060 struct dentry *base_dentry) 1061 { 1062 umode_t mode; 1063 struct dentry *pde; 1064 1065 if (!base_dentry) 1066 return; 1067 profile->dentry = NULL; 1068 profile->data = NULL; 1069 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1070 pde = debugfs_create_file("statistics", mode, base_dentry, 1071 profile, &dasd_stats_raw_fops); 1072 if (pde && !IS_ERR(pde)) 1073 profile->dentry = pde; 1074 return; 1075 } 1076 1077 static void dasd_profile_exit(struct dasd_profile *profile) 1078 { 1079 dasd_profile_off(profile); 1080 debugfs_remove(profile->dentry); 1081 profile->dentry = NULL; 1082 } 1083 1084 static void dasd_statistics_removeroot(void) 1085 { 1086 dasd_global_profile_level = DASD_PROFILE_OFF; 1087 dasd_profile_exit(&dasd_global_profile); 1088 debugfs_remove(dasd_debugfs_global_entry); 1089 debugfs_remove(dasd_debugfs_root_entry); 1090 } 1091 1092 static void dasd_statistics_createroot(void) 1093 { 1094 struct dentry *pde; 1095 1096 dasd_debugfs_root_entry = NULL; 1097 pde = debugfs_create_dir("dasd", NULL); 1098 if (!pde || IS_ERR(pde)) 1099 goto error; 1100 dasd_debugfs_root_entry = pde; 1101 pde = debugfs_create_dir("global", dasd_debugfs_root_entry); 1102 if (!pde || IS_ERR(pde)) 1103 goto error; 1104 dasd_debugfs_global_entry = pde; 1105 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); 1106 return; 1107 1108 error: 1109 DBF_EVENT(DBF_ERR, "%s", 1110 "Creation of the dasd debugfs interface failed"); 1111 dasd_statistics_removeroot(); 1112 return; 1113 } 1114 1115 #else 1116 #define dasd_profile_start(block, cqr, req) do {} while (0) 1117 #define dasd_profile_end(block, cqr, req) do {} while (0) 1118 1119 static void dasd_statistics_createroot(void) 1120 { 1121 return; 1122 } 1123 1124 static void dasd_statistics_removeroot(void) 1125 { 1126 return; 1127 } 1128 1129 int dasd_stats_generic_show(struct seq_file *m, void *v) 1130 { 1131 seq_puts(m, "Statistics are not activated in this kernel\n"); 1132 return 0; 1133 } 1134 1135 static void dasd_profile_init(struct dasd_profile *profile, 1136 struct dentry *base_dentry) 1137 { 1138 return; 1139 } 1140 1141 static void dasd_profile_exit(struct dasd_profile *profile) 1142 { 1143 return; 1144 } 1145 1146 int dasd_profile_on(struct dasd_profile *profile) 1147 { 1148 return 0; 1149 } 1150 1151 #endif /* CONFIG_DASD_PROFILE */ 1152 1153 static int dasd_hosts_show(struct seq_file *m, void *v) 1154 { 1155 struct dasd_device *device; 1156 int rc = -EOPNOTSUPP; 1157 1158 device = m->private; 1159 dasd_get_device(device); 1160 1161 if (device->discipline->hosts_print) 1162 rc = device->discipline->hosts_print(device, m); 1163 1164 dasd_put_device(device); 1165 return rc; 1166 } 1167 1168 DEFINE_SHOW_ATTRIBUTE(dasd_hosts); 1169 1170 static void dasd_hosts_exit(struct dasd_device *device) 1171 { 1172 debugfs_remove(device->hosts_dentry); 1173 device->hosts_dentry = NULL; 1174 } 1175 1176 static void dasd_hosts_init(struct dentry *base_dentry, 1177 struct dasd_device *device) 1178 { 1179 struct dentry *pde; 1180 umode_t mode; 1181 1182 if (!base_dentry) 1183 return; 1184 1185 mode = S_IRUSR | S_IFREG; 1186 pde = debugfs_create_file("host_access_list", mode, base_dentry, 1187 device, &dasd_hosts_fops); 1188 if (pde && !IS_ERR(pde)) 1189 device->hosts_dentry = pde; 1190 } 1191 1192 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, 1193 struct dasd_device *device, 1194 struct dasd_ccw_req *cqr) 1195 { 1196 unsigned long flags; 1197 char *data, *chunk; 1198 int size = 0; 1199 1200 if (cplength > 0) 1201 size += cplength * sizeof(struct ccw1); 1202 if (datasize > 0) 1203 size += datasize; 1204 if (!cqr) 1205 size += (sizeof(*cqr) + 7L) & -8L; 1206 1207 spin_lock_irqsave(&device->mem_lock, flags); 1208 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); 1209 spin_unlock_irqrestore(&device->mem_lock, flags); 1210 if (!chunk) 1211 return ERR_PTR(-ENOMEM); 1212 if (!cqr) { 1213 cqr = (void *) data; 1214 data += (sizeof(*cqr) + 7L) & -8L; 1215 } 1216 memset(cqr, 0, sizeof(*cqr)); 1217 cqr->mem_chunk = chunk; 1218 if (cplength > 0) { 1219 cqr->cpaddr = data; 1220 data += cplength * sizeof(struct ccw1); 1221 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); 1222 } 1223 if (datasize > 0) { 1224 cqr->data = data; 1225 memset(cqr->data, 0, datasize); 1226 } 1227 cqr->magic = magic; 1228 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1229 dasd_get_device(device); 1230 return cqr; 1231 } 1232 EXPORT_SYMBOL(dasd_smalloc_request); 1233 1234 struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength, 1235 int datasize, 1236 struct dasd_device *device) 1237 { 1238 struct dasd_ccw_req *cqr; 1239 unsigned long flags; 1240 int size, cqr_size; 1241 char *data; 1242 1243 cqr_size = (sizeof(*cqr) + 7L) & -8L; 1244 size = cqr_size; 1245 if (cplength > 0) 1246 size += cplength * sizeof(struct ccw1); 1247 if (datasize > 0) 1248 size += datasize; 1249 1250 spin_lock_irqsave(&device->mem_lock, flags); 1251 cqr = dasd_alloc_chunk(&device->ese_chunks, size); 1252 spin_unlock_irqrestore(&device->mem_lock, flags); 1253 if (!cqr) 1254 return ERR_PTR(-ENOMEM); 1255 memset(cqr, 0, sizeof(*cqr)); 1256 data = (char *)cqr + cqr_size; 1257 cqr->cpaddr = NULL; 1258 if (cplength > 0) { 1259 cqr->cpaddr = data; 1260 data += cplength * sizeof(struct ccw1); 1261 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); 1262 } 1263 cqr->data = NULL; 1264 if (datasize > 0) { 1265 cqr->data = data; 1266 memset(cqr->data, 0, datasize); 1267 } 1268 1269 cqr->magic = magic; 1270 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1271 dasd_get_device(device); 1272 1273 return cqr; 1274 } 1275 EXPORT_SYMBOL(dasd_fmalloc_request); 1276 1277 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1278 { 1279 unsigned long flags; 1280 1281 spin_lock_irqsave(&device->mem_lock, flags); 1282 dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk); 1283 spin_unlock_irqrestore(&device->mem_lock, flags); 1284 dasd_put_device(device); 1285 } 1286 EXPORT_SYMBOL(dasd_sfree_request); 1287 1288 void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 1289 { 1290 unsigned long flags; 1291 1292 spin_lock_irqsave(&device->mem_lock, flags); 1293 dasd_free_chunk(&device->ese_chunks, cqr); 1294 spin_unlock_irqrestore(&device->mem_lock, flags); 1295 dasd_put_device(device); 1296 } 1297 EXPORT_SYMBOL(dasd_ffree_request); 1298 1299 /* 1300 * Check discipline magic in cqr. 1301 */ 1302 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) 1303 { 1304 struct dasd_device *device; 1305 1306 if (cqr == NULL) 1307 return -EINVAL; 1308 device = cqr->startdev; 1309 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 1310 DBF_DEV_EVENT(DBF_WARNING, device, 1311 " dasd_ccw_req 0x%08x magic doesn't match" 1312 " discipline 0x%08x", 1313 cqr->magic, 1314 *(unsigned int *) device->discipline->name); 1315 return -EINVAL; 1316 } 1317 return 0; 1318 } 1319 1320 /* 1321 * Terminate the current i/o and set the request to clear_pending. 1322 * Timer keeps device runnig. 1323 * ccw_device_clear can fail if the i/o subsystem 1324 * is in a bad mood. 1325 */ 1326 int dasd_term_IO(struct dasd_ccw_req *cqr) 1327 { 1328 struct dasd_device *device; 1329 int retries, rc; 1330 char errorstring[ERRORLENGTH]; 1331 1332 /* Check the cqr */ 1333 rc = dasd_check_cqr(cqr); 1334 if (rc) 1335 return rc; 1336 retries = 0; 1337 device = (struct dasd_device *) cqr->startdev; 1338 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1339 rc = ccw_device_clear(device->cdev, (long) cqr); 1340 switch (rc) { 1341 case 0: /* termination successful */ 1342 cqr->status = DASD_CQR_CLEAR_PENDING; 1343 cqr->stopclk = get_tod_clock(); 1344 cqr->starttime = 0; 1345 DBF_DEV_EVENT(DBF_DEBUG, device, 1346 "terminate cqr %p successful", 1347 cqr); 1348 break; 1349 case -ENODEV: 1350 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1351 "device gone, retry"); 1352 break; 1353 case -EINVAL: 1354 /* 1355 * device not valid so no I/O could be running 1356 * handle CQR as termination successful 1357 */ 1358 cqr->status = DASD_CQR_CLEARED; 1359 cqr->stopclk = get_tod_clock(); 1360 cqr->starttime = 0; 1361 /* no retries for invalid devices */ 1362 cqr->retries = -1; 1363 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1364 "EINVAL, handle as terminated"); 1365 /* fake rc to success */ 1366 rc = 0; 1367 break; 1368 default: 1369 /* internal error 10 - unknown rc*/ 1370 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1371 dev_err(&device->cdev->dev, "An error occurred in the " 1372 "DASD device driver, reason=%s\n", errorstring); 1373 BUG(); 1374 break; 1375 } 1376 retries++; 1377 } 1378 dasd_schedule_device_bh(device); 1379 return rc; 1380 } 1381 EXPORT_SYMBOL(dasd_term_IO); 1382 1383 /* 1384 * Start the i/o. This start_IO can fail if the channel is really busy. 1385 * In that case set up a timer to start the request later. 1386 */ 1387 int dasd_start_IO(struct dasd_ccw_req *cqr) 1388 { 1389 struct dasd_device *device; 1390 int rc; 1391 char errorstring[ERRORLENGTH]; 1392 1393 /* Check the cqr */ 1394 rc = dasd_check_cqr(cqr); 1395 if (rc) { 1396 cqr->intrc = rc; 1397 return rc; 1398 } 1399 device = (struct dasd_device *) cqr->startdev; 1400 if (((cqr->block && 1401 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1402 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1403 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1404 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1405 "because of stolen lock", cqr); 1406 cqr->status = DASD_CQR_ERROR; 1407 cqr->intrc = -EPERM; 1408 return -EPERM; 1409 } 1410 if (cqr->retries < 0) { 1411 /* internal error 14 - start_IO run out of retries */ 1412 sprintf(errorstring, "14 %p", cqr); 1413 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1414 "device driver, reason=%s\n", errorstring); 1415 cqr->status = DASD_CQR_ERROR; 1416 return -EIO; 1417 } 1418 cqr->startclk = get_tod_clock(); 1419 cqr->starttime = jiffies; 1420 cqr->retries--; 1421 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1422 cqr->lpm &= dasd_path_get_opm(device); 1423 if (!cqr->lpm) 1424 cqr->lpm = dasd_path_get_opm(device); 1425 } 1426 if (cqr->cpmode == 1) { 1427 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1428 (long) cqr, cqr->lpm); 1429 } else { 1430 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1431 (long) cqr, cqr->lpm, 0); 1432 } 1433 switch (rc) { 1434 case 0: 1435 cqr->status = DASD_CQR_IN_IO; 1436 break; 1437 case -EBUSY: 1438 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1439 "start_IO: device busy, retry later"); 1440 break; 1441 case -EACCES: 1442 /* -EACCES indicates that the request used only a subset of the 1443 * available paths and all these paths are gone. If the lpm of 1444 * this request was only a subset of the opm (e.g. the ppm) then 1445 * we just do a retry with all available paths. 1446 * If we already use the full opm, something is amiss, and we 1447 * need a full path verification. 1448 */ 1449 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1450 DBF_DEV_EVENT(DBF_WARNING, device, 1451 "start_IO: selected paths gone (%x)", 1452 cqr->lpm); 1453 } else if (cqr->lpm != dasd_path_get_opm(device)) { 1454 cqr->lpm = dasd_path_get_opm(device); 1455 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1456 "start_IO: selected paths gone," 1457 " retry on all paths"); 1458 } else { 1459 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1460 "start_IO: all paths in opm gone," 1461 " do path verification"); 1462 dasd_generic_last_path_gone(device); 1463 dasd_path_no_path(device); 1464 dasd_path_set_tbvpm(device, 1465 ccw_device_get_path_mask( 1466 device->cdev)); 1467 } 1468 break; 1469 case -ENODEV: 1470 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1471 "start_IO: -ENODEV device gone, retry"); 1472 break; 1473 case -EIO: 1474 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1475 "start_IO: -EIO device gone, retry"); 1476 break; 1477 case -EINVAL: 1478 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1479 "start_IO: -EINVAL device currently " 1480 "not accessible"); 1481 break; 1482 default: 1483 /* internal error 11 - unknown rc */ 1484 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1485 dev_err(&device->cdev->dev, 1486 "An error occurred in the DASD device driver, " 1487 "reason=%s\n", errorstring); 1488 BUG(); 1489 break; 1490 } 1491 cqr->intrc = rc; 1492 return rc; 1493 } 1494 EXPORT_SYMBOL(dasd_start_IO); 1495 1496 /* 1497 * Timeout function for dasd devices. This is used for different purposes 1498 * 1) missing interrupt handler for normal operation 1499 * 2) delayed start of request where start_IO failed with -EBUSY 1500 * 3) timeout for missing state change interrupts 1501 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1502 * DASD_CQR_QUEUED for 2) and 3). 1503 */ 1504 static void dasd_device_timeout(struct timer_list *t) 1505 { 1506 unsigned long flags; 1507 struct dasd_device *device; 1508 1509 device = from_timer(device, t, timer); 1510 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1511 /* re-activate request queue */ 1512 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1513 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1514 dasd_schedule_device_bh(device); 1515 } 1516 1517 /* 1518 * Setup timeout for a device in jiffies. 1519 */ 1520 void dasd_device_set_timer(struct dasd_device *device, int expires) 1521 { 1522 if (expires == 0) 1523 del_timer(&device->timer); 1524 else 1525 mod_timer(&device->timer, jiffies + expires); 1526 } 1527 EXPORT_SYMBOL(dasd_device_set_timer); 1528 1529 /* 1530 * Clear timeout for a device. 1531 */ 1532 void dasd_device_clear_timer(struct dasd_device *device) 1533 { 1534 del_timer(&device->timer); 1535 } 1536 EXPORT_SYMBOL(dasd_device_clear_timer); 1537 1538 static void dasd_handle_killed_request(struct ccw_device *cdev, 1539 unsigned long intparm) 1540 { 1541 struct dasd_ccw_req *cqr; 1542 struct dasd_device *device; 1543 1544 if (!intparm) 1545 return; 1546 cqr = (struct dasd_ccw_req *) intparm; 1547 if (cqr->status != DASD_CQR_IN_IO) { 1548 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1549 "invalid status in handle_killed_request: " 1550 "%02x", cqr->status); 1551 return; 1552 } 1553 1554 device = dasd_device_from_cdev_locked(cdev); 1555 if (IS_ERR(device)) { 1556 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1557 "unable to get device from cdev"); 1558 return; 1559 } 1560 1561 if (!cqr->startdev || 1562 device != cqr->startdev || 1563 strncmp(cqr->startdev->discipline->ebcname, 1564 (char *) &cqr->magic, 4)) { 1565 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1566 "invalid device in request"); 1567 dasd_put_device(device); 1568 return; 1569 } 1570 1571 /* Schedule request to be retried. */ 1572 cqr->status = DASD_CQR_QUEUED; 1573 1574 dasd_device_clear_timer(device); 1575 dasd_schedule_device_bh(device); 1576 dasd_put_device(device); 1577 } 1578 1579 void dasd_generic_handle_state_change(struct dasd_device *device) 1580 { 1581 /* First of all start sense subsystem status request. */ 1582 dasd_eer_snss(device); 1583 1584 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1585 dasd_schedule_device_bh(device); 1586 if (device->block) { 1587 dasd_schedule_block_bh(device->block); 1588 if (device->block->request_queue) 1589 blk_mq_run_hw_queues(device->block->request_queue, 1590 true); 1591 } 1592 } 1593 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1594 1595 static int dasd_check_hpf_error(struct irb *irb) 1596 { 1597 return (scsw_tm_is_valid_schxs(&irb->scsw) && 1598 (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || 1599 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); 1600 } 1601 1602 static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) 1603 { 1604 struct dasd_device *device = NULL; 1605 u8 *sense = NULL; 1606 1607 if (!block) 1608 return 0; 1609 device = block->base; 1610 if (!device || !device->discipline->is_ese) 1611 return 0; 1612 if (!device->discipline->is_ese(device)) 1613 return 0; 1614 1615 sense = dasd_get_sense(irb); 1616 if (!sense) 1617 return 0; 1618 1619 return !!(sense[1] & SNS1_NO_REC_FOUND) || 1620 !!(sense[1] & SNS1_FILE_PROTECTED) || 1621 scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; 1622 } 1623 1624 static int dasd_ese_oos_cond(u8 *sense) 1625 { 1626 return sense[0] & SNS0_EQUIPMENT_CHECK && 1627 sense[1] & SNS1_PERM_ERR && 1628 sense[1] & SNS1_WRITE_INHIBITED && 1629 sense[25] == 0x01; 1630 } 1631 1632 /* 1633 * Interrupt handler for "normal" ssch-io based dasd devices. 1634 */ 1635 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1636 struct irb *irb) 1637 { 1638 struct dasd_ccw_req *cqr, *next, *fcqr; 1639 struct dasd_device *device; 1640 unsigned long now; 1641 int nrf_suppressed = 0; 1642 int fp_suppressed = 0; 1643 u8 *sense = NULL; 1644 int expires; 1645 1646 cqr = (struct dasd_ccw_req *) intparm; 1647 if (IS_ERR(irb)) { 1648 switch (PTR_ERR(irb)) { 1649 case -EIO: 1650 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { 1651 device = cqr->startdev; 1652 cqr->status = DASD_CQR_CLEARED; 1653 dasd_device_clear_timer(device); 1654 wake_up(&dasd_flush_wq); 1655 dasd_schedule_device_bh(device); 1656 return; 1657 } 1658 break; 1659 case -ETIMEDOUT: 1660 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1661 "request timed out\n", __func__); 1662 break; 1663 default: 1664 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1665 "unknown error %ld\n", __func__, 1666 PTR_ERR(irb)); 1667 } 1668 dasd_handle_killed_request(cdev, intparm); 1669 return; 1670 } 1671 1672 now = get_tod_clock(); 1673 /* check for conditions that should be handled immediately */ 1674 if (!cqr || 1675 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1676 scsw_cstat(&irb->scsw) == 0)) { 1677 if (cqr) 1678 memcpy(&cqr->irb, irb, sizeof(*irb)); 1679 device = dasd_device_from_cdev_locked(cdev); 1680 if (IS_ERR(device)) 1681 return; 1682 /* ignore unsolicited interrupts for DIAG discipline */ 1683 if (device->discipline == dasd_diag_discipline_pointer) { 1684 dasd_put_device(device); 1685 return; 1686 } 1687 1688 /* 1689 * In some cases 'File Protected' or 'No Record Found' errors 1690 * might be expected and debug log messages for the 1691 * corresponding interrupts shouldn't be written then. 1692 * Check if either of the according suppress bits is set. 1693 */ 1694 sense = dasd_get_sense(irb); 1695 if (sense) { 1696 fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) && 1697 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 1698 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && 1699 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 1700 1701 /* 1702 * Extent pool probably out-of-space. 1703 * Stop device and check exhaust level. 1704 */ 1705 if (dasd_ese_oos_cond(sense)) { 1706 dasd_generic_space_exhaust(device, cqr); 1707 device->discipline->ext_pool_exhaust(device, cqr); 1708 dasd_put_device(device); 1709 return; 1710 } 1711 } 1712 if (!(fp_suppressed || nrf_suppressed)) 1713 device->discipline->dump_sense_dbf(device, irb, "int"); 1714 1715 if (device->features & DASD_FEATURE_ERPLOG) 1716 device->discipline->dump_sense(device, cqr, irb); 1717 device->discipline->check_for_device_change(device, cqr, irb); 1718 dasd_put_device(device); 1719 } 1720 1721 /* check for for attention message */ 1722 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { 1723 device = dasd_device_from_cdev_locked(cdev); 1724 if (!IS_ERR(device)) { 1725 device->discipline->check_attention(device, 1726 irb->esw.esw1.lpum); 1727 dasd_put_device(device); 1728 } 1729 } 1730 1731 if (!cqr) 1732 return; 1733 1734 device = (struct dasd_device *) cqr->startdev; 1735 if (!device || 1736 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1737 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1738 "invalid device in request"); 1739 return; 1740 } 1741 1742 if (dasd_ese_needs_format(cqr->block, irb)) { 1743 if (rq_data_dir((struct request *)cqr->callback_data) == READ) { 1744 device->discipline->ese_read(cqr, irb); 1745 cqr->status = DASD_CQR_SUCCESS; 1746 cqr->stopclk = now; 1747 dasd_device_clear_timer(device); 1748 dasd_schedule_device_bh(device); 1749 return; 1750 } 1751 fcqr = device->discipline->ese_format(device, cqr, irb); 1752 if (IS_ERR(fcqr)) { 1753 if (PTR_ERR(fcqr) == -EINVAL) { 1754 cqr->status = DASD_CQR_ERROR; 1755 return; 1756 } 1757 /* 1758 * If we can't format now, let the request go 1759 * one extra round. Maybe we can format later. 1760 */ 1761 cqr->status = DASD_CQR_QUEUED; 1762 dasd_schedule_device_bh(device); 1763 return; 1764 } else { 1765 fcqr->status = DASD_CQR_QUEUED; 1766 cqr->status = DASD_CQR_QUEUED; 1767 list_add(&fcqr->devlist, &device->ccw_queue); 1768 dasd_schedule_device_bh(device); 1769 return; 1770 } 1771 } 1772 1773 /* Check for clear pending */ 1774 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1775 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1776 cqr->status = DASD_CQR_CLEARED; 1777 dasd_device_clear_timer(device); 1778 wake_up(&dasd_flush_wq); 1779 dasd_schedule_device_bh(device); 1780 return; 1781 } 1782 1783 /* check status - the request might have been killed by dyn detach */ 1784 if (cqr->status != DASD_CQR_IN_IO) { 1785 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1786 "status %02x", dev_name(&cdev->dev), cqr->status); 1787 return; 1788 } 1789 1790 next = NULL; 1791 expires = 0; 1792 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1793 scsw_cstat(&irb->scsw) == 0) { 1794 /* request was completed successfully */ 1795 cqr->status = DASD_CQR_SUCCESS; 1796 cqr->stopclk = now; 1797 /* Start first request on queue if possible -> fast_io. */ 1798 if (cqr->devlist.next != &device->ccw_queue) { 1799 next = list_entry(cqr->devlist.next, 1800 struct dasd_ccw_req, devlist); 1801 } 1802 } else { /* error */ 1803 /* check for HPF error 1804 * call discipline function to requeue all requests 1805 * and disable HPF accordingly 1806 */ 1807 if (cqr->cpmode && dasd_check_hpf_error(irb) && 1808 device->discipline->handle_hpf_error) 1809 device->discipline->handle_hpf_error(device, irb); 1810 /* 1811 * If we don't want complex ERP for this request, then just 1812 * reset this and retry it in the fastpath 1813 */ 1814 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1815 cqr->retries > 0) { 1816 if (cqr->lpm == dasd_path_get_opm(device)) 1817 DBF_DEV_EVENT(DBF_DEBUG, device, 1818 "default ERP in fastpath " 1819 "(%i retries left)", 1820 cqr->retries); 1821 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1822 cqr->lpm = dasd_path_get_opm(device); 1823 cqr->status = DASD_CQR_QUEUED; 1824 next = cqr; 1825 } else 1826 cqr->status = DASD_CQR_ERROR; 1827 } 1828 if (next && (next->status == DASD_CQR_QUEUED) && 1829 (!device->stopped)) { 1830 if (device->discipline->start_IO(next) == 0) 1831 expires = next->expires; 1832 } 1833 if (expires != 0) 1834 dasd_device_set_timer(device, expires); 1835 else 1836 dasd_device_clear_timer(device); 1837 dasd_schedule_device_bh(device); 1838 } 1839 EXPORT_SYMBOL(dasd_int_handler); 1840 1841 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1842 { 1843 struct dasd_device *device; 1844 1845 device = dasd_device_from_cdev_locked(cdev); 1846 1847 if (IS_ERR(device)) 1848 goto out; 1849 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1850 device->state != device->target || 1851 !device->discipline->check_for_device_change){ 1852 dasd_put_device(device); 1853 goto out; 1854 } 1855 if (device->discipline->dump_sense_dbf) 1856 device->discipline->dump_sense_dbf(device, irb, "uc"); 1857 device->discipline->check_for_device_change(device, NULL, irb); 1858 dasd_put_device(device); 1859 out: 1860 return UC_TODO_RETRY; 1861 } 1862 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1863 1864 /* 1865 * If we have an error on a dasd_block layer request then we cancel 1866 * and return all further requests from the same dasd_block as well. 1867 */ 1868 static void __dasd_device_recovery(struct dasd_device *device, 1869 struct dasd_ccw_req *ref_cqr) 1870 { 1871 struct list_head *l, *n; 1872 struct dasd_ccw_req *cqr; 1873 1874 /* 1875 * only requeue request that came from the dasd_block layer 1876 */ 1877 if (!ref_cqr->block) 1878 return; 1879 1880 list_for_each_safe(l, n, &device->ccw_queue) { 1881 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1882 if (cqr->status == DASD_CQR_QUEUED && 1883 ref_cqr->block == cqr->block) { 1884 cqr->status = DASD_CQR_CLEARED; 1885 } 1886 } 1887 }; 1888 1889 /* 1890 * Remove those ccw requests from the queue that need to be returned 1891 * to the upper layer. 1892 */ 1893 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1894 struct list_head *final_queue) 1895 { 1896 struct list_head *l, *n; 1897 struct dasd_ccw_req *cqr; 1898 1899 /* Process request with final status. */ 1900 list_for_each_safe(l, n, &device->ccw_queue) { 1901 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1902 1903 /* Skip any non-final request. */ 1904 if (cqr->status == DASD_CQR_QUEUED || 1905 cqr->status == DASD_CQR_IN_IO || 1906 cqr->status == DASD_CQR_CLEAR_PENDING) 1907 continue; 1908 if (cqr->status == DASD_CQR_ERROR) { 1909 __dasd_device_recovery(device, cqr); 1910 } 1911 /* Rechain finished requests to final queue */ 1912 list_move_tail(&cqr->devlist, final_queue); 1913 } 1914 } 1915 1916 static void __dasd_process_cqr(struct dasd_device *device, 1917 struct dasd_ccw_req *cqr) 1918 { 1919 char errorstring[ERRORLENGTH]; 1920 1921 switch (cqr->status) { 1922 case DASD_CQR_SUCCESS: 1923 cqr->status = DASD_CQR_DONE; 1924 break; 1925 case DASD_CQR_ERROR: 1926 cqr->status = DASD_CQR_NEED_ERP; 1927 break; 1928 case DASD_CQR_CLEARED: 1929 cqr->status = DASD_CQR_TERMINATED; 1930 break; 1931 default: 1932 /* internal error 12 - wrong cqr status*/ 1933 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1934 dev_err(&device->cdev->dev, 1935 "An error occurred in the DASD device driver, " 1936 "reason=%s\n", errorstring); 1937 BUG(); 1938 } 1939 if (cqr->callback) 1940 cqr->callback(cqr, cqr->callback_data); 1941 } 1942 1943 /* 1944 * the cqrs from the final queue are returned to the upper layer 1945 * by setting a dasd_block state and calling the callback function 1946 */ 1947 static void __dasd_device_process_final_queue(struct dasd_device *device, 1948 struct list_head *final_queue) 1949 { 1950 struct list_head *l, *n; 1951 struct dasd_ccw_req *cqr; 1952 struct dasd_block *block; 1953 1954 list_for_each_safe(l, n, final_queue) { 1955 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1956 list_del_init(&cqr->devlist); 1957 block = cqr->block; 1958 if (!block) { 1959 __dasd_process_cqr(device, cqr); 1960 } else { 1961 spin_lock_bh(&block->queue_lock); 1962 __dasd_process_cqr(device, cqr); 1963 spin_unlock_bh(&block->queue_lock); 1964 } 1965 } 1966 } 1967 1968 /* 1969 * Take a look at the first request on the ccw queue and check 1970 * if it reached its expire time. If so, terminate the IO. 1971 */ 1972 static void __dasd_device_check_expire(struct dasd_device *device) 1973 { 1974 struct dasd_ccw_req *cqr; 1975 1976 if (list_empty(&device->ccw_queue)) 1977 return; 1978 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1979 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1980 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1981 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1982 /* 1983 * IO in safe offline processing should not 1984 * run out of retries 1985 */ 1986 cqr->retries++; 1987 } 1988 if (device->discipline->term_IO(cqr) != 0) { 1989 /* Hmpf, try again in 5 sec */ 1990 dev_err(&device->cdev->dev, 1991 "cqr %p timed out (%lus) but cannot be " 1992 "ended, retrying in 5 s\n", 1993 cqr, (cqr->expires/HZ)); 1994 cqr->expires += 5*HZ; 1995 dasd_device_set_timer(device, 5*HZ); 1996 } else { 1997 dev_err(&device->cdev->dev, 1998 "cqr %p timed out (%lus), %i retries " 1999 "remaining\n", cqr, (cqr->expires/HZ), 2000 cqr->retries); 2001 } 2002 } 2003 } 2004 2005 /* 2006 * return 1 when device is not eligible for IO 2007 */ 2008 static int __dasd_device_is_unusable(struct dasd_device *device, 2009 struct dasd_ccw_req *cqr) 2010 { 2011 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2012 2013 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2014 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2015 /* 2016 * dasd is being set offline 2017 * but it is no safe offline where we have to allow I/O 2018 */ 2019 return 1; 2020 } 2021 if (device->stopped) { 2022 if (device->stopped & mask) { 2023 /* stopped and CQR will not change that. */ 2024 return 1; 2025 } 2026 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2027 /* CQR is not able to change device to 2028 * operational. */ 2029 return 1; 2030 } 2031 /* CQR required to get device operational. */ 2032 } 2033 return 0; 2034 } 2035 2036 /* 2037 * Take a look at the first request on the ccw queue and check 2038 * if it needs to be started. 2039 */ 2040 static void __dasd_device_start_head(struct dasd_device *device) 2041 { 2042 struct dasd_ccw_req *cqr; 2043 int rc; 2044 2045 if (list_empty(&device->ccw_queue)) 2046 return; 2047 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2048 if (cqr->status != DASD_CQR_QUEUED) 2049 return; 2050 /* if device is not usable return request to upper layer */ 2051 if (__dasd_device_is_unusable(device, cqr)) { 2052 cqr->intrc = -EAGAIN; 2053 cqr->status = DASD_CQR_CLEARED; 2054 dasd_schedule_device_bh(device); 2055 return; 2056 } 2057 2058 rc = device->discipline->start_IO(cqr); 2059 if (rc == 0) 2060 dasd_device_set_timer(device, cqr->expires); 2061 else if (rc == -EACCES) { 2062 dasd_schedule_device_bh(device); 2063 } else 2064 /* Hmpf, try again in 1/2 sec */ 2065 dasd_device_set_timer(device, 50); 2066 } 2067 2068 static void __dasd_device_check_path_events(struct dasd_device *device) 2069 { 2070 __u8 tbvpm, fcsecpm; 2071 int rc; 2072 2073 tbvpm = dasd_path_get_tbvpm(device); 2074 fcsecpm = dasd_path_get_fcsecpm(device); 2075 2076 if (!tbvpm && !fcsecpm) 2077 return; 2078 2079 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2080 return; 2081 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2082 if (rc) { 2083 dasd_device_set_timer(device, 50); 2084 } else { 2085 dasd_path_clear_all_verify(device); 2086 dasd_path_clear_all_fcsec(device); 2087 } 2088 }; 2089 2090 /* 2091 * Go through all request on the dasd_device request queue, 2092 * terminate them on the cdev if necessary, and return them to the 2093 * submitting layer via callback. 2094 * Note: 2095 * Make sure that all 'submitting layers' still exist when 2096 * this function is called!. In other words, when 'device' is a base 2097 * device then all block layer requests must have been removed before 2098 * via dasd_flush_block_queue. 2099 */ 2100 int dasd_flush_device_queue(struct dasd_device *device) 2101 { 2102 struct dasd_ccw_req *cqr, *n; 2103 int rc; 2104 struct list_head flush_queue; 2105 2106 INIT_LIST_HEAD(&flush_queue); 2107 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2108 rc = 0; 2109 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2110 /* Check status and move request to flush_queue */ 2111 switch (cqr->status) { 2112 case DASD_CQR_IN_IO: 2113 rc = device->discipline->term_IO(cqr); 2114 if (rc) { 2115 /* unable to terminate requeust */ 2116 dev_err(&device->cdev->dev, 2117 "Flushing the DASD request queue " 2118 "failed for request %p\n", cqr); 2119 /* stop flush processing */ 2120 goto finished; 2121 } 2122 break; 2123 case DASD_CQR_QUEUED: 2124 cqr->stopclk = get_tod_clock(); 2125 cqr->status = DASD_CQR_CLEARED; 2126 break; 2127 default: /* no need to modify the others */ 2128 break; 2129 } 2130 list_move_tail(&cqr->devlist, &flush_queue); 2131 } 2132 finished: 2133 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2134 /* 2135 * After this point all requests must be in state CLEAR_PENDING, 2136 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2137 * one of the others. 2138 */ 2139 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2140 wait_event(dasd_flush_wq, 2141 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2142 /* 2143 * Now set each request back to TERMINATED, DONE or NEED_ERP 2144 * and call the callback function of flushed requests 2145 */ 2146 __dasd_device_process_final_queue(device, &flush_queue); 2147 return rc; 2148 } 2149 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2150 2151 /* 2152 * Acquire the device lock and process queues for the device. 2153 */ 2154 static void dasd_device_tasklet(unsigned long data) 2155 { 2156 struct dasd_device *device = (struct dasd_device *) data; 2157 struct list_head final_queue; 2158 2159 atomic_set (&device->tasklet_scheduled, 0); 2160 INIT_LIST_HEAD(&final_queue); 2161 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2162 /* Check expire time of first request on the ccw queue. */ 2163 __dasd_device_check_expire(device); 2164 /* find final requests on ccw queue */ 2165 __dasd_device_process_ccw_queue(device, &final_queue); 2166 __dasd_device_check_path_events(device); 2167 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2168 /* Now call the callback function of requests with final status */ 2169 __dasd_device_process_final_queue(device, &final_queue); 2170 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2171 /* Now check if the head of the ccw queue needs to be started. */ 2172 __dasd_device_start_head(device); 2173 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2174 if (waitqueue_active(&shutdown_waitq)) 2175 wake_up(&shutdown_waitq); 2176 dasd_put_device(device); 2177 } 2178 2179 /* 2180 * Schedules a call to dasd_tasklet over the device tasklet. 2181 */ 2182 void dasd_schedule_device_bh(struct dasd_device *device) 2183 { 2184 /* Protect against rescheduling. */ 2185 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2186 return; 2187 dasd_get_device(device); 2188 tasklet_hi_schedule(&device->tasklet); 2189 } 2190 EXPORT_SYMBOL(dasd_schedule_device_bh); 2191 2192 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2193 { 2194 device->stopped |= bits; 2195 } 2196 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2197 2198 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2199 { 2200 device->stopped &= ~bits; 2201 if (!device->stopped) 2202 wake_up(&generic_waitq); 2203 } 2204 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2205 2206 /* 2207 * Queue a request to the head of the device ccw_queue. 2208 * Start the I/O if possible. 2209 */ 2210 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2211 { 2212 struct dasd_device *device; 2213 unsigned long flags; 2214 2215 device = cqr->startdev; 2216 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2217 cqr->status = DASD_CQR_QUEUED; 2218 list_add(&cqr->devlist, &device->ccw_queue); 2219 /* let the bh start the request to keep them in order */ 2220 dasd_schedule_device_bh(device); 2221 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2222 } 2223 EXPORT_SYMBOL(dasd_add_request_head); 2224 2225 /* 2226 * Queue a request to the tail of the device ccw_queue. 2227 * Start the I/O if possible. 2228 */ 2229 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2230 { 2231 struct dasd_device *device; 2232 unsigned long flags; 2233 2234 device = cqr->startdev; 2235 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2236 cqr->status = DASD_CQR_QUEUED; 2237 list_add_tail(&cqr->devlist, &device->ccw_queue); 2238 /* let the bh start the request to keep them in order */ 2239 dasd_schedule_device_bh(device); 2240 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2241 } 2242 EXPORT_SYMBOL(dasd_add_request_tail); 2243 2244 /* 2245 * Wakeup helper for the 'sleep_on' functions. 2246 */ 2247 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2248 { 2249 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2250 cqr->callback_data = DASD_SLEEPON_END_TAG; 2251 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2252 wake_up(&generic_waitq); 2253 } 2254 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2255 2256 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2257 { 2258 struct dasd_device *device; 2259 int rc; 2260 2261 device = cqr->startdev; 2262 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2263 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2264 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2265 return rc; 2266 } 2267 2268 /* 2269 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2270 */ 2271 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2272 { 2273 struct dasd_device *device; 2274 dasd_erp_fn_t erp_fn; 2275 2276 if (cqr->status == DASD_CQR_FILLED) 2277 return 0; 2278 device = cqr->startdev; 2279 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2280 if (cqr->status == DASD_CQR_TERMINATED) { 2281 device->discipline->handle_terminated_request(cqr); 2282 return 1; 2283 } 2284 if (cqr->status == DASD_CQR_NEED_ERP) { 2285 erp_fn = device->discipline->erp_action(cqr); 2286 erp_fn(cqr); 2287 return 1; 2288 } 2289 if (cqr->status == DASD_CQR_FAILED) 2290 dasd_log_sense(cqr, &cqr->irb); 2291 if (cqr->refers) { 2292 __dasd_process_erp(device, cqr); 2293 return 1; 2294 } 2295 } 2296 return 0; 2297 } 2298 2299 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2300 { 2301 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2302 if (cqr->refers) /* erp is not done yet */ 2303 return 1; 2304 return ((cqr->status != DASD_CQR_DONE) && 2305 (cqr->status != DASD_CQR_FAILED)); 2306 } else 2307 return (cqr->status == DASD_CQR_FILLED); 2308 } 2309 2310 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2311 { 2312 struct dasd_device *device; 2313 int rc; 2314 struct list_head ccw_queue; 2315 struct dasd_ccw_req *cqr; 2316 2317 INIT_LIST_HEAD(&ccw_queue); 2318 maincqr->status = DASD_CQR_FILLED; 2319 device = maincqr->startdev; 2320 list_add(&maincqr->blocklist, &ccw_queue); 2321 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2322 cqr = list_first_entry(&ccw_queue, 2323 struct dasd_ccw_req, blocklist)) { 2324 2325 if (__dasd_sleep_on_erp(cqr)) 2326 continue; 2327 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2328 continue; 2329 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2330 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2331 cqr->status = DASD_CQR_FAILED; 2332 cqr->intrc = -EPERM; 2333 continue; 2334 } 2335 /* Non-temporary stop condition will trigger fail fast */ 2336 if (device->stopped & ~DASD_STOPPED_PENDING && 2337 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2338 (!dasd_eer_enabled(device))) { 2339 cqr->status = DASD_CQR_FAILED; 2340 cqr->intrc = -ENOLINK; 2341 continue; 2342 } 2343 /* 2344 * Don't try to start requests if device is in 2345 * offline processing, it might wait forever 2346 */ 2347 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2348 cqr->status = DASD_CQR_FAILED; 2349 cqr->intrc = -ENODEV; 2350 continue; 2351 } 2352 /* 2353 * Don't try to start requests if device is stopped 2354 * except path verification requests 2355 */ 2356 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2357 if (interruptible) { 2358 rc = wait_event_interruptible( 2359 generic_waitq, !(device->stopped)); 2360 if (rc == -ERESTARTSYS) { 2361 cqr->status = DASD_CQR_FAILED; 2362 maincqr->intrc = rc; 2363 continue; 2364 } 2365 } else 2366 wait_event(generic_waitq, !(device->stopped)); 2367 } 2368 if (!cqr->callback) 2369 cqr->callback = dasd_wakeup_cb; 2370 2371 cqr->callback_data = DASD_SLEEPON_START_TAG; 2372 dasd_add_request_tail(cqr); 2373 if (interruptible) { 2374 rc = wait_event_interruptible( 2375 generic_waitq, _wait_for_wakeup(cqr)); 2376 if (rc == -ERESTARTSYS) { 2377 dasd_cancel_req(cqr); 2378 /* wait (non-interruptible) for final status */ 2379 wait_event(generic_waitq, 2380 _wait_for_wakeup(cqr)); 2381 cqr->status = DASD_CQR_FAILED; 2382 maincqr->intrc = rc; 2383 continue; 2384 } 2385 } else 2386 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2387 } 2388 2389 maincqr->endclk = get_tod_clock(); 2390 if ((maincqr->status != DASD_CQR_DONE) && 2391 (maincqr->intrc != -ERESTARTSYS)) 2392 dasd_log_sense(maincqr, &maincqr->irb); 2393 if (maincqr->status == DASD_CQR_DONE) 2394 rc = 0; 2395 else if (maincqr->intrc) 2396 rc = maincqr->intrc; 2397 else 2398 rc = -EIO; 2399 return rc; 2400 } 2401 2402 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2403 { 2404 struct dasd_ccw_req *cqr; 2405 2406 list_for_each_entry(cqr, ccw_queue, blocklist) { 2407 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2408 return 0; 2409 } 2410 2411 return 1; 2412 } 2413 2414 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2415 { 2416 struct dasd_device *device; 2417 struct dasd_ccw_req *cqr, *n; 2418 u8 *sense = NULL; 2419 int rc; 2420 2421 retry: 2422 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2423 device = cqr->startdev; 2424 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2425 continue; 2426 2427 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2428 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2429 cqr->status = DASD_CQR_FAILED; 2430 cqr->intrc = -EPERM; 2431 continue; 2432 } 2433 /*Non-temporary stop condition will trigger fail fast*/ 2434 if (device->stopped & ~DASD_STOPPED_PENDING && 2435 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2436 !dasd_eer_enabled(device)) { 2437 cqr->status = DASD_CQR_FAILED; 2438 cqr->intrc = -EAGAIN; 2439 continue; 2440 } 2441 2442 /*Don't try to start requests if device is stopped*/ 2443 if (interruptible) { 2444 rc = wait_event_interruptible( 2445 generic_waitq, !device->stopped); 2446 if (rc == -ERESTARTSYS) { 2447 cqr->status = DASD_CQR_FAILED; 2448 cqr->intrc = rc; 2449 continue; 2450 } 2451 } else 2452 wait_event(generic_waitq, !(device->stopped)); 2453 2454 if (!cqr->callback) 2455 cqr->callback = dasd_wakeup_cb; 2456 cqr->callback_data = DASD_SLEEPON_START_TAG; 2457 dasd_add_request_tail(cqr); 2458 } 2459 2460 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2461 2462 rc = 0; 2463 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2464 /* 2465 * In some cases the 'File Protected' or 'Incorrect Length' 2466 * error might be expected and error recovery would be 2467 * unnecessary in these cases. Check if the according suppress 2468 * bit is set. 2469 */ 2470 sense = dasd_get_sense(&cqr->irb); 2471 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2472 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2473 continue; 2474 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2475 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2476 continue; 2477 2478 /* 2479 * for alias devices simplify error recovery and 2480 * return to upper layer 2481 * do not skip ERP requests 2482 */ 2483 if (cqr->startdev != cqr->basedev && !cqr->refers && 2484 (cqr->status == DASD_CQR_TERMINATED || 2485 cqr->status == DASD_CQR_NEED_ERP)) 2486 return -EAGAIN; 2487 2488 /* normal recovery for basedev IO */ 2489 if (__dasd_sleep_on_erp(cqr)) 2490 /* handle erp first */ 2491 goto retry; 2492 } 2493 2494 return 0; 2495 } 2496 2497 /* 2498 * Queue a request to the tail of the device ccw_queue and wait for 2499 * it's completion. 2500 */ 2501 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2502 { 2503 return _dasd_sleep_on(cqr, 0); 2504 } 2505 EXPORT_SYMBOL(dasd_sleep_on); 2506 2507 /* 2508 * Start requests from a ccw_queue and wait for their completion. 2509 */ 2510 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2511 { 2512 return _dasd_sleep_on_queue(ccw_queue, 0); 2513 } 2514 EXPORT_SYMBOL(dasd_sleep_on_queue); 2515 2516 /* 2517 * Start requests from a ccw_queue and wait interruptible for their completion. 2518 */ 2519 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2520 { 2521 return _dasd_sleep_on_queue(ccw_queue, 1); 2522 } 2523 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2524 2525 /* 2526 * Queue a request to the tail of the device ccw_queue and wait 2527 * interruptible for it's completion. 2528 */ 2529 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2530 { 2531 return _dasd_sleep_on(cqr, 1); 2532 } 2533 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2534 2535 /* 2536 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2537 * for eckd devices) the currently running request has to be terminated 2538 * and be put back to status queued, before the special request is added 2539 * to the head of the queue. Then the special request is waited on normally. 2540 */ 2541 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2542 { 2543 struct dasd_ccw_req *cqr; 2544 int rc; 2545 2546 if (list_empty(&device->ccw_queue)) 2547 return 0; 2548 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2549 rc = device->discipline->term_IO(cqr); 2550 if (!rc) 2551 /* 2552 * CQR terminated because a more important request is pending. 2553 * Undo decreasing of retry counter because this is 2554 * not an error case. 2555 */ 2556 cqr->retries++; 2557 return rc; 2558 } 2559 2560 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2561 { 2562 struct dasd_device *device; 2563 int rc; 2564 2565 device = cqr->startdev; 2566 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2567 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2568 cqr->status = DASD_CQR_FAILED; 2569 cqr->intrc = -EPERM; 2570 return -EIO; 2571 } 2572 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2573 rc = _dasd_term_running_cqr(device); 2574 if (rc) { 2575 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2576 return rc; 2577 } 2578 cqr->callback = dasd_wakeup_cb; 2579 cqr->callback_data = DASD_SLEEPON_START_TAG; 2580 cqr->status = DASD_CQR_QUEUED; 2581 /* 2582 * add new request as second 2583 * first the terminated cqr needs to be finished 2584 */ 2585 list_add(&cqr->devlist, device->ccw_queue.next); 2586 2587 /* let the bh start the request to keep them in order */ 2588 dasd_schedule_device_bh(device); 2589 2590 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2591 2592 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2593 2594 if (cqr->status == DASD_CQR_DONE) 2595 rc = 0; 2596 else if (cqr->intrc) 2597 rc = cqr->intrc; 2598 else 2599 rc = -EIO; 2600 2601 /* kick tasklets */ 2602 dasd_schedule_device_bh(device); 2603 if (device->block) 2604 dasd_schedule_block_bh(device->block); 2605 2606 return rc; 2607 } 2608 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2609 2610 /* 2611 * Cancels a request that was started with dasd_sleep_on_req. 2612 * This is useful to timeout requests. The request will be 2613 * terminated if it is currently in i/o. 2614 * Returns 0 if request termination was successful 2615 * negative error code if termination failed 2616 * Cancellation of a request is an asynchronous operation! The calling 2617 * function has to wait until the request is properly returned via callback. 2618 */ 2619 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2620 { 2621 struct dasd_device *device = cqr->startdev; 2622 int rc = 0; 2623 2624 switch (cqr->status) { 2625 case DASD_CQR_QUEUED: 2626 /* request was not started - just set to cleared */ 2627 cqr->status = DASD_CQR_CLEARED; 2628 break; 2629 case DASD_CQR_IN_IO: 2630 /* request in IO - terminate IO and release again */ 2631 rc = device->discipline->term_IO(cqr); 2632 if (rc) { 2633 dev_err(&device->cdev->dev, 2634 "Cancelling request %p failed with rc=%d\n", 2635 cqr, rc); 2636 } else { 2637 cqr->stopclk = get_tod_clock(); 2638 } 2639 break; 2640 default: /* already finished or clear pending - do nothing */ 2641 break; 2642 } 2643 dasd_schedule_device_bh(device); 2644 return rc; 2645 } 2646 2647 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2648 { 2649 struct dasd_device *device = cqr->startdev; 2650 unsigned long flags; 2651 int rc; 2652 2653 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2654 rc = __dasd_cancel_req(cqr); 2655 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2656 return rc; 2657 } 2658 2659 /* 2660 * SECTION: Operations of the dasd_block layer. 2661 */ 2662 2663 /* 2664 * Timeout function for dasd_block. This is used when the block layer 2665 * is waiting for something that may not come reliably, (e.g. a state 2666 * change interrupt) 2667 */ 2668 static void dasd_block_timeout(struct timer_list *t) 2669 { 2670 unsigned long flags; 2671 struct dasd_block *block; 2672 2673 block = from_timer(block, t, timer); 2674 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2675 /* re-activate request queue */ 2676 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2677 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2678 dasd_schedule_block_bh(block); 2679 blk_mq_run_hw_queues(block->request_queue, true); 2680 } 2681 2682 /* 2683 * Setup timeout for a dasd_block in jiffies. 2684 */ 2685 void dasd_block_set_timer(struct dasd_block *block, int expires) 2686 { 2687 if (expires == 0) 2688 del_timer(&block->timer); 2689 else 2690 mod_timer(&block->timer, jiffies + expires); 2691 } 2692 EXPORT_SYMBOL(dasd_block_set_timer); 2693 2694 /* 2695 * Clear timeout for a dasd_block. 2696 */ 2697 void dasd_block_clear_timer(struct dasd_block *block) 2698 { 2699 del_timer(&block->timer); 2700 } 2701 EXPORT_SYMBOL(dasd_block_clear_timer); 2702 2703 /* 2704 * Process finished error recovery ccw. 2705 */ 2706 static void __dasd_process_erp(struct dasd_device *device, 2707 struct dasd_ccw_req *cqr) 2708 { 2709 dasd_erp_fn_t erp_fn; 2710 2711 if (cqr->status == DASD_CQR_DONE) 2712 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2713 else 2714 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2715 erp_fn = device->discipline->erp_postaction(cqr); 2716 erp_fn(cqr); 2717 } 2718 2719 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2720 { 2721 struct request *req; 2722 blk_status_t error = BLK_STS_OK; 2723 unsigned int proc_bytes; 2724 int status; 2725 2726 req = (struct request *) cqr->callback_data; 2727 dasd_profile_end(cqr->block, cqr, req); 2728 2729 proc_bytes = cqr->proc_bytes; 2730 status = cqr->block->base->discipline->free_cp(cqr, req); 2731 if (status < 0) 2732 error = errno_to_blk_status(status); 2733 else if (status == 0) { 2734 switch (cqr->intrc) { 2735 case -EPERM: 2736 error = BLK_STS_NEXUS; 2737 break; 2738 case -ENOLINK: 2739 error = BLK_STS_TRANSPORT; 2740 break; 2741 case -ETIMEDOUT: 2742 error = BLK_STS_TIMEOUT; 2743 break; 2744 default: 2745 error = BLK_STS_IOERR; 2746 break; 2747 } 2748 } 2749 2750 /* 2751 * We need to take care for ETIMEDOUT errors here since the 2752 * complete callback does not get called in this case. 2753 * Take care of all errors here and avoid additional code to 2754 * transfer the error value to the complete callback. 2755 */ 2756 if (error) { 2757 blk_mq_end_request(req, error); 2758 blk_mq_run_hw_queues(req->q, true); 2759 } else { 2760 /* 2761 * Partial completed requests can happen with ESE devices. 2762 * During read we might have gotten a NRF error and have to 2763 * complete a request partially. 2764 */ 2765 if (proc_bytes) { 2766 blk_update_request(req, BLK_STS_OK, 2767 blk_rq_bytes(req) - proc_bytes); 2768 blk_mq_requeue_request(req, true); 2769 } else if (likely(!blk_should_fake_timeout(req->q))) { 2770 blk_mq_complete_request(req); 2771 } 2772 } 2773 } 2774 2775 /* 2776 * Process ccw request queue. 2777 */ 2778 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2779 struct list_head *final_queue) 2780 { 2781 struct list_head *l, *n; 2782 struct dasd_ccw_req *cqr; 2783 dasd_erp_fn_t erp_fn; 2784 unsigned long flags; 2785 struct dasd_device *base = block->base; 2786 2787 restart: 2788 /* Process request with final status. */ 2789 list_for_each_safe(l, n, &block->ccw_queue) { 2790 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2791 if (cqr->status != DASD_CQR_DONE && 2792 cqr->status != DASD_CQR_FAILED && 2793 cqr->status != DASD_CQR_NEED_ERP && 2794 cqr->status != DASD_CQR_TERMINATED) 2795 continue; 2796 2797 if (cqr->status == DASD_CQR_TERMINATED) { 2798 base->discipline->handle_terminated_request(cqr); 2799 goto restart; 2800 } 2801 2802 /* Process requests that may be recovered */ 2803 if (cqr->status == DASD_CQR_NEED_ERP) { 2804 erp_fn = base->discipline->erp_action(cqr); 2805 if (IS_ERR(erp_fn(cqr))) 2806 continue; 2807 goto restart; 2808 } 2809 2810 /* log sense for fatal error */ 2811 if (cqr->status == DASD_CQR_FAILED) { 2812 dasd_log_sense(cqr, &cqr->irb); 2813 } 2814 2815 /* First of all call extended error reporting. */ 2816 if (dasd_eer_enabled(base) && 2817 cqr->status == DASD_CQR_FAILED) { 2818 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2819 2820 /* restart request */ 2821 cqr->status = DASD_CQR_FILLED; 2822 cqr->retries = 255; 2823 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2824 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2825 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2826 flags); 2827 goto restart; 2828 } 2829 2830 /* Process finished ERP request. */ 2831 if (cqr->refers) { 2832 __dasd_process_erp(base, cqr); 2833 goto restart; 2834 } 2835 2836 /* Rechain finished requests to final queue */ 2837 cqr->endclk = get_tod_clock(); 2838 list_move_tail(&cqr->blocklist, final_queue); 2839 } 2840 } 2841 2842 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2843 { 2844 dasd_schedule_block_bh(cqr->block); 2845 } 2846 2847 static void __dasd_block_start_head(struct dasd_block *block) 2848 { 2849 struct dasd_ccw_req *cqr; 2850 2851 if (list_empty(&block->ccw_queue)) 2852 return; 2853 /* We allways begin with the first requests on the queue, as some 2854 * of previously started requests have to be enqueued on a 2855 * dasd_device again for error recovery. 2856 */ 2857 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2858 if (cqr->status != DASD_CQR_FILLED) 2859 continue; 2860 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2861 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2862 cqr->status = DASD_CQR_FAILED; 2863 cqr->intrc = -EPERM; 2864 dasd_schedule_block_bh(block); 2865 continue; 2866 } 2867 /* Non-temporary stop condition will trigger fail fast */ 2868 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2869 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2870 (!dasd_eer_enabled(block->base))) { 2871 cqr->status = DASD_CQR_FAILED; 2872 cqr->intrc = -ENOLINK; 2873 dasd_schedule_block_bh(block); 2874 continue; 2875 } 2876 /* Don't try to start requests if device is stopped */ 2877 if (block->base->stopped) 2878 return; 2879 2880 /* just a fail safe check, should not happen */ 2881 if (!cqr->startdev) 2882 cqr->startdev = block->base; 2883 2884 /* make sure that the requests we submit find their way back */ 2885 cqr->callback = dasd_return_cqr_cb; 2886 2887 dasd_add_request_tail(cqr); 2888 } 2889 } 2890 2891 /* 2892 * Central dasd_block layer routine. Takes requests from the generic 2893 * block layer request queue, creates ccw requests, enqueues them on 2894 * a dasd_device and processes ccw requests that have been returned. 2895 */ 2896 static void dasd_block_tasklet(unsigned long data) 2897 { 2898 struct dasd_block *block = (struct dasd_block *) data; 2899 struct list_head final_queue; 2900 struct list_head *l, *n; 2901 struct dasd_ccw_req *cqr; 2902 struct dasd_queue *dq; 2903 2904 atomic_set(&block->tasklet_scheduled, 0); 2905 INIT_LIST_HEAD(&final_queue); 2906 spin_lock_irq(&block->queue_lock); 2907 /* Finish off requests on ccw queue */ 2908 __dasd_process_block_ccw_queue(block, &final_queue); 2909 spin_unlock_irq(&block->queue_lock); 2910 2911 /* Now call the callback function of requests with final status */ 2912 list_for_each_safe(l, n, &final_queue) { 2913 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2914 dq = cqr->dq; 2915 spin_lock_irq(&dq->lock); 2916 list_del_init(&cqr->blocklist); 2917 __dasd_cleanup_cqr(cqr); 2918 spin_unlock_irq(&dq->lock); 2919 } 2920 2921 spin_lock_irq(&block->queue_lock); 2922 /* Now check if the head of the ccw queue needs to be started. */ 2923 __dasd_block_start_head(block); 2924 spin_unlock_irq(&block->queue_lock); 2925 2926 if (waitqueue_active(&shutdown_waitq)) 2927 wake_up(&shutdown_waitq); 2928 dasd_put_device(block->base); 2929 } 2930 2931 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2932 { 2933 wake_up(&dasd_flush_wq); 2934 } 2935 2936 /* 2937 * Requeue a request back to the block request queue 2938 * only works for block requests 2939 */ 2940 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2941 { 2942 struct dasd_block *block = cqr->block; 2943 struct request *req; 2944 2945 if (!block) 2946 return -EINVAL; 2947 /* 2948 * If the request is an ERP request there is nothing to requeue. 2949 * This will be done with the remaining original request. 2950 */ 2951 if (cqr->refers) 2952 return 0; 2953 spin_lock_irq(&cqr->dq->lock); 2954 req = (struct request *) cqr->callback_data; 2955 blk_mq_requeue_request(req, false); 2956 spin_unlock_irq(&cqr->dq->lock); 2957 2958 return 0; 2959 } 2960 2961 /* 2962 * Go through all request on the dasd_block request queue, cancel them 2963 * on the respective dasd_device, and return them to the generic 2964 * block layer. 2965 */ 2966 static int dasd_flush_block_queue(struct dasd_block *block) 2967 { 2968 struct dasd_ccw_req *cqr, *n; 2969 int rc, i; 2970 struct list_head flush_queue; 2971 unsigned long flags; 2972 2973 INIT_LIST_HEAD(&flush_queue); 2974 spin_lock_bh(&block->queue_lock); 2975 rc = 0; 2976 restart: 2977 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2978 /* if this request currently owned by a dasd_device cancel it */ 2979 if (cqr->status >= DASD_CQR_QUEUED) 2980 rc = dasd_cancel_req(cqr); 2981 if (rc < 0) 2982 break; 2983 /* Rechain request (including erp chain) so it won't be 2984 * touched by the dasd_block_tasklet anymore. 2985 * Replace the callback so we notice when the request 2986 * is returned from the dasd_device layer. 2987 */ 2988 cqr->callback = _dasd_wake_block_flush_cb; 2989 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2990 list_move_tail(&cqr->blocklist, &flush_queue); 2991 if (i > 1) 2992 /* moved more than one request - need to restart */ 2993 goto restart; 2994 } 2995 spin_unlock_bh(&block->queue_lock); 2996 /* Now call the callback function of flushed requests */ 2997 restart_cb: 2998 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2999 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3000 /* Process finished ERP request. */ 3001 if (cqr->refers) { 3002 spin_lock_bh(&block->queue_lock); 3003 __dasd_process_erp(block->base, cqr); 3004 spin_unlock_bh(&block->queue_lock); 3005 /* restart list_for_xx loop since dasd_process_erp 3006 * might remove multiple elements */ 3007 goto restart_cb; 3008 } 3009 /* call the callback function */ 3010 spin_lock_irqsave(&cqr->dq->lock, flags); 3011 cqr->endclk = get_tod_clock(); 3012 list_del_init(&cqr->blocklist); 3013 __dasd_cleanup_cqr(cqr); 3014 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3015 } 3016 return rc; 3017 } 3018 3019 /* 3020 * Schedules a call to dasd_tasklet over the device tasklet. 3021 */ 3022 void dasd_schedule_block_bh(struct dasd_block *block) 3023 { 3024 /* Protect against rescheduling. */ 3025 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3026 return; 3027 /* life cycle of block is bound to it's base device */ 3028 dasd_get_device(block->base); 3029 tasklet_hi_schedule(&block->tasklet); 3030 } 3031 EXPORT_SYMBOL(dasd_schedule_block_bh); 3032 3033 3034 /* 3035 * SECTION: external block device operations 3036 * (request queue handling, open, release, etc.) 3037 */ 3038 3039 /* 3040 * Dasd request queue function. Called from ll_rw_blk.c 3041 */ 3042 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3043 const struct blk_mq_queue_data *qd) 3044 { 3045 struct dasd_block *block = hctx->queue->queuedata; 3046 struct dasd_queue *dq = hctx->driver_data; 3047 struct request *req = qd->rq; 3048 struct dasd_device *basedev; 3049 struct dasd_ccw_req *cqr; 3050 blk_status_t rc = BLK_STS_OK; 3051 3052 basedev = block->base; 3053 spin_lock_irq(&dq->lock); 3054 if (basedev->state < DASD_STATE_READY || 3055 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3056 DBF_DEV_EVENT(DBF_ERR, basedev, 3057 "device not ready for request %p", req); 3058 rc = BLK_STS_IOERR; 3059 goto out; 3060 } 3061 3062 /* 3063 * if device is stopped do not fetch new requests 3064 * except failfast is active which will let requests fail 3065 * immediately in __dasd_block_start_head() 3066 */ 3067 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3068 DBF_DEV_EVENT(DBF_ERR, basedev, 3069 "device stopped request %p", req); 3070 rc = BLK_STS_RESOURCE; 3071 goto out; 3072 } 3073 3074 if (basedev->features & DASD_FEATURE_READONLY && 3075 rq_data_dir(req) == WRITE) { 3076 DBF_DEV_EVENT(DBF_ERR, basedev, 3077 "Rejecting write request %p", req); 3078 rc = BLK_STS_IOERR; 3079 goto out; 3080 } 3081 3082 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3083 (basedev->features & DASD_FEATURE_FAILFAST || 3084 blk_noretry_request(req))) { 3085 DBF_DEV_EVENT(DBF_ERR, basedev, 3086 "Rejecting failfast request %p", req); 3087 rc = BLK_STS_IOERR; 3088 goto out; 3089 } 3090 3091 cqr = basedev->discipline->build_cp(basedev, block, req); 3092 if (IS_ERR(cqr)) { 3093 if (PTR_ERR(cqr) == -EBUSY || 3094 PTR_ERR(cqr) == -ENOMEM || 3095 PTR_ERR(cqr) == -EAGAIN) { 3096 rc = BLK_STS_RESOURCE; 3097 goto out; 3098 } 3099 DBF_DEV_EVENT(DBF_ERR, basedev, 3100 "CCW creation failed (rc=%ld) on request %p", 3101 PTR_ERR(cqr), req); 3102 rc = BLK_STS_IOERR; 3103 goto out; 3104 } 3105 /* 3106 * Note: callback is set to dasd_return_cqr_cb in 3107 * __dasd_block_start_head to cover erp requests as well 3108 */ 3109 cqr->callback_data = req; 3110 cqr->status = DASD_CQR_FILLED; 3111 cqr->dq = dq; 3112 3113 blk_mq_start_request(req); 3114 spin_lock(&block->queue_lock); 3115 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3116 INIT_LIST_HEAD(&cqr->devlist); 3117 dasd_profile_start(block, cqr, req); 3118 dasd_schedule_block_bh(block); 3119 spin_unlock(&block->queue_lock); 3120 3121 out: 3122 spin_unlock_irq(&dq->lock); 3123 return rc; 3124 } 3125 3126 /* 3127 * Block timeout callback, called from the block layer 3128 * 3129 * Return values: 3130 * BLK_EH_RESET_TIMER if the request should be left running 3131 * BLK_EH_DONE if the request is handled or terminated 3132 * by the driver. 3133 */ 3134 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3135 { 3136 struct dasd_block *block = req->q->queuedata; 3137 struct dasd_device *device; 3138 struct dasd_ccw_req *cqr; 3139 unsigned long flags; 3140 int rc = 0; 3141 3142 cqr = blk_mq_rq_to_pdu(req); 3143 if (!cqr) 3144 return BLK_EH_DONE; 3145 3146 spin_lock_irqsave(&cqr->dq->lock, flags); 3147 device = cqr->startdev ? cqr->startdev : block->base; 3148 if (!device->blk_timeout) { 3149 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3150 return BLK_EH_RESET_TIMER; 3151 } 3152 DBF_DEV_EVENT(DBF_WARNING, device, 3153 " dasd_times_out cqr %p status %x", 3154 cqr, cqr->status); 3155 3156 spin_lock(&block->queue_lock); 3157 spin_lock(get_ccwdev_lock(device->cdev)); 3158 cqr->retries = -1; 3159 cqr->intrc = -ETIMEDOUT; 3160 if (cqr->status >= DASD_CQR_QUEUED) { 3161 rc = __dasd_cancel_req(cqr); 3162 } else if (cqr->status == DASD_CQR_FILLED || 3163 cqr->status == DASD_CQR_NEED_ERP) { 3164 cqr->status = DASD_CQR_TERMINATED; 3165 } else if (cqr->status == DASD_CQR_IN_ERP) { 3166 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3167 3168 list_for_each_entry_safe(searchcqr, nextcqr, 3169 &block->ccw_queue, blocklist) { 3170 tmpcqr = searchcqr; 3171 while (tmpcqr->refers) 3172 tmpcqr = tmpcqr->refers; 3173 if (tmpcqr != cqr) 3174 continue; 3175 /* searchcqr is an ERP request for cqr */ 3176 searchcqr->retries = -1; 3177 searchcqr->intrc = -ETIMEDOUT; 3178 if (searchcqr->status >= DASD_CQR_QUEUED) { 3179 rc = __dasd_cancel_req(searchcqr); 3180 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3181 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3182 searchcqr->status = DASD_CQR_TERMINATED; 3183 rc = 0; 3184 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3185 /* 3186 * Shouldn't happen; most recent ERP 3187 * request is at the front of queue 3188 */ 3189 continue; 3190 } 3191 break; 3192 } 3193 } 3194 spin_unlock(get_ccwdev_lock(device->cdev)); 3195 dasd_schedule_block_bh(block); 3196 spin_unlock(&block->queue_lock); 3197 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3198 3199 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3200 } 3201 3202 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3203 unsigned int idx) 3204 { 3205 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3206 3207 if (!dq) 3208 return -ENOMEM; 3209 3210 spin_lock_init(&dq->lock); 3211 hctx->driver_data = dq; 3212 3213 return 0; 3214 } 3215 3216 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3217 { 3218 kfree(hctx->driver_data); 3219 hctx->driver_data = NULL; 3220 } 3221 3222 static void dasd_request_done(struct request *req) 3223 { 3224 blk_mq_end_request(req, 0); 3225 blk_mq_run_hw_queues(req->q, true); 3226 } 3227 3228 static struct blk_mq_ops dasd_mq_ops = { 3229 .queue_rq = do_dasd_request, 3230 .complete = dasd_request_done, 3231 .timeout = dasd_times_out, 3232 .init_hctx = dasd_init_hctx, 3233 .exit_hctx = dasd_exit_hctx, 3234 }; 3235 3236 /* 3237 * Allocate and initialize request queue and default I/O scheduler. 3238 */ 3239 static int dasd_alloc_queue(struct dasd_block *block) 3240 { 3241 int rc; 3242 3243 block->tag_set.ops = &dasd_mq_ops; 3244 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3245 block->tag_set.nr_hw_queues = nr_hw_queues; 3246 block->tag_set.queue_depth = queue_depth; 3247 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3248 block->tag_set.numa_node = NUMA_NO_NODE; 3249 3250 rc = blk_mq_alloc_tag_set(&block->tag_set); 3251 if (rc) 3252 return rc; 3253 3254 block->request_queue = blk_mq_init_queue(&block->tag_set); 3255 if (IS_ERR(block->request_queue)) 3256 return PTR_ERR(block->request_queue); 3257 3258 block->request_queue->queuedata = block; 3259 3260 return 0; 3261 } 3262 3263 /* 3264 * Deactivate and free request queue. 3265 */ 3266 static void dasd_free_queue(struct dasd_block *block) 3267 { 3268 if (block->request_queue) { 3269 blk_cleanup_queue(block->request_queue); 3270 blk_mq_free_tag_set(&block->tag_set); 3271 block->request_queue = NULL; 3272 } 3273 } 3274 3275 static int dasd_open(struct block_device *bdev, fmode_t mode) 3276 { 3277 struct dasd_device *base; 3278 int rc; 3279 3280 base = dasd_device_from_gendisk(bdev->bd_disk); 3281 if (!base) 3282 return -ENODEV; 3283 3284 atomic_inc(&base->block->open_count); 3285 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3286 rc = -ENODEV; 3287 goto unlock; 3288 } 3289 3290 if (!try_module_get(base->discipline->owner)) { 3291 rc = -EINVAL; 3292 goto unlock; 3293 } 3294 3295 if (dasd_probeonly) { 3296 dev_info(&base->cdev->dev, 3297 "Accessing the DASD failed because it is in " 3298 "probeonly mode\n"); 3299 rc = -EPERM; 3300 goto out; 3301 } 3302 3303 if (base->state <= DASD_STATE_BASIC) { 3304 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3305 " Cannot open unrecognized device"); 3306 rc = -ENODEV; 3307 goto out; 3308 } 3309 3310 if ((mode & FMODE_WRITE) && 3311 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3312 (base->features & DASD_FEATURE_READONLY))) { 3313 rc = -EROFS; 3314 goto out; 3315 } 3316 3317 dasd_put_device(base); 3318 return 0; 3319 3320 out: 3321 module_put(base->discipline->owner); 3322 unlock: 3323 atomic_dec(&base->block->open_count); 3324 dasd_put_device(base); 3325 return rc; 3326 } 3327 3328 static void dasd_release(struct gendisk *disk, fmode_t mode) 3329 { 3330 struct dasd_device *base = dasd_device_from_gendisk(disk); 3331 if (base) { 3332 atomic_dec(&base->block->open_count); 3333 module_put(base->discipline->owner); 3334 dasd_put_device(base); 3335 } 3336 } 3337 3338 /* 3339 * Return disk geometry. 3340 */ 3341 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3342 { 3343 struct dasd_device *base; 3344 3345 base = dasd_device_from_gendisk(bdev->bd_disk); 3346 if (!base) 3347 return -ENODEV; 3348 3349 if (!base->discipline || 3350 !base->discipline->fill_geometry) { 3351 dasd_put_device(base); 3352 return -EINVAL; 3353 } 3354 base->discipline->fill_geometry(base->block, geo); 3355 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3356 dasd_put_device(base); 3357 return 0; 3358 } 3359 3360 const struct block_device_operations 3361 dasd_device_operations = { 3362 .owner = THIS_MODULE, 3363 .open = dasd_open, 3364 .release = dasd_release, 3365 .ioctl = dasd_ioctl, 3366 .compat_ioctl = dasd_ioctl, 3367 .getgeo = dasd_getgeo, 3368 .set_read_only = dasd_set_read_only, 3369 }; 3370 3371 /******************************************************************************* 3372 * end of block device operations 3373 */ 3374 3375 static void 3376 dasd_exit(void) 3377 { 3378 #ifdef CONFIG_PROC_FS 3379 dasd_proc_exit(); 3380 #endif 3381 dasd_eer_exit(); 3382 kmem_cache_destroy(dasd_page_cache); 3383 dasd_page_cache = NULL; 3384 dasd_gendisk_exit(); 3385 dasd_devmap_exit(); 3386 if (dasd_debug_area != NULL) { 3387 debug_unregister(dasd_debug_area); 3388 dasd_debug_area = NULL; 3389 } 3390 dasd_statistics_removeroot(); 3391 } 3392 3393 /* 3394 * SECTION: common functions for ccw_driver use 3395 */ 3396 3397 /* 3398 * Is the device read-only? 3399 * Note that this function does not report the setting of the 3400 * readonly device attribute, but how it is configured in z/VM. 3401 */ 3402 int dasd_device_is_ro(struct dasd_device *device) 3403 { 3404 struct ccw_dev_id dev_id; 3405 struct diag210 diag_data; 3406 int rc; 3407 3408 if (!MACHINE_IS_VM) 3409 return 0; 3410 ccw_device_get_id(device->cdev, &dev_id); 3411 memset(&diag_data, 0, sizeof(diag_data)); 3412 diag_data.vrdcdvno = dev_id.devno; 3413 diag_data.vrdclen = sizeof(diag_data); 3414 rc = diag210(&diag_data); 3415 if (rc == 0 || rc == 2) { 3416 return diag_data.vrdcvfla & 0x80; 3417 } else { 3418 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3419 dev_id.devno, rc); 3420 return 0; 3421 } 3422 } 3423 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3424 3425 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3426 { 3427 struct ccw_device *cdev = data; 3428 int ret; 3429 3430 ret = ccw_device_set_online(cdev); 3431 if (ret) 3432 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3433 dev_name(&cdev->dev), ret); 3434 } 3435 3436 /* 3437 * Initial attempt at a probe function. this can be simplified once 3438 * the other detection code is gone. 3439 */ 3440 int dasd_generic_probe(struct ccw_device *cdev) 3441 { 3442 cdev->handler = &dasd_int_handler; 3443 3444 /* 3445 * Automatically online either all dasd devices (dasd_autodetect) 3446 * or all devices specified with dasd= parameters during 3447 * initial probe. 3448 */ 3449 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3450 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3451 async_schedule(dasd_generic_auto_online, cdev); 3452 return 0; 3453 } 3454 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3455 3456 void dasd_generic_free_discipline(struct dasd_device *device) 3457 { 3458 /* Forget the discipline information. */ 3459 if (device->discipline) { 3460 if (device->discipline->uncheck_device) 3461 device->discipline->uncheck_device(device); 3462 module_put(device->discipline->owner); 3463 device->discipline = NULL; 3464 } 3465 if (device->base_discipline) { 3466 module_put(device->base_discipline->owner); 3467 device->base_discipline = NULL; 3468 } 3469 } 3470 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3471 3472 /* 3473 * This will one day be called from a global not_oper handler. 3474 * It is also used by driver_unregister during module unload. 3475 */ 3476 void dasd_generic_remove(struct ccw_device *cdev) 3477 { 3478 struct dasd_device *device; 3479 struct dasd_block *block; 3480 3481 device = dasd_device_from_cdev(cdev); 3482 if (IS_ERR(device)) 3483 return; 3484 3485 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3486 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3487 /* Already doing offline processing */ 3488 dasd_put_device(device); 3489 return; 3490 } 3491 /* 3492 * This device is removed unconditionally. Set offline 3493 * flag to prevent dasd_open from opening it while it is 3494 * no quite down yet. 3495 */ 3496 dasd_set_target_state(device, DASD_STATE_NEW); 3497 cdev->handler = NULL; 3498 /* dasd_delete_device destroys the device reference. */ 3499 block = device->block; 3500 dasd_delete_device(device); 3501 /* 3502 * life cycle of block is bound to device, so delete it after 3503 * device was safely removed 3504 */ 3505 if (block) 3506 dasd_free_block(block); 3507 } 3508 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3509 3510 /* 3511 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3512 * the device is detected for the first time and is supposed to be used 3513 * or the user has started activation through sysfs. 3514 */ 3515 int dasd_generic_set_online(struct ccw_device *cdev, 3516 struct dasd_discipline *base_discipline) 3517 { 3518 struct dasd_discipline *discipline; 3519 struct dasd_device *device; 3520 int rc; 3521 3522 /* first online clears initial online feature flag */ 3523 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3524 device = dasd_create_device(cdev); 3525 if (IS_ERR(device)) 3526 return PTR_ERR(device); 3527 3528 discipline = base_discipline; 3529 if (device->features & DASD_FEATURE_USEDIAG) { 3530 if (!dasd_diag_discipline_pointer) { 3531 /* Try to load the required module. */ 3532 rc = request_module(DASD_DIAG_MOD); 3533 if (rc) { 3534 pr_warn("%s Setting the DASD online failed " 3535 "because the required module %s " 3536 "could not be loaded (rc=%d)\n", 3537 dev_name(&cdev->dev), DASD_DIAG_MOD, 3538 rc); 3539 dasd_delete_device(device); 3540 return -ENODEV; 3541 } 3542 } 3543 /* Module init could have failed, so check again here after 3544 * request_module(). */ 3545 if (!dasd_diag_discipline_pointer) { 3546 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3547 dev_name(&cdev->dev)); 3548 dasd_delete_device(device); 3549 return -ENODEV; 3550 } 3551 discipline = dasd_diag_discipline_pointer; 3552 } 3553 if (!try_module_get(base_discipline->owner)) { 3554 dasd_delete_device(device); 3555 return -EINVAL; 3556 } 3557 if (!try_module_get(discipline->owner)) { 3558 module_put(base_discipline->owner); 3559 dasd_delete_device(device); 3560 return -EINVAL; 3561 } 3562 device->base_discipline = base_discipline; 3563 device->discipline = discipline; 3564 3565 /* check_device will allocate block device if necessary */ 3566 rc = discipline->check_device(device); 3567 if (rc) { 3568 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3569 dev_name(&cdev->dev), discipline->name, rc); 3570 module_put(discipline->owner); 3571 module_put(base_discipline->owner); 3572 dasd_delete_device(device); 3573 return rc; 3574 } 3575 3576 dasd_set_target_state(device, DASD_STATE_ONLINE); 3577 if (device->state <= DASD_STATE_KNOWN) { 3578 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3579 dev_name(&cdev->dev)); 3580 rc = -ENODEV; 3581 dasd_set_target_state(device, DASD_STATE_NEW); 3582 if (device->block) 3583 dasd_free_block(device->block); 3584 dasd_delete_device(device); 3585 } else 3586 pr_debug("dasd_generic device %s found\n", 3587 dev_name(&cdev->dev)); 3588 3589 wait_event(dasd_init_waitq, _wait_for_device(device)); 3590 3591 dasd_put_device(device); 3592 return rc; 3593 } 3594 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3595 3596 int dasd_generic_set_offline(struct ccw_device *cdev) 3597 { 3598 struct dasd_device *device; 3599 struct dasd_block *block; 3600 int max_count, open_count, rc; 3601 unsigned long flags; 3602 3603 rc = 0; 3604 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3605 device = dasd_device_from_cdev_locked(cdev); 3606 if (IS_ERR(device)) { 3607 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3608 return PTR_ERR(device); 3609 } 3610 3611 /* 3612 * We must make sure that this device is currently not in use. 3613 * The open_count is increased for every opener, that includes 3614 * the blkdev_get in dasd_scan_partitions. We are only interested 3615 * in the other openers. 3616 */ 3617 if (device->block) { 3618 max_count = device->block->bdev ? 0 : -1; 3619 open_count = atomic_read(&device->block->open_count); 3620 if (open_count > max_count) { 3621 if (open_count > 0) 3622 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3623 dev_name(&cdev->dev), open_count); 3624 else 3625 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3626 dev_name(&cdev->dev)); 3627 rc = -EBUSY; 3628 goto out_err; 3629 } 3630 } 3631 3632 /* 3633 * Test if the offline processing is already running and exit if so. 3634 * If a safe offline is being processed this could only be a normal 3635 * offline that should be able to overtake the safe offline and 3636 * cancel any I/O we do not want to wait for any longer 3637 */ 3638 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3639 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3640 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3641 &device->flags); 3642 } else { 3643 rc = -EBUSY; 3644 goto out_err; 3645 } 3646 } 3647 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3648 3649 /* 3650 * if safe_offline is called set safe_offline_running flag and 3651 * clear safe_offline so that a call to normal offline 3652 * can overrun safe_offline processing 3653 */ 3654 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3655 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3656 /* need to unlock here to wait for outstanding I/O */ 3657 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3658 /* 3659 * If we want to set the device safe offline all IO operations 3660 * should be finished before continuing the offline process 3661 * so sync bdev first and then wait for our queues to become 3662 * empty 3663 */ 3664 if (device->block) { 3665 rc = fsync_bdev(device->block->bdev); 3666 if (rc != 0) 3667 goto interrupted; 3668 } 3669 dasd_schedule_device_bh(device); 3670 rc = wait_event_interruptible(shutdown_waitq, 3671 _wait_for_empty_queues(device)); 3672 if (rc != 0) 3673 goto interrupted; 3674 3675 /* 3676 * check if a normal offline process overtook the offline 3677 * processing in this case simply do nothing beside returning 3678 * that we got interrupted 3679 * otherwise mark safe offline as not running any longer and 3680 * continue with normal offline 3681 */ 3682 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3683 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3684 rc = -ERESTARTSYS; 3685 goto out_err; 3686 } 3687 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3688 } 3689 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3690 3691 dasd_set_target_state(device, DASD_STATE_NEW); 3692 /* dasd_delete_device destroys the device reference. */ 3693 block = device->block; 3694 dasd_delete_device(device); 3695 /* 3696 * life cycle of block is bound to device, so delete it after 3697 * device was safely removed 3698 */ 3699 if (block) 3700 dasd_free_block(block); 3701 3702 return 0; 3703 3704 interrupted: 3705 /* interrupted by signal */ 3706 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3707 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3708 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3709 out_err: 3710 dasd_put_device(device); 3711 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3712 return rc; 3713 } 3714 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3715 3716 int dasd_generic_last_path_gone(struct dasd_device *device) 3717 { 3718 struct dasd_ccw_req *cqr; 3719 3720 dev_warn(&device->cdev->dev, "No operational channel path is left " 3721 "for the device\n"); 3722 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3723 /* First of all call extended error reporting. */ 3724 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3725 3726 if (device->state < DASD_STATE_BASIC) 3727 return 0; 3728 /* Device is active. We want to keep it. */ 3729 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3730 if ((cqr->status == DASD_CQR_IN_IO) || 3731 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3732 cqr->status = DASD_CQR_QUEUED; 3733 cqr->retries++; 3734 } 3735 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3736 dasd_device_clear_timer(device); 3737 dasd_schedule_device_bh(device); 3738 return 1; 3739 } 3740 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3741 3742 int dasd_generic_path_operational(struct dasd_device *device) 3743 { 3744 dev_info(&device->cdev->dev, "A channel path to the device has become " 3745 "operational\n"); 3746 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3747 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3748 dasd_schedule_device_bh(device); 3749 if (device->block) { 3750 dasd_schedule_block_bh(device->block); 3751 if (device->block->request_queue) 3752 blk_mq_run_hw_queues(device->block->request_queue, 3753 true); 3754 } 3755 3756 if (!device->stopped) 3757 wake_up(&generic_waitq); 3758 3759 return 1; 3760 } 3761 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3762 3763 int dasd_generic_notify(struct ccw_device *cdev, int event) 3764 { 3765 struct dasd_device *device; 3766 int ret; 3767 3768 device = dasd_device_from_cdev_locked(cdev); 3769 if (IS_ERR(device)) 3770 return 0; 3771 ret = 0; 3772 switch (event) { 3773 case CIO_GONE: 3774 case CIO_BOXED: 3775 case CIO_NO_PATH: 3776 dasd_path_no_path(device); 3777 ret = dasd_generic_last_path_gone(device); 3778 break; 3779 case CIO_OPER: 3780 ret = 1; 3781 if (dasd_path_get_opm(device)) 3782 ret = dasd_generic_path_operational(device); 3783 break; 3784 } 3785 dasd_put_device(device); 3786 return ret; 3787 } 3788 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3789 3790 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3791 { 3792 struct dasd_device *device; 3793 int chp, oldopm, hpfpm, ifccpm; 3794 3795 device = dasd_device_from_cdev_locked(cdev); 3796 if (IS_ERR(device)) 3797 return; 3798 3799 oldopm = dasd_path_get_opm(device); 3800 for (chp = 0; chp < 8; chp++) { 3801 if (path_event[chp] & PE_PATH_GONE) { 3802 dasd_path_notoper(device, chp); 3803 } 3804 if (path_event[chp] & PE_PATH_AVAILABLE) { 3805 dasd_path_available(device, chp); 3806 dasd_schedule_device_bh(device); 3807 } 3808 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3809 if (!dasd_path_is_operational(device, chp) && 3810 !dasd_path_need_verify(device, chp)) { 3811 /* 3812 * we can not establish a pathgroup on an 3813 * unavailable path, so trigger a path 3814 * verification first 3815 */ 3816 dasd_path_available(device, chp); 3817 dasd_schedule_device_bh(device); 3818 } 3819 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3820 "Pathgroup re-established\n"); 3821 if (device->discipline->kick_validate) 3822 device->discipline->kick_validate(device); 3823 } 3824 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3825 dasd_path_fcsec_update(device, chp); 3826 dasd_schedule_device_bh(device); 3827 } 3828 } 3829 hpfpm = dasd_path_get_hpfpm(device); 3830 ifccpm = dasd_path_get_ifccpm(device); 3831 if (!dasd_path_get_opm(device) && hpfpm) { 3832 /* 3833 * device has no operational paths but at least one path is 3834 * disabled due to HPF errors 3835 * disable HPF at all and use the path(s) again 3836 */ 3837 if (device->discipline->disable_hpf) 3838 device->discipline->disable_hpf(device); 3839 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3840 dasd_path_set_tbvpm(device, hpfpm); 3841 dasd_schedule_device_bh(device); 3842 dasd_schedule_requeue(device); 3843 } else if (!dasd_path_get_opm(device) && ifccpm) { 3844 /* 3845 * device has no operational paths but at least one path is 3846 * disabled due to IFCC errors 3847 * trigger path verification on paths with IFCC errors 3848 */ 3849 dasd_path_set_tbvpm(device, ifccpm); 3850 dasd_schedule_device_bh(device); 3851 } 3852 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { 3853 dev_warn(&device->cdev->dev, 3854 "No verified channel paths remain for the device\n"); 3855 DBF_DEV_EVENT(DBF_WARNING, device, 3856 "%s", "last verified path gone"); 3857 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3858 dasd_device_set_stop_bits(device, 3859 DASD_STOPPED_DC_WAIT); 3860 } 3861 dasd_put_device(device); 3862 } 3863 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3864 3865 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3866 { 3867 if (!dasd_path_get_opm(device) && lpm) { 3868 dasd_path_set_opm(device, lpm); 3869 dasd_generic_path_operational(device); 3870 } else 3871 dasd_path_add_opm(device, lpm); 3872 return 0; 3873 } 3874 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3875 3876 void dasd_generic_space_exhaust(struct dasd_device *device, 3877 struct dasd_ccw_req *cqr) 3878 { 3879 dasd_eer_write(device, NULL, DASD_EER_NOSPC); 3880 3881 if (device->state < DASD_STATE_BASIC) 3882 return; 3883 3884 if (cqr->status == DASD_CQR_IN_IO || 3885 cqr->status == DASD_CQR_CLEAR_PENDING) { 3886 cqr->status = DASD_CQR_QUEUED; 3887 cqr->retries++; 3888 } 3889 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); 3890 dasd_device_clear_timer(device); 3891 dasd_schedule_device_bh(device); 3892 } 3893 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); 3894 3895 void dasd_generic_space_avail(struct dasd_device *device) 3896 { 3897 dev_info(&device->cdev->dev, "Extent pool space is available\n"); 3898 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); 3899 3900 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); 3901 dasd_schedule_device_bh(device); 3902 3903 if (device->block) { 3904 dasd_schedule_block_bh(device->block); 3905 if (device->block->request_queue) 3906 blk_mq_run_hw_queues(device->block->request_queue, true); 3907 } 3908 if (!device->stopped) 3909 wake_up(&generic_waitq); 3910 } 3911 EXPORT_SYMBOL_GPL(dasd_generic_space_avail); 3912 3913 /* 3914 * clear active requests and requeue them to block layer if possible 3915 */ 3916 static int dasd_generic_requeue_all_requests(struct dasd_device *device) 3917 { 3918 struct list_head requeue_queue; 3919 struct dasd_ccw_req *cqr, *n; 3920 struct dasd_ccw_req *refers; 3921 int rc; 3922 3923 INIT_LIST_HEAD(&requeue_queue); 3924 spin_lock_irq(get_ccwdev_lock(device->cdev)); 3925 rc = 0; 3926 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3927 /* Check status and move request to flush_queue */ 3928 if (cqr->status == DASD_CQR_IN_IO) { 3929 rc = device->discipline->term_IO(cqr); 3930 if (rc) { 3931 /* unable to terminate requeust */ 3932 dev_err(&device->cdev->dev, 3933 "Unable to terminate request %p " 3934 "on suspend\n", cqr); 3935 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3936 dasd_put_device(device); 3937 return rc; 3938 } 3939 } 3940 list_move_tail(&cqr->devlist, &requeue_queue); 3941 } 3942 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3943 3944 list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { 3945 wait_event(dasd_flush_wq, 3946 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3947 3948 /* 3949 * requeue requests to blocklayer will only work 3950 * for block device requests 3951 */ 3952 if (_dasd_requeue_request(cqr)) 3953 continue; 3954 3955 /* remove requests from device and block queue */ 3956 list_del_init(&cqr->devlist); 3957 while (cqr->refers != NULL) { 3958 refers = cqr->refers; 3959 /* remove the request from the block queue */ 3960 list_del(&cqr->blocklist); 3961 /* free the finished erp request */ 3962 dasd_free_erp_request(cqr, cqr->memdev); 3963 cqr = refers; 3964 } 3965 3966 /* 3967 * _dasd_requeue_request already checked for a valid 3968 * blockdevice, no need to check again 3969 * all erp requests (cqr->refers) have a cqr->block 3970 * pointer copy from the original cqr 3971 */ 3972 list_del_init(&cqr->blocklist); 3973 cqr->block->base->discipline->free_cp( 3974 cqr, (struct request *) cqr->callback_data); 3975 } 3976 3977 /* 3978 * if requests remain then they are internal request 3979 * and go back to the device queue 3980 */ 3981 if (!list_empty(&requeue_queue)) { 3982 /* move freeze_queue to start of the ccw_queue */ 3983 spin_lock_irq(get_ccwdev_lock(device->cdev)); 3984 list_splice_tail(&requeue_queue, &device->ccw_queue); 3985 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3986 } 3987 dasd_schedule_device_bh(device); 3988 return rc; 3989 } 3990 3991 static void do_requeue_requests(struct work_struct *work) 3992 { 3993 struct dasd_device *device = container_of(work, struct dasd_device, 3994 requeue_requests); 3995 dasd_generic_requeue_all_requests(device); 3996 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC); 3997 if (device->block) 3998 dasd_schedule_block_bh(device->block); 3999 dasd_put_device(device); 4000 } 4001 4002 void dasd_schedule_requeue(struct dasd_device *device) 4003 { 4004 dasd_get_device(device); 4005 /* queue call to dasd_reload_device to the kernel event daemon. */ 4006 if (!schedule_work(&device->requeue_requests)) 4007 dasd_put_device(device); 4008 } 4009 EXPORT_SYMBOL(dasd_schedule_requeue); 4010 4011 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 4012 int rdc_buffer_size, 4013 int magic) 4014 { 4015 struct dasd_ccw_req *cqr; 4016 struct ccw1 *ccw; 4017 4018 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device, 4019 NULL); 4020 4021 if (IS_ERR(cqr)) { 4022 /* internal error 13 - Allocating the RDC request failed*/ 4023 dev_err(&device->cdev->dev, 4024 "An error occurred in the DASD device driver, " 4025 "reason=%s\n", "13"); 4026 return cqr; 4027 } 4028 4029 ccw = cqr->cpaddr; 4030 ccw->cmd_code = CCW_CMD_RDC; 4031 ccw->cda = (__u32)(addr_t) cqr->data; 4032 ccw->flags = 0; 4033 ccw->count = rdc_buffer_size; 4034 cqr->startdev = device; 4035 cqr->memdev = device; 4036 cqr->expires = 10*HZ; 4037 cqr->retries = 256; 4038 cqr->buildclk = get_tod_clock(); 4039 cqr->status = DASD_CQR_FILLED; 4040 return cqr; 4041 } 4042 4043 4044 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 4045 void *rdc_buffer, int rdc_buffer_size) 4046 { 4047 int ret; 4048 struct dasd_ccw_req *cqr; 4049 4050 cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic); 4051 if (IS_ERR(cqr)) 4052 return PTR_ERR(cqr); 4053 4054 ret = dasd_sleep_on(cqr); 4055 if (ret == 0) 4056 memcpy(rdc_buffer, cqr->data, rdc_buffer_size); 4057 dasd_sfree_request(cqr, cqr->memdev); 4058 return ret; 4059 } 4060 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 4061 4062 /* 4063 * In command mode and transport mode we need to look for sense 4064 * data in different places. The sense data itself is allways 4065 * an array of 32 bytes, so we can unify the sense data access 4066 * for both modes. 4067 */ 4068 char *dasd_get_sense(struct irb *irb) 4069 { 4070 struct tsb *tsb = NULL; 4071 char *sense = NULL; 4072 4073 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 4074 if (irb->scsw.tm.tcw) 4075 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 4076 irb->scsw.tm.tcw); 4077 if (tsb && tsb->length == 64 && tsb->flags) 4078 switch (tsb->flags & 0x07) { 4079 case 1: /* tsa_iostat */ 4080 sense = tsb->tsa.iostat.sense; 4081 break; 4082 case 2: /* tsa_ddpc */ 4083 sense = tsb->tsa.ddpc.sense; 4084 break; 4085 default: 4086 /* currently we don't use interrogate data */ 4087 break; 4088 } 4089 } else if (irb->esw.esw0.erw.cons) { 4090 sense = irb->ecw; 4091 } 4092 return sense; 4093 } 4094 EXPORT_SYMBOL_GPL(dasd_get_sense); 4095 4096 void dasd_generic_shutdown(struct ccw_device *cdev) 4097 { 4098 struct dasd_device *device; 4099 4100 device = dasd_device_from_cdev(cdev); 4101 if (IS_ERR(device)) 4102 return; 4103 4104 if (device->block) 4105 dasd_schedule_block_bh(device->block); 4106 4107 dasd_schedule_device_bh(device); 4108 4109 wait_event(shutdown_waitq, _wait_for_empty_queues(device)); 4110 } 4111 EXPORT_SYMBOL_GPL(dasd_generic_shutdown); 4112 4113 static int __init dasd_init(void) 4114 { 4115 int rc; 4116 4117 init_waitqueue_head(&dasd_init_waitq); 4118 init_waitqueue_head(&dasd_flush_wq); 4119 init_waitqueue_head(&generic_waitq); 4120 init_waitqueue_head(&shutdown_waitq); 4121 4122 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 4123 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 4124 if (dasd_debug_area == NULL) { 4125 rc = -ENOMEM; 4126 goto failed; 4127 } 4128 debug_register_view(dasd_debug_area, &debug_sprintf_view); 4129 debug_set_level(dasd_debug_area, DBF_WARNING); 4130 4131 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 4132 4133 dasd_diag_discipline_pointer = NULL; 4134 4135 dasd_statistics_createroot(); 4136 4137 rc = dasd_devmap_init(); 4138 if (rc) 4139 goto failed; 4140 rc = dasd_gendisk_init(); 4141 if (rc) 4142 goto failed; 4143 rc = dasd_parse(); 4144 if (rc) 4145 goto failed; 4146 rc = dasd_eer_init(); 4147 if (rc) 4148 goto failed; 4149 #ifdef CONFIG_PROC_FS 4150 rc = dasd_proc_init(); 4151 if (rc) 4152 goto failed; 4153 #endif 4154 4155 return 0; 4156 failed: 4157 pr_info("The DASD device driver could not be initialized\n"); 4158 dasd_exit(); 4159 return rc; 4160 } 4161 4162 module_init(dasd_init); 4163 module_exit(dasd_exit); 4164