// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD "dasd_diag_mod"

static unsigned int queue_depth = 32;
static unsigned int nr_hw_queues = 4;

module_param(queue_depth, uint, 0444);
MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");

module_param(nr_hw_queues, uint, 0444);
MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
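 *
 * Besides the dasd_device structure itself this sets up three DMA-capable
 * memory pools used for building channel programs: two pages for normal
 * CCW requests, one page reserved for error recovery requests and two
 * pages for ESE format handling.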
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Set up the gendisk, debugfs entries and the debug area for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Tear down the structures set up in dasd_state_known_to_basic and
 * terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
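 * A KOBJ_CHANGE uevent is generated for the disk and all of its
 * partitions so that udev can re-examine the device; for raw-track
 * access (DASD_FEATURE_USERAW) only the disk itself is notified.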
427 */ 428 static int 429 dasd_state_ready_to_online(struct dasd_device * device) 430 { 431 struct gendisk *disk; 432 struct disk_part_iter piter; 433 struct block_device *part; 434 435 device->state = DASD_STATE_ONLINE; 436 if (device->block) { 437 dasd_schedule_block_bh(device->block); 438 if ((device->features & DASD_FEATURE_USERAW)) { 439 disk = device->block->gdp; 440 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); 441 return 0; 442 } 443 disk = device->block->bdev->bd_disk; 444 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 445 while ((part = disk_part_iter_next(&piter))) 446 kobject_uevent(bdev_kobj(part), KOBJ_CHANGE); 447 disk_part_iter_exit(&piter); 448 } 449 return 0; 450 } 451 452 /* 453 * Stop the requeueing of requests again. 454 */ 455 static int dasd_state_online_to_ready(struct dasd_device *device) 456 { 457 int rc; 458 struct gendisk *disk; 459 struct disk_part_iter piter; 460 struct block_device *part; 461 462 if (device->discipline->online_to_ready) { 463 rc = device->discipline->online_to_ready(device); 464 if (rc) 465 return rc; 466 } 467 468 device->state = DASD_STATE_READY; 469 if (device->block && !(device->features & DASD_FEATURE_USERAW)) { 470 disk = device->block->bdev->bd_disk; 471 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 472 while ((part = disk_part_iter_next(&piter))) 473 kobject_uevent(bdev_kobj(part), KOBJ_CHANGE); 474 disk_part_iter_exit(&piter); 475 } 476 return 0; 477 } 478 479 /* 480 * Device startup state changes. 481 */ 482 static int dasd_increase_state(struct dasd_device *device) 483 { 484 int rc; 485 486 rc = 0; 487 if (device->state == DASD_STATE_NEW && 488 device->target >= DASD_STATE_KNOWN) 489 rc = dasd_state_new_to_known(device); 490 491 if (!rc && 492 device->state == DASD_STATE_KNOWN && 493 device->target >= DASD_STATE_BASIC) 494 rc = dasd_state_known_to_basic(device); 495 496 if (!rc && 497 device->state == DASD_STATE_BASIC && 498 device->target >= DASD_STATE_READY) 499 rc = dasd_state_basic_to_ready(device); 500 501 if (!rc && 502 device->state == DASD_STATE_UNFMT && 503 device->target > DASD_STATE_UNFMT) 504 rc = -EPERM; 505 506 if (!rc && 507 device->state == DASD_STATE_READY && 508 device->target >= DASD_STATE_ONLINE) 509 rc = dasd_state_ready_to_online(device); 510 511 return rc; 512 } 513 514 /* 515 * Device shutdown state changes. 516 */ 517 static int dasd_decrease_state(struct dasd_device *device) 518 { 519 int rc; 520 521 rc = 0; 522 if (device->state == DASD_STATE_ONLINE && 523 device->target <= DASD_STATE_READY) 524 rc = dasd_state_online_to_ready(device); 525 526 if (!rc && 527 device->state == DASD_STATE_READY && 528 device->target <= DASD_STATE_BASIC) 529 rc = dasd_state_ready_to_basic(device); 530 531 if (!rc && 532 device->state == DASD_STATE_UNFMT && 533 device->target <= DASD_STATE_BASIC) 534 rc = dasd_state_unfmt_to_basic(device); 535 536 if (!rc && 537 device->state == DASD_STATE_BASIC && 538 device->target <= DASD_STATE_KNOWN) 539 rc = dasd_state_basic_to_known(device); 540 541 if (!rc && 542 device->state == DASD_STATE_KNOWN && 543 device->target <= DASD_STATE_NEW) 544 rc = dasd_state_known_to_new(device); 545 546 return rc; 547 } 548 549 /* 550 * This is the main startup/shutdown routine. 551 */ 552 static void dasd_change_state(struct dasd_device *device) 553 { 554 int rc; 555 556 if (device->state == device->target) 557 /* Already where we want to go today... 
 */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_set_target_state);

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
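 *
 * Roughly, a discipline drives a request through this section as follows
 * (illustrative sketch only, error handling omitted):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	// build the channel program in cqr->cpaddr and cqr->data
 *	rc = dasd_start_IO(cqr);
 *	// the completion interrupt is delivered to dasd_int_handler(),
 *	// which updates cqr->status and schedules the device tasklet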
668 */ 669 670 unsigned int dasd_global_profile_level = DASD_PROFILE_OFF; 671 672 #ifdef CONFIG_DASD_PROFILE 673 struct dasd_profile dasd_global_profile = { 674 .lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock), 675 }; 676 static struct dentry *dasd_debugfs_global_entry; 677 678 /* 679 * Add profiling information for cqr before execution. 680 */ 681 static void dasd_profile_start(struct dasd_block *block, 682 struct dasd_ccw_req *cqr, 683 struct request *req) 684 { 685 struct list_head *l; 686 unsigned int counter; 687 struct dasd_device *device; 688 689 /* count the length of the chanq for statistics */ 690 counter = 0; 691 if (dasd_global_profile_level || block->profile.data) 692 list_for_each(l, &block->ccw_queue) 693 if (++counter >= 31) 694 break; 695 696 spin_lock(&dasd_global_profile.lock); 697 if (dasd_global_profile.data) { 698 dasd_global_profile.data->dasd_io_nr_req[counter]++; 699 if (rq_data_dir(req) == READ) 700 dasd_global_profile.data->dasd_read_nr_req[counter]++; 701 } 702 spin_unlock(&dasd_global_profile.lock); 703 704 spin_lock(&block->profile.lock); 705 if (block->profile.data) { 706 block->profile.data->dasd_io_nr_req[counter]++; 707 if (rq_data_dir(req) == READ) 708 block->profile.data->dasd_read_nr_req[counter]++; 709 } 710 spin_unlock(&block->profile.lock); 711 712 /* 713 * We count the request for the start device, even though it may run on 714 * some other device due to error recovery. This way we make sure that 715 * we count each request only once. 716 */ 717 device = cqr->startdev; 718 if (device->profile.data) { 719 counter = 1; /* request is not yet queued on the start device */ 720 list_for_each(l, &device->ccw_queue) 721 if (++counter >= 31) 722 break; 723 } 724 spin_lock(&device->profile.lock); 725 if (device->profile.data) { 726 device->profile.data->dasd_io_nr_req[counter]++; 727 if (rq_data_dir(req) == READ) 728 device->profile.data->dasd_read_nr_req[counter]++; 729 } 730 spin_unlock(&device->profile.lock); 731 } 732 733 /* 734 * Add profiling information for cqr after execution. 
735 */ 736 737 #define dasd_profile_counter(value, index) \ 738 { \ 739 for (index = 0; index < 31 && value >> (2+index); index++) \ 740 ; \ 741 } 742 743 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 744 int is_alias, 745 int is_tpm, 746 int is_read, 747 long sectors, 748 int sectors_ind, 749 int tottime_ind, 750 int tottimeps_ind, 751 int strtime_ind, 752 int irqtime_ind, 753 int irqtimeps_ind, 754 int endtime_ind) 755 { 756 /* in case of an overflow, reset the whole profile */ 757 if (data->dasd_io_reqs == UINT_MAX) { 758 memset(data, 0, sizeof(*data)); 759 ktime_get_real_ts64(&data->starttod); 760 } 761 data->dasd_io_reqs++; 762 data->dasd_io_sects += sectors; 763 if (is_alias) 764 data->dasd_io_alias++; 765 if (is_tpm) 766 data->dasd_io_tpm++; 767 768 data->dasd_io_secs[sectors_ind]++; 769 data->dasd_io_times[tottime_ind]++; 770 data->dasd_io_timps[tottimeps_ind]++; 771 data->dasd_io_time1[strtime_ind]++; 772 data->dasd_io_time2[irqtime_ind]++; 773 data->dasd_io_time2ps[irqtimeps_ind]++; 774 data->dasd_io_time3[endtime_ind]++; 775 776 if (is_read) { 777 data->dasd_read_reqs++; 778 data->dasd_read_sects += sectors; 779 if (is_alias) 780 data->dasd_read_alias++; 781 if (is_tpm) 782 data->dasd_read_tpm++; 783 data->dasd_read_secs[sectors_ind]++; 784 data->dasd_read_times[tottime_ind]++; 785 data->dasd_read_time1[strtime_ind]++; 786 data->dasd_read_time2[irqtime_ind]++; 787 data->dasd_read_time3[endtime_ind]++; 788 } 789 } 790 791 static void dasd_profile_end(struct dasd_block *block, 792 struct dasd_ccw_req *cqr, 793 struct request *req) 794 { 795 unsigned long strtime, irqtime, endtime, tottime; 796 unsigned long tottimeps, sectors; 797 struct dasd_device *device; 798 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 799 int irqtime_ind, irqtimeps_ind, endtime_ind; 800 struct dasd_profile_info *data; 801 802 device = cqr->startdev; 803 if (!(dasd_global_profile_level || 804 block->profile.data || 805 device->profile.data)) 806 return; 807 808 sectors = blk_rq_sectors(req); 809 if (!cqr->buildclk || !cqr->startclk || 810 !cqr->stopclk || !cqr->endclk || 811 !sectors) 812 return; 813 814 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 815 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 816 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 817 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 818 tottimeps = tottime / sectors; 819 820 dasd_profile_counter(sectors, sectors_ind); 821 dasd_profile_counter(tottime, tottime_ind); 822 dasd_profile_counter(tottimeps, tottimeps_ind); 823 dasd_profile_counter(strtime, strtime_ind); 824 dasd_profile_counter(irqtime, irqtime_ind); 825 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 826 dasd_profile_counter(endtime, endtime_ind); 827 828 spin_lock(&dasd_global_profile.lock); 829 if (dasd_global_profile.data) { 830 data = dasd_global_profile.data; 831 data->dasd_sum_times += tottime; 832 data->dasd_sum_time_str += strtime; 833 data->dasd_sum_time_irq += irqtime; 834 data->dasd_sum_time_end += endtime; 835 dasd_profile_end_add_data(dasd_global_profile.data, 836 cqr->startdev != block->base, 837 cqr->cpmode == 1, 838 rq_data_dir(req) == READ, 839 sectors, sectors_ind, tottime_ind, 840 tottimeps_ind, strtime_ind, 841 irqtime_ind, irqtimeps_ind, 842 endtime_ind); 843 } 844 spin_unlock(&dasd_global_profile.lock); 845 846 spin_lock(&block->profile.lock); 847 if (block->profile.data) { 848 data = block->profile.data; 849 data->dasd_sum_times += tottime; 850 data->dasd_sum_time_str += strtime; 851 
data->dasd_sum_time_irq += irqtime; 852 data->dasd_sum_time_end += endtime; 853 dasd_profile_end_add_data(block->profile.data, 854 cqr->startdev != block->base, 855 cqr->cpmode == 1, 856 rq_data_dir(req) == READ, 857 sectors, sectors_ind, tottime_ind, 858 tottimeps_ind, strtime_ind, 859 irqtime_ind, irqtimeps_ind, 860 endtime_ind); 861 } 862 spin_unlock(&block->profile.lock); 863 864 spin_lock(&device->profile.lock); 865 if (device->profile.data) { 866 data = device->profile.data; 867 data->dasd_sum_times += tottime; 868 data->dasd_sum_time_str += strtime; 869 data->dasd_sum_time_irq += irqtime; 870 data->dasd_sum_time_end += endtime; 871 dasd_profile_end_add_data(device->profile.data, 872 cqr->startdev != block->base, 873 cqr->cpmode == 1, 874 rq_data_dir(req) == READ, 875 sectors, sectors_ind, tottime_ind, 876 tottimeps_ind, strtime_ind, 877 irqtime_ind, irqtimeps_ind, 878 endtime_ind); 879 } 880 spin_unlock(&device->profile.lock); 881 } 882 883 void dasd_profile_reset(struct dasd_profile *profile) 884 { 885 struct dasd_profile_info *data; 886 887 spin_lock_bh(&profile->lock); 888 data = profile->data; 889 if (!data) { 890 spin_unlock_bh(&profile->lock); 891 return; 892 } 893 memset(data, 0, sizeof(*data)); 894 ktime_get_real_ts64(&data->starttod); 895 spin_unlock_bh(&profile->lock); 896 } 897 898 int dasd_profile_on(struct dasd_profile *profile) 899 { 900 struct dasd_profile_info *data; 901 902 data = kzalloc(sizeof(*data), GFP_KERNEL); 903 if (!data) 904 return -ENOMEM; 905 spin_lock_bh(&profile->lock); 906 if (profile->data) { 907 spin_unlock_bh(&profile->lock); 908 kfree(data); 909 return 0; 910 } 911 ktime_get_real_ts64(&data->starttod); 912 profile->data = data; 913 spin_unlock_bh(&profile->lock); 914 return 0; 915 } 916 917 void dasd_profile_off(struct dasd_profile *profile) 918 { 919 spin_lock_bh(&profile->lock); 920 kfree(profile->data); 921 profile->data = NULL; 922 spin_unlock_bh(&profile->lock); 923 } 924 925 char *dasd_get_user_string(const char __user *user_buf, size_t user_len) 926 { 927 char *buffer; 928 929 buffer = vmalloc(user_len + 1); 930 if (buffer == NULL) 931 return ERR_PTR(-ENOMEM); 932 if (copy_from_user(buffer, user_buf, user_len) != 0) { 933 vfree(buffer); 934 return ERR_PTR(-EFAULT); 935 } 936 /* got the string, now strip linefeed. 
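	 * For example, a three byte input of "on\n" is returned as the
	 * NUL-terminated string "on".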
*/ 937 if (buffer[user_len - 1] == '\n') 938 buffer[user_len - 1] = 0; 939 else 940 buffer[user_len] = 0; 941 return buffer; 942 } 943 944 static ssize_t dasd_stats_write(struct file *file, 945 const char __user *user_buf, 946 size_t user_len, loff_t *pos) 947 { 948 char *buffer, *str; 949 int rc; 950 struct seq_file *m = (struct seq_file *)file->private_data; 951 struct dasd_profile *prof = m->private; 952 953 if (user_len > 65536) 954 user_len = 65536; 955 buffer = dasd_get_user_string(user_buf, user_len); 956 if (IS_ERR(buffer)) 957 return PTR_ERR(buffer); 958 959 str = skip_spaces(buffer); 960 rc = user_len; 961 if (strncmp(str, "reset", 5) == 0) { 962 dasd_profile_reset(prof); 963 } else if (strncmp(str, "on", 2) == 0) { 964 rc = dasd_profile_on(prof); 965 if (rc) 966 goto out; 967 rc = user_len; 968 if (prof == &dasd_global_profile) { 969 dasd_profile_reset(prof); 970 dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY; 971 } 972 } else if (strncmp(str, "off", 3) == 0) { 973 if (prof == &dasd_global_profile) 974 dasd_global_profile_level = DASD_PROFILE_OFF; 975 dasd_profile_off(prof); 976 } else 977 rc = -EINVAL; 978 out: 979 vfree(buffer); 980 return rc; 981 } 982 983 static void dasd_stats_array(struct seq_file *m, unsigned int *array) 984 { 985 int i; 986 987 for (i = 0; i < 32; i++) 988 seq_printf(m, "%u ", array[i]); 989 seq_putc(m, '\n'); 990 } 991 992 static void dasd_stats_seq_print(struct seq_file *m, 993 struct dasd_profile_info *data) 994 { 995 seq_printf(m, "start_time %lld.%09ld\n", 996 (s64)data->starttod.tv_sec, data->starttod.tv_nsec); 997 seq_printf(m, "total_requests %u\n", data->dasd_io_reqs); 998 seq_printf(m, "total_sectors %u\n", data->dasd_io_sects); 999 seq_printf(m, "total_pav %u\n", data->dasd_io_alias); 1000 seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm); 1001 seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ? 1002 data->dasd_sum_times / data->dasd_io_reqs : 0UL); 1003 seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ? 1004 data->dasd_sum_time_str / data->dasd_io_reqs : 0UL); 1005 seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ? 1006 data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL); 1007 seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ? 
1008 data->dasd_sum_time_end / data->dasd_io_reqs : 0UL); 1009 seq_puts(m, "histogram_sectors "); 1010 dasd_stats_array(m, data->dasd_io_secs); 1011 seq_puts(m, "histogram_io_times "); 1012 dasd_stats_array(m, data->dasd_io_times); 1013 seq_puts(m, "histogram_io_times_weighted "); 1014 dasd_stats_array(m, data->dasd_io_timps); 1015 seq_puts(m, "histogram_time_build_to_ssch "); 1016 dasd_stats_array(m, data->dasd_io_time1); 1017 seq_puts(m, "histogram_time_ssch_to_irq "); 1018 dasd_stats_array(m, data->dasd_io_time2); 1019 seq_puts(m, "histogram_time_ssch_to_irq_weighted "); 1020 dasd_stats_array(m, data->dasd_io_time2ps); 1021 seq_puts(m, "histogram_time_irq_to_end "); 1022 dasd_stats_array(m, data->dasd_io_time3); 1023 seq_puts(m, "histogram_ccw_queue_length "); 1024 dasd_stats_array(m, data->dasd_io_nr_req); 1025 seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs); 1026 seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects); 1027 seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias); 1028 seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm); 1029 seq_puts(m, "histogram_read_sectors "); 1030 dasd_stats_array(m, data->dasd_read_secs); 1031 seq_puts(m, "histogram_read_times "); 1032 dasd_stats_array(m, data->dasd_read_times); 1033 seq_puts(m, "histogram_read_time_build_to_ssch "); 1034 dasd_stats_array(m, data->dasd_read_time1); 1035 seq_puts(m, "histogram_read_time_ssch_to_irq "); 1036 dasd_stats_array(m, data->dasd_read_time2); 1037 seq_puts(m, "histogram_read_time_irq_to_end "); 1038 dasd_stats_array(m, data->dasd_read_time3); 1039 seq_puts(m, "histogram_read_ccw_queue_length "); 1040 dasd_stats_array(m, data->dasd_read_nr_req); 1041 } 1042 1043 static int dasd_stats_show(struct seq_file *m, void *v) 1044 { 1045 struct dasd_profile *profile; 1046 struct dasd_profile_info *data; 1047 1048 profile = m->private; 1049 spin_lock_bh(&profile->lock); 1050 data = profile->data; 1051 if (!data) { 1052 spin_unlock_bh(&profile->lock); 1053 seq_puts(m, "disabled\n"); 1054 return 0; 1055 } 1056 dasd_stats_seq_print(m, data); 1057 spin_unlock_bh(&profile->lock); 1058 return 0; 1059 } 1060 1061 static int dasd_stats_open(struct inode *inode, struct file *file) 1062 { 1063 struct dasd_profile *profile = inode->i_private; 1064 return single_open(file, dasd_stats_show, profile); 1065 } 1066 1067 static const struct file_operations dasd_stats_raw_fops = { 1068 .owner = THIS_MODULE, 1069 .open = dasd_stats_open, 1070 .read = seq_read, 1071 .llseek = seq_lseek, 1072 .release = single_release, 1073 .write = dasd_stats_write, 1074 }; 1075 1076 static void dasd_profile_init(struct dasd_profile *profile, 1077 struct dentry *base_dentry) 1078 { 1079 umode_t mode; 1080 struct dentry *pde; 1081 1082 if (!base_dentry) 1083 return; 1084 profile->dentry = NULL; 1085 profile->data = NULL; 1086 mode = (S_IRUSR | S_IWUSR | S_IFREG); 1087 pde = debugfs_create_file("statistics", mode, base_dentry, 1088 profile, &dasd_stats_raw_fops); 1089 if (pde && !IS_ERR(pde)) 1090 profile->dentry = pde; 1091 return; 1092 } 1093 1094 static void dasd_profile_exit(struct dasd_profile *profile) 1095 { 1096 dasd_profile_off(profile); 1097 debugfs_remove(profile->dentry); 1098 profile->dentry = NULL; 1099 } 1100 1101 static void dasd_statistics_removeroot(void) 1102 { 1103 dasd_global_profile_level = DASD_PROFILE_OFF; 1104 dasd_profile_exit(&dasd_global_profile); 1105 debugfs_remove(dasd_debugfs_global_entry); 1106 debugfs_remove(dasd_debugfs_root_entry); 1107 } 1108 1109 static void 
dasd_statistics_createroot(void) 1110 { 1111 struct dentry *pde; 1112 1113 dasd_debugfs_root_entry = NULL; 1114 pde = debugfs_create_dir("dasd", NULL); 1115 if (!pde || IS_ERR(pde)) 1116 goto error; 1117 dasd_debugfs_root_entry = pde; 1118 pde = debugfs_create_dir("global", dasd_debugfs_root_entry); 1119 if (!pde || IS_ERR(pde)) 1120 goto error; 1121 dasd_debugfs_global_entry = pde; 1122 dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry); 1123 return; 1124 1125 error: 1126 DBF_EVENT(DBF_ERR, "%s", 1127 "Creation of the dasd debugfs interface failed"); 1128 dasd_statistics_removeroot(); 1129 return; 1130 } 1131 1132 #else 1133 #define dasd_profile_start(block, cqr, req) do {} while (0) 1134 #define dasd_profile_end(block, cqr, req) do {} while (0) 1135 1136 static void dasd_statistics_createroot(void) 1137 { 1138 return; 1139 } 1140 1141 static void dasd_statistics_removeroot(void) 1142 { 1143 return; 1144 } 1145 1146 int dasd_stats_generic_show(struct seq_file *m, void *v) 1147 { 1148 seq_puts(m, "Statistics are not activated in this kernel\n"); 1149 return 0; 1150 } 1151 1152 static void dasd_profile_init(struct dasd_profile *profile, 1153 struct dentry *base_dentry) 1154 { 1155 return; 1156 } 1157 1158 static void dasd_profile_exit(struct dasd_profile *profile) 1159 { 1160 return; 1161 } 1162 1163 int dasd_profile_on(struct dasd_profile *profile) 1164 { 1165 return 0; 1166 } 1167 1168 #endif /* CONFIG_DASD_PROFILE */ 1169 1170 static int dasd_hosts_show(struct seq_file *m, void *v) 1171 { 1172 struct dasd_device *device; 1173 int rc = -EOPNOTSUPP; 1174 1175 device = m->private; 1176 dasd_get_device(device); 1177 1178 if (device->discipline->hosts_print) 1179 rc = device->discipline->hosts_print(device, m); 1180 1181 dasd_put_device(device); 1182 return rc; 1183 } 1184 1185 DEFINE_SHOW_ATTRIBUTE(dasd_hosts); 1186 1187 static void dasd_hosts_exit(struct dasd_device *device) 1188 { 1189 debugfs_remove(device->hosts_dentry); 1190 device->hosts_dentry = NULL; 1191 } 1192 1193 static void dasd_hosts_init(struct dentry *base_dentry, 1194 struct dasd_device *device) 1195 { 1196 struct dentry *pde; 1197 umode_t mode; 1198 1199 if (!base_dentry) 1200 return; 1201 1202 mode = S_IRUSR | S_IFREG; 1203 pde = debugfs_create_file("host_access_list", mode, base_dentry, 1204 device, &dasd_hosts_fops); 1205 if (pde && !IS_ERR(pde)) 1206 device->hosts_dentry = pde; 1207 } 1208 1209 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize, 1210 struct dasd_device *device, 1211 struct dasd_ccw_req *cqr) 1212 { 1213 unsigned long flags; 1214 char *data, *chunk; 1215 int size = 0; 1216 1217 if (cplength > 0) 1218 size += cplength * sizeof(struct ccw1); 1219 if (datasize > 0) 1220 size += datasize; 1221 if (!cqr) 1222 size += (sizeof(*cqr) + 7L) & -8L; 1223 1224 spin_lock_irqsave(&device->mem_lock, flags); 1225 data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size); 1226 spin_unlock_irqrestore(&device->mem_lock, flags); 1227 if (!chunk) 1228 return ERR_PTR(-ENOMEM); 1229 if (!cqr) { 1230 cqr = (void *) data; 1231 data += (sizeof(*cqr) + 7L) & -8L; 1232 } 1233 memset(cqr, 0, sizeof(*cqr)); 1234 cqr->mem_chunk = chunk; 1235 if (cplength > 0) { 1236 cqr->cpaddr = data; 1237 data += cplength * sizeof(struct ccw1); 1238 memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1)); 1239 } 1240 if (datasize > 0) { 1241 cqr->data = data; 1242 memset(cqr->data, 0, datasize); 1243 } 1244 cqr->magic = magic; 1245 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 1246 
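	/*
	 * The request pins its start device; the reference taken here is
	 * dropped again by dasd_sfree_request().
	 */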
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
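 * Up to five clear attempts are made; if the subchannel reports -EINVAL
 * the request is treated as already terminated.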
1342 */ 1343 int dasd_term_IO(struct dasd_ccw_req *cqr) 1344 { 1345 struct dasd_device *device; 1346 int retries, rc; 1347 char errorstring[ERRORLENGTH]; 1348 1349 /* Check the cqr */ 1350 rc = dasd_check_cqr(cqr); 1351 if (rc) 1352 return rc; 1353 retries = 0; 1354 device = (struct dasd_device *) cqr->startdev; 1355 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 1356 rc = ccw_device_clear(device->cdev, (long) cqr); 1357 switch (rc) { 1358 case 0: /* termination successful */ 1359 cqr->status = DASD_CQR_CLEAR_PENDING; 1360 cqr->stopclk = get_tod_clock(); 1361 cqr->starttime = 0; 1362 DBF_DEV_EVENT(DBF_DEBUG, device, 1363 "terminate cqr %p successful", 1364 cqr); 1365 break; 1366 case -ENODEV: 1367 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1368 "device gone, retry"); 1369 break; 1370 case -EINVAL: 1371 /* 1372 * device not valid so no I/O could be running 1373 * handle CQR as termination successful 1374 */ 1375 cqr->status = DASD_CQR_CLEARED; 1376 cqr->stopclk = get_tod_clock(); 1377 cqr->starttime = 0; 1378 /* no retries for invalid devices */ 1379 cqr->retries = -1; 1380 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1381 "EINVAL, handle as terminated"); 1382 /* fake rc to success */ 1383 rc = 0; 1384 break; 1385 default: 1386 /* internal error 10 - unknown rc*/ 1387 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 1388 dev_err(&device->cdev->dev, "An error occurred in the " 1389 "DASD device driver, reason=%s\n", errorstring); 1390 BUG(); 1391 break; 1392 } 1393 retries++; 1394 } 1395 dasd_schedule_device_bh(device); 1396 return rc; 1397 } 1398 EXPORT_SYMBOL(dasd_term_IO); 1399 1400 /* 1401 * Start the i/o. This start_IO can fail if the channel is really busy. 1402 * In that case set up a timer to start the request later. 1403 */ 1404 int dasd_start_IO(struct dasd_ccw_req *cqr) 1405 { 1406 struct dasd_device *device; 1407 int rc; 1408 char errorstring[ERRORLENGTH]; 1409 1410 /* Check the cqr */ 1411 rc = dasd_check_cqr(cqr); 1412 if (rc) { 1413 cqr->intrc = rc; 1414 return rc; 1415 } 1416 device = (struct dasd_device *) cqr->startdev; 1417 if (((cqr->block && 1418 test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) || 1419 test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) && 1420 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 1421 DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p " 1422 "because of stolen lock", cqr); 1423 cqr->status = DASD_CQR_ERROR; 1424 cqr->intrc = -EPERM; 1425 return -EPERM; 1426 } 1427 if (cqr->retries < 0) { 1428 /* internal error 14 - start_IO run out of retries */ 1429 sprintf(errorstring, "14 %p", cqr); 1430 dev_err(&device->cdev->dev, "An error occurred in the DASD " 1431 "device driver, reason=%s\n", errorstring); 1432 cqr->status = DASD_CQR_ERROR; 1433 return -EIO; 1434 } 1435 cqr->startclk = get_tod_clock(); 1436 cqr->starttime = jiffies; 1437 cqr->retries--; 1438 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1439 cqr->lpm &= dasd_path_get_opm(device); 1440 if (!cqr->lpm) 1441 cqr->lpm = dasd_path_get_opm(device); 1442 } 1443 if (cqr->cpmode == 1) { 1444 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 1445 (long) cqr, cqr->lpm); 1446 } else { 1447 rc = ccw_device_start(device->cdev, cqr->cpaddr, 1448 (long) cqr, cqr->lpm, 0); 1449 } 1450 switch (rc) { 1451 case 0: 1452 cqr->status = DASD_CQR_IN_IO; 1453 break; 1454 case -EBUSY: 1455 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1456 "start_IO: device busy, retry later"); 1457 break; 1458 case -EACCES: 1459 /* -EACCES indicates that the request used only a subset of the 1460 * available 
paths and all these paths are gone. If the lpm of 1461 * this request was only a subset of the opm (e.g. the ppm) then 1462 * we just do a retry with all available paths. 1463 * If we already use the full opm, something is amiss, and we 1464 * need a full path verification. 1465 */ 1466 if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 1467 DBF_DEV_EVENT(DBF_WARNING, device, 1468 "start_IO: selected paths gone (%x)", 1469 cqr->lpm); 1470 } else if (cqr->lpm != dasd_path_get_opm(device)) { 1471 cqr->lpm = dasd_path_get_opm(device); 1472 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 1473 "start_IO: selected paths gone," 1474 " retry on all paths"); 1475 } else { 1476 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1477 "start_IO: all paths in opm gone," 1478 " do path verification"); 1479 dasd_generic_last_path_gone(device); 1480 dasd_path_no_path(device); 1481 dasd_path_set_tbvpm(device, 1482 ccw_device_get_path_mask( 1483 device->cdev)); 1484 } 1485 break; 1486 case -ENODEV: 1487 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1488 "start_IO: -ENODEV device gone, retry"); 1489 break; 1490 case -EIO: 1491 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1492 "start_IO: -EIO device gone, retry"); 1493 break; 1494 case -EINVAL: 1495 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 1496 "start_IO: -EINVAL device currently " 1497 "not accessible"); 1498 break; 1499 default: 1500 /* internal error 11 - unknown rc */ 1501 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 1502 dev_err(&device->cdev->dev, 1503 "An error occurred in the DASD device driver, " 1504 "reason=%s\n", errorstring); 1505 BUG(); 1506 break; 1507 } 1508 cqr->intrc = rc; 1509 return rc; 1510 } 1511 EXPORT_SYMBOL(dasd_start_IO); 1512 1513 /* 1514 * Timeout function for dasd devices. This is used for different purposes 1515 * 1) missing interrupt handler for normal operation 1516 * 2) delayed start of request where start_IO failed with -EBUSY 1517 * 3) timeout for missing state change interrupts 1518 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 1519 * DASD_CQR_QUEUED for 2) and 3). 1520 */ 1521 static void dasd_device_timeout(struct timer_list *t) 1522 { 1523 unsigned long flags; 1524 struct dasd_device *device; 1525 1526 device = from_timer(device, t, timer); 1527 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1528 /* re-activate request queue */ 1529 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1530 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1531 dasd_schedule_device_bh(device); 1532 } 1533 1534 /* 1535 * Setup timeout for a device in jiffies. 1536 */ 1537 void dasd_device_set_timer(struct dasd_device *device, int expires) 1538 { 1539 if (expires == 0) 1540 del_timer(&device->timer); 1541 else 1542 mod_timer(&device->timer, jiffies + expires); 1543 } 1544 EXPORT_SYMBOL(dasd_device_set_timer); 1545 1546 /* 1547 * Clear timeout for a device. 
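 * (Equivalent to calling dasd_device_set_timer() with expires == 0.)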
1548 */ 1549 void dasd_device_clear_timer(struct dasd_device *device) 1550 { 1551 del_timer(&device->timer); 1552 } 1553 EXPORT_SYMBOL(dasd_device_clear_timer); 1554 1555 static void dasd_handle_killed_request(struct ccw_device *cdev, 1556 unsigned long intparm) 1557 { 1558 struct dasd_ccw_req *cqr; 1559 struct dasd_device *device; 1560 1561 if (!intparm) 1562 return; 1563 cqr = (struct dasd_ccw_req *) intparm; 1564 if (cqr->status != DASD_CQR_IN_IO) { 1565 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1566 "invalid status in handle_killed_request: " 1567 "%02x", cqr->status); 1568 return; 1569 } 1570 1571 device = dasd_device_from_cdev_locked(cdev); 1572 if (IS_ERR(device)) { 1573 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1574 "unable to get device from cdev"); 1575 return; 1576 } 1577 1578 if (!cqr->startdev || 1579 device != cqr->startdev || 1580 strncmp(cqr->startdev->discipline->ebcname, 1581 (char *) &cqr->magic, 4)) { 1582 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1583 "invalid device in request"); 1584 dasd_put_device(device); 1585 return; 1586 } 1587 1588 /* Schedule request to be retried. */ 1589 cqr->status = DASD_CQR_QUEUED; 1590 1591 dasd_device_clear_timer(device); 1592 dasd_schedule_device_bh(device); 1593 dasd_put_device(device); 1594 } 1595 1596 void dasd_generic_handle_state_change(struct dasd_device *device) 1597 { 1598 /* First of all start sense subsystem status request. */ 1599 dasd_eer_snss(device); 1600 1601 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1602 dasd_schedule_device_bh(device); 1603 if (device->block) { 1604 dasd_schedule_block_bh(device->block); 1605 if (device->block->request_queue) 1606 blk_mq_run_hw_queues(device->block->request_queue, 1607 true); 1608 } 1609 } 1610 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 1611 1612 static int dasd_check_hpf_error(struct irb *irb) 1613 { 1614 return (scsw_tm_is_valid_schxs(&irb->scsw) && 1615 (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX || 1616 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX)); 1617 } 1618 1619 static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb) 1620 { 1621 struct dasd_device *device = NULL; 1622 u8 *sense = NULL; 1623 1624 if (!block) 1625 return 0; 1626 device = block->base; 1627 if (!device || !device->discipline->is_ese) 1628 return 0; 1629 if (!device->discipline->is_ese(device)) 1630 return 0; 1631 1632 sense = dasd_get_sense(irb); 1633 if (!sense) 1634 return 0; 1635 1636 return !!(sense[1] & SNS1_NO_REC_FOUND) || 1637 !!(sense[1] & SNS1_FILE_PROTECTED) || 1638 scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN; 1639 } 1640 1641 static int dasd_ese_oos_cond(u8 *sense) 1642 { 1643 return sense[0] & SNS0_EQUIPMENT_CHECK && 1644 sense[1] & SNS1_PERM_ERR && 1645 sense[1] & SNS1_WRITE_INHIBITED && 1646 sense[25] == 0x01; 1647 } 1648 1649 /* 1650 * Interrupt handler for "normal" ssch-io based dasd devices. 
1651 */ 1652 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1653 struct irb *irb) 1654 { 1655 struct dasd_ccw_req *cqr, *next, *fcqr; 1656 struct dasd_device *device; 1657 unsigned long now; 1658 int nrf_suppressed = 0; 1659 int fp_suppressed = 0; 1660 u8 *sense = NULL; 1661 int expires; 1662 1663 cqr = (struct dasd_ccw_req *) intparm; 1664 if (IS_ERR(irb)) { 1665 switch (PTR_ERR(irb)) { 1666 case -EIO: 1667 if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) { 1668 device = cqr->startdev; 1669 cqr->status = DASD_CQR_CLEARED; 1670 dasd_device_clear_timer(device); 1671 wake_up(&dasd_flush_wq); 1672 dasd_schedule_device_bh(device); 1673 return; 1674 } 1675 break; 1676 case -ETIMEDOUT: 1677 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1678 "request timed out\n", __func__); 1679 break; 1680 default: 1681 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1682 "unknown error %ld\n", __func__, 1683 PTR_ERR(irb)); 1684 } 1685 dasd_handle_killed_request(cdev, intparm); 1686 return; 1687 } 1688 1689 now = get_tod_clock(); 1690 /* check for conditions that should be handled immediately */ 1691 if (!cqr || 1692 !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1693 scsw_cstat(&irb->scsw) == 0)) { 1694 if (cqr) 1695 memcpy(&cqr->irb, irb, sizeof(*irb)); 1696 device = dasd_device_from_cdev_locked(cdev); 1697 if (IS_ERR(device)) 1698 return; 1699 /* ignore unsolicited interrupts for DIAG discipline */ 1700 if (device->discipline == dasd_diag_discipline_pointer) { 1701 dasd_put_device(device); 1702 return; 1703 } 1704 1705 /* 1706 * In some cases 'File Protected' or 'No Record Found' errors 1707 * might be expected and debug log messages for the 1708 * corresponding interrupts shouldn't be written then. 1709 * Check if either of the according suppress bits is set. 1710 */ 1711 sense = dasd_get_sense(irb); 1712 if (sense) { 1713 fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) && 1714 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags); 1715 nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) && 1716 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags); 1717 1718 /* 1719 * Extent pool probably out-of-space. 1720 * Stop device and check exhaust level. 
1721 */ 1722 if (dasd_ese_oos_cond(sense)) { 1723 dasd_generic_space_exhaust(device, cqr); 1724 device->discipline->ext_pool_exhaust(device, cqr); 1725 dasd_put_device(device); 1726 return; 1727 } 1728 } 1729 if (!(fp_suppressed || nrf_suppressed)) 1730 device->discipline->dump_sense_dbf(device, irb, "int"); 1731 1732 if (device->features & DASD_FEATURE_ERPLOG) 1733 device->discipline->dump_sense(device, cqr, irb); 1734 device->discipline->check_for_device_change(device, cqr, irb); 1735 dasd_put_device(device); 1736 } 1737 1738 /* check for for attention message */ 1739 if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) { 1740 device = dasd_device_from_cdev_locked(cdev); 1741 if (!IS_ERR(device)) { 1742 device->discipline->check_attention(device, 1743 irb->esw.esw1.lpum); 1744 dasd_put_device(device); 1745 } 1746 } 1747 1748 if (!cqr) 1749 return; 1750 1751 device = (struct dasd_device *) cqr->startdev; 1752 if (!device || 1753 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1754 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1755 "invalid device in request"); 1756 return; 1757 } 1758 1759 if (dasd_ese_needs_format(cqr->block, irb)) { 1760 if (rq_data_dir((struct request *)cqr->callback_data) == READ) { 1761 device->discipline->ese_read(cqr, irb); 1762 cqr->status = DASD_CQR_SUCCESS; 1763 cqr->stopclk = now; 1764 dasd_device_clear_timer(device); 1765 dasd_schedule_device_bh(device); 1766 return; 1767 } 1768 fcqr = device->discipline->ese_format(device, cqr, irb); 1769 if (IS_ERR(fcqr)) { 1770 if (PTR_ERR(fcqr) == -EINVAL) { 1771 cqr->status = DASD_CQR_ERROR; 1772 return; 1773 } 1774 /* 1775 * If we can't format now, let the request go 1776 * one extra round. Maybe we can format later. 1777 */ 1778 cqr->status = DASD_CQR_QUEUED; 1779 dasd_schedule_device_bh(device); 1780 return; 1781 } else { 1782 fcqr->status = DASD_CQR_QUEUED; 1783 cqr->status = DASD_CQR_QUEUED; 1784 list_add(&fcqr->devlist, &device->ccw_queue); 1785 dasd_schedule_device_bh(device); 1786 return; 1787 } 1788 } 1789 1790 /* Check for clear pending */ 1791 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1792 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1793 cqr->status = DASD_CQR_CLEARED; 1794 dasd_device_clear_timer(device); 1795 wake_up(&dasd_flush_wq); 1796 dasd_schedule_device_bh(device); 1797 return; 1798 } 1799 1800 /* check status - the request might have been killed by dyn detach */ 1801 if (cqr->status != DASD_CQR_IN_IO) { 1802 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1803 "status %02x", dev_name(&cdev->dev), cqr->status); 1804 return; 1805 } 1806 1807 next = NULL; 1808 expires = 0; 1809 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1810 scsw_cstat(&irb->scsw) == 0) { 1811 /* request was completed successfully */ 1812 cqr->status = DASD_CQR_SUCCESS; 1813 cqr->stopclk = now; 1814 /* Start first request on queue if possible -> fast_io. 
*/ 1815 if (cqr->devlist.next != &device->ccw_queue) { 1816 next = list_entry(cqr->devlist.next, 1817 struct dasd_ccw_req, devlist); 1818 } 1819 } else { /* error */ 1820 /* check for HPF error 1821 * call discipline function to requeue all requests 1822 * and disable HPF accordingly 1823 */ 1824 if (cqr->cpmode && dasd_check_hpf_error(irb) && 1825 device->discipline->handle_hpf_error) 1826 device->discipline->handle_hpf_error(device, irb); 1827 /* 1828 * If we don't want complex ERP for this request, then just 1829 * reset this and retry it in the fastpath 1830 */ 1831 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1832 cqr->retries > 0) { 1833 if (cqr->lpm == dasd_path_get_opm(device)) 1834 DBF_DEV_EVENT(DBF_DEBUG, device, 1835 "default ERP in fastpath " 1836 "(%i retries left)", 1837 cqr->retries); 1838 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1839 cqr->lpm = dasd_path_get_opm(device); 1840 cqr->status = DASD_CQR_QUEUED; 1841 next = cqr; 1842 } else 1843 cqr->status = DASD_CQR_ERROR; 1844 } 1845 if (next && (next->status == DASD_CQR_QUEUED) && 1846 (!device->stopped)) { 1847 if (device->discipline->start_IO(next) == 0) 1848 expires = next->expires; 1849 } 1850 if (expires != 0) 1851 dasd_device_set_timer(device, expires); 1852 else 1853 dasd_device_clear_timer(device); 1854 dasd_schedule_device_bh(device); 1855 } 1856 EXPORT_SYMBOL(dasd_int_handler); 1857 1858 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1859 { 1860 struct dasd_device *device; 1861 1862 device = dasd_device_from_cdev_locked(cdev); 1863 1864 if (IS_ERR(device)) 1865 goto out; 1866 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1867 device->state != device->target || 1868 !device->discipline->check_for_device_change){ 1869 dasd_put_device(device); 1870 goto out; 1871 } 1872 if (device->discipline->dump_sense_dbf) 1873 device->discipline->dump_sense_dbf(device, irb, "uc"); 1874 device->discipline->check_for_device_change(device, NULL, irb); 1875 dasd_put_device(device); 1876 out: 1877 return UC_TODO_RETRY; 1878 } 1879 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1880 1881 /* 1882 * If we have an error on a dasd_block layer request then we cancel 1883 * and return all further requests from the same dasd_block as well. 1884 */ 1885 static void __dasd_device_recovery(struct dasd_device *device, 1886 struct dasd_ccw_req *ref_cqr) 1887 { 1888 struct list_head *l, *n; 1889 struct dasd_ccw_req *cqr; 1890 1891 /* 1892 * only requeue request that came from the dasd_block layer 1893 */ 1894 if (!ref_cqr->block) 1895 return; 1896 1897 list_for_each_safe(l, n, &device->ccw_queue) { 1898 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1899 if (cqr->status == DASD_CQR_QUEUED && 1900 ref_cqr->block == cqr->block) { 1901 cqr->status = DASD_CQR_CLEARED; 1902 } 1903 } 1904 }; 1905 1906 /* 1907 * Remove those ccw requests from the queue that need to be returned 1908 * to the upper layer. 1909 */ 1910 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1911 struct list_head *final_queue) 1912 { 1913 struct list_head *l, *n; 1914 struct dasd_ccw_req *cqr; 1915 1916 /* Process request with final status. */ 1917 list_for_each_safe(l, n, &device->ccw_queue) { 1918 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1919 1920 /* Skip any non-final request. 
*/ 1921 if (cqr->status == DASD_CQR_QUEUED || 1922 cqr->status == DASD_CQR_IN_IO || 1923 cqr->status == DASD_CQR_CLEAR_PENDING) 1924 continue; 1925 if (cqr->status == DASD_CQR_ERROR) { 1926 __dasd_device_recovery(device, cqr); 1927 } 1928 /* Rechain finished requests to final queue */ 1929 list_move_tail(&cqr->devlist, final_queue); 1930 } 1931 } 1932 1933 static void __dasd_process_cqr(struct dasd_device *device, 1934 struct dasd_ccw_req *cqr) 1935 { 1936 char errorstring[ERRORLENGTH]; 1937 1938 switch (cqr->status) { 1939 case DASD_CQR_SUCCESS: 1940 cqr->status = DASD_CQR_DONE; 1941 break; 1942 case DASD_CQR_ERROR: 1943 cqr->status = DASD_CQR_NEED_ERP; 1944 break; 1945 case DASD_CQR_CLEARED: 1946 cqr->status = DASD_CQR_TERMINATED; 1947 break; 1948 default: 1949 /* internal error 12 - wrong cqr status */ 1950 snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status); 1951 dev_err(&device->cdev->dev, 1952 "An error occurred in the DASD device driver, " 1953 "reason=%s\n", errorstring); 1954 BUG(); 1955 } 1956 if (cqr->callback) 1957 cqr->callback(cqr, cqr->callback_data); 1958 } 1959 1960 /* 1961 * the cqrs from the final queue are returned to the upper layer 1962 * by setting a dasd_block state and calling the callback function 1963 */ 1964 static void __dasd_device_process_final_queue(struct dasd_device *device, 1965 struct list_head *final_queue) 1966 { 1967 struct list_head *l, *n; 1968 struct dasd_ccw_req *cqr; 1969 struct dasd_block *block; 1970 1971 list_for_each_safe(l, n, final_queue) { 1972 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1973 list_del_init(&cqr->devlist); 1974 block = cqr->block; 1975 if (!block) { 1976 __dasd_process_cqr(device, cqr); 1977 } else { 1978 spin_lock_bh(&block->queue_lock); 1979 __dasd_process_cqr(device, cqr); 1980 spin_unlock_bh(&block->queue_lock); 1981 } 1982 } 1983 } 1984 1985 /* 1986 * Take a look at the first request on the ccw queue and check 1987 * if it reached its expire time. If so, terminate the IO.
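 * Only the head of the ccw queue can currently be in I/O, so only that
 * request is checked here; if it cannot be terminated, the check is retried
 * after five seconds.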
1988 */ 1989 static void __dasd_device_check_expire(struct dasd_device *device) 1990 { 1991 struct dasd_ccw_req *cqr; 1992 1993 if (list_empty(&device->ccw_queue)) 1994 return; 1995 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1996 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1997 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1998 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1999 /* 2000 * IO in safe offline processing should not 2001 * run out of retries 2002 */ 2003 cqr->retries++; 2004 } 2005 if (device->discipline->term_IO(cqr) != 0) { 2006 /* Hmpf, try again in 5 sec */ 2007 dev_err(&device->cdev->dev, 2008 "cqr %p timed out (%lus) but cannot be " 2009 "ended, retrying in 5 s\n", 2010 cqr, (cqr->expires/HZ)); 2011 cqr->expires += 5*HZ; 2012 dasd_device_set_timer(device, 5*HZ); 2013 } else { 2014 dev_err(&device->cdev->dev, 2015 "cqr %p timed out (%lus), %i retries " 2016 "remaining\n", cqr, (cqr->expires/HZ), 2017 cqr->retries); 2018 } 2019 } 2020 } 2021 2022 /* 2023 * Return 1 when the device is not eligible for I/O. 2024 */ 2025 static int __dasd_device_is_unusable(struct dasd_device *device, 2026 struct dasd_ccw_req *cqr) 2027 { 2028 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2029 2030 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2031 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2032 /* 2033 * The dasd is being set offline, 2034 * and it is not a safe offline, where we would have to allow I/O 2035 */ 2036 return 1; 2037 } 2038 if (device->stopped) { 2039 if (device->stopped & mask) { 2040 /* stopped and CQR will not change that. */ 2041 return 1; 2042 } 2043 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2044 /* CQR is not able to change device to 2045 * operational. */ 2046 return 1; 2047 } 2048 /* CQR required to get device operational. */ 2049 } 2050 return 0; 2051 } 2052 2053 /* 2054 * Take a look at the first request on the ccw queue and check 2055 * if it needs to be started.
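 * Only a request in state DASD_CQR_QUEUED is started. If the device is not
 * usable at the moment, the request is handed back to the upper layer with
 * -EAGAIN instead.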
2056 */ 2057 static void __dasd_device_start_head(struct dasd_device *device) 2058 { 2059 struct dasd_ccw_req *cqr; 2060 int rc; 2061 2062 if (list_empty(&device->ccw_queue)) 2063 return; 2064 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2065 if (cqr->status != DASD_CQR_QUEUED) 2066 return; 2067 /* if device is not usable return request to upper layer */ 2068 if (__dasd_device_is_unusable(device, cqr)) { 2069 cqr->intrc = -EAGAIN; 2070 cqr->status = DASD_CQR_CLEARED; 2071 dasd_schedule_device_bh(device); 2072 return; 2073 } 2074 2075 rc = device->discipline->start_IO(cqr); 2076 if (rc == 0) 2077 dasd_device_set_timer(device, cqr->expires); 2078 else if (rc == -EACCES) { 2079 dasd_schedule_device_bh(device); 2080 } else 2081 /* Hmpf, try again in 1/2 sec */ 2082 dasd_device_set_timer(device, 50); 2083 } 2084 2085 static void __dasd_device_check_path_events(struct dasd_device *device) 2086 { 2087 __u8 tbvpm, fcsecpm; 2088 int rc; 2089 2090 tbvpm = dasd_path_get_tbvpm(device); 2091 fcsecpm = dasd_path_get_fcsecpm(device); 2092 2093 if (!tbvpm && !fcsecpm) 2094 return; 2095 2096 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2097 return; 2098 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2099 if (rc) { 2100 dasd_device_set_timer(device, 50); 2101 } else { 2102 dasd_path_clear_all_verify(device); 2103 dasd_path_clear_all_fcsec(device); 2104 } 2105 }; 2106 2107 /* 2108 * Go through all requests on the dasd_device request queue, 2109 * terminate them on the cdev if necessary, and return them to the 2110 * submitting layer via callback. 2111 * Note: 2112 * Make sure that all 'submitting layers' still exist when 2113 * this function is called! In other words, when 'device' is a base 2114 * device, then all block layer requests must have been removed beforehand 2115 * via dasd_flush_block_queue. 2116 */ 2117 int dasd_flush_device_queue(struct dasd_device *device) 2118 { 2119 struct dasd_ccw_req *cqr, *n; 2120 int rc; 2121 struct list_head flush_queue; 2122 2123 INIT_LIST_HEAD(&flush_queue); 2124 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2125 rc = 0; 2126 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2127 /* Check status and move request to flush_queue */ 2128 switch (cqr->status) { 2129 case DASD_CQR_IN_IO: 2130 rc = device->discipline->term_IO(cqr); 2131 if (rc) { 2132 /* unable to terminate request */ 2133 dev_err(&device->cdev->dev, 2134 "Flushing the DASD request queue " 2135 "failed for request %p\n", cqr); 2136 /* stop flush processing */ 2137 goto finished; 2138 } 2139 break; 2140 case DASD_CQR_QUEUED: 2141 cqr->stopclk = get_tod_clock(); 2142 cqr->status = DASD_CQR_CLEARED; 2143 break; 2144 default: /* no need to modify the others */ 2145 break; 2146 } 2147 list_move_tail(&cqr->devlist, &flush_queue); 2148 } 2149 finished: 2150 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2151 /* 2152 * After this point all requests must be in state CLEAR_PENDING, 2153 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2154 * one of the others.
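 * The interrupt handler drives this transition: once the clear function
 * completes, dasd_int_handler sets the request to DASD_CQR_CLEARED and
 * wakes up dasd_flush_wq.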
2155 */ 2156 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2157 wait_event(dasd_flush_wq, 2158 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2159 /* 2160 * Now set each request back to TERMINATED, DONE or NEED_ERP 2161 * and call the callback function of flushed requests 2162 */ 2163 __dasd_device_process_final_queue(device, &flush_queue); 2164 return rc; 2165 } 2166 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2167 2168 /* 2169 * Acquire the device lock and process queues for the device. 2170 */ 2171 static void dasd_device_tasklet(unsigned long data) 2172 { 2173 struct dasd_device *device = (struct dasd_device *) data; 2174 struct list_head final_queue; 2175 2176 atomic_set (&device->tasklet_scheduled, 0); 2177 INIT_LIST_HEAD(&final_queue); 2178 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2179 /* Check expire time of first request on the ccw queue. */ 2180 __dasd_device_check_expire(device); 2181 /* find final requests on ccw queue */ 2182 __dasd_device_process_ccw_queue(device, &final_queue); 2183 __dasd_device_check_path_events(device); 2184 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2185 /* Now call the callback function of requests with final status */ 2186 __dasd_device_process_final_queue(device, &final_queue); 2187 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2188 /* Now check if the head of the ccw queue needs to be started. */ 2189 __dasd_device_start_head(device); 2190 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2191 if (waitqueue_active(&shutdown_waitq)) 2192 wake_up(&shutdown_waitq); 2193 dasd_put_device(device); 2194 } 2195 2196 /* 2197 * Schedules a call to dasd_tasklet over the device tasklet. 2198 */ 2199 void dasd_schedule_device_bh(struct dasd_device *device) 2200 { 2201 /* Protect against rescheduling. */ 2202 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2203 return; 2204 dasd_get_device(device); 2205 tasklet_hi_schedule(&device->tasklet); 2206 } 2207 EXPORT_SYMBOL(dasd_schedule_device_bh); 2208 2209 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2210 { 2211 device->stopped |= bits; 2212 } 2213 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2214 2215 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2216 { 2217 device->stopped &= ~bits; 2218 if (!device->stopped) 2219 wake_up(&generic_waitq); 2220 } 2221 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2222 2223 /* 2224 * Queue a request to the head of the device ccw_queue. 2225 * Start the I/O if possible. 2226 */ 2227 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2228 { 2229 struct dasd_device *device; 2230 unsigned long flags; 2231 2232 device = cqr->startdev; 2233 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2234 cqr->status = DASD_CQR_QUEUED; 2235 list_add(&cqr->devlist, &device->ccw_queue); 2236 /* let the bh start the request to keep them in order */ 2237 dasd_schedule_device_bh(device); 2238 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2239 } 2240 EXPORT_SYMBOL(dasd_add_request_head); 2241 2242 /* 2243 * Queue a request to the tail of the device ccw_queue. 2244 * Start the I/O if possible. 
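 * The request is not started here directly; dasd_schedule_device_bh() lets
 * the device tasklet start it so that ordering with already queued requests
 * is preserved. Illustrative use, mirroring the sleep_on helpers below:
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));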
2245 */ 2246 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2247 { 2248 struct dasd_device *device; 2249 unsigned long flags; 2250 2251 device = cqr->startdev; 2252 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2253 cqr->status = DASD_CQR_QUEUED; 2254 list_add_tail(&cqr->devlist, &device->ccw_queue); 2255 /* let the bh start the request to keep them in order */ 2256 dasd_schedule_device_bh(device); 2257 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2258 } 2259 EXPORT_SYMBOL(dasd_add_request_tail); 2260 2261 /* 2262 * Wakeup helper for the 'sleep_on' functions. 2263 */ 2264 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2265 { 2266 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2267 cqr->callback_data = DASD_SLEEPON_END_TAG; 2268 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2269 wake_up(&generic_waitq); 2270 } 2271 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2272 2273 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2274 { 2275 struct dasd_device *device; 2276 int rc; 2277 2278 device = cqr->startdev; 2279 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2280 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2281 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2282 return rc; 2283 } 2284 2285 /* 2286 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2287 */ 2288 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2289 { 2290 struct dasd_device *device; 2291 dasd_erp_fn_t erp_fn; 2292 2293 if (cqr->status == DASD_CQR_FILLED) 2294 return 0; 2295 device = cqr->startdev; 2296 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2297 if (cqr->status == DASD_CQR_TERMINATED) { 2298 device->discipline->handle_terminated_request(cqr); 2299 return 1; 2300 } 2301 if (cqr->status == DASD_CQR_NEED_ERP) { 2302 erp_fn = device->discipline->erp_action(cqr); 2303 erp_fn(cqr); 2304 return 1; 2305 } 2306 if (cqr->status == DASD_CQR_FAILED) 2307 dasd_log_sense(cqr, &cqr->irb); 2308 if (cqr->refers) { 2309 __dasd_process_erp(device, cqr); 2310 return 1; 2311 } 2312 } 2313 return 0; 2314 } 2315 2316 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2317 { 2318 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2319 if (cqr->refers) /* erp is not done yet */ 2320 return 1; 2321 return ((cqr->status != DASD_CQR_DONE) && 2322 (cqr->status != DASD_CQR_FAILED)); 2323 } else 2324 return (cqr->status == DASD_CQR_FILLED); 2325 } 2326 2327 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2328 { 2329 struct dasd_device *device; 2330 int rc; 2331 struct list_head ccw_queue; 2332 struct dasd_ccw_req *cqr; 2333 2334 INIT_LIST_HEAD(&ccw_queue); 2335 maincqr->status = DASD_CQR_FILLED; 2336 device = maincqr->startdev; 2337 list_add(&maincqr->blocklist, &ccw_queue); 2338 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2339 cqr = list_first_entry(&ccw_queue, 2340 struct dasd_ccw_req, blocklist)) { 2341 2342 if (__dasd_sleep_on_erp(cqr)) 2343 continue; 2344 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2345 continue; 2346 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2347 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2348 cqr->status = DASD_CQR_FAILED; 2349 cqr->intrc = -EPERM; 2350 continue; 2351 } 2352 /* Non-temporary stop condition will trigger fail fast */ 2353 if (device->stopped & ~DASD_STOPPED_PENDING && 2354 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2355 (!dasd_eer_enabled(device))) { 2356 cqr->status = DASD_CQR_FAILED; 2357 cqr->intrc = -ENOLINK; 
2358 continue; 2359 } 2360 /* 2361 * Don't try to start requests if device is in 2362 * offline processing, it might wait forever 2363 */ 2364 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2365 cqr->status = DASD_CQR_FAILED; 2366 cqr->intrc = -ENODEV; 2367 continue; 2368 } 2369 /* 2370 * Don't try to start requests if device is stopped 2371 * except path verification requests 2372 */ 2373 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2374 if (interruptible) { 2375 rc = wait_event_interruptible( 2376 generic_waitq, !(device->stopped)); 2377 if (rc == -ERESTARTSYS) { 2378 cqr->status = DASD_CQR_FAILED; 2379 maincqr->intrc = rc; 2380 continue; 2381 } 2382 } else 2383 wait_event(generic_waitq, !(device->stopped)); 2384 } 2385 if (!cqr->callback) 2386 cqr->callback = dasd_wakeup_cb; 2387 2388 cqr->callback_data = DASD_SLEEPON_START_TAG; 2389 dasd_add_request_tail(cqr); 2390 if (interruptible) { 2391 rc = wait_event_interruptible( 2392 generic_waitq, _wait_for_wakeup(cqr)); 2393 if (rc == -ERESTARTSYS) { 2394 dasd_cancel_req(cqr); 2395 /* wait (non-interruptible) for final status */ 2396 wait_event(generic_waitq, 2397 _wait_for_wakeup(cqr)); 2398 cqr->status = DASD_CQR_FAILED; 2399 maincqr->intrc = rc; 2400 continue; 2401 } 2402 } else 2403 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2404 } 2405 2406 maincqr->endclk = get_tod_clock(); 2407 if ((maincqr->status != DASD_CQR_DONE) && 2408 (maincqr->intrc != -ERESTARTSYS)) 2409 dasd_log_sense(maincqr, &maincqr->irb); 2410 if (maincqr->status == DASD_CQR_DONE) 2411 rc = 0; 2412 else if (maincqr->intrc) 2413 rc = maincqr->intrc; 2414 else 2415 rc = -EIO; 2416 return rc; 2417 } 2418 2419 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2420 { 2421 struct dasd_ccw_req *cqr; 2422 2423 list_for_each_entry(cqr, ccw_queue, blocklist) { 2424 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2425 return 0; 2426 } 2427 2428 return 1; 2429 } 2430 2431 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2432 { 2433 struct dasd_device *device; 2434 struct dasd_ccw_req *cqr, *n; 2435 u8 *sense = NULL; 2436 int rc; 2437 2438 retry: 2439 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2440 device = cqr->startdev; 2441 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2442 continue; 2443 2444 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2445 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2446 cqr->status = DASD_CQR_FAILED; 2447 cqr->intrc = -EPERM; 2448 continue; 2449 } 2450 /*Non-temporary stop condition will trigger fail fast*/ 2451 if (device->stopped & ~DASD_STOPPED_PENDING && 2452 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2453 !dasd_eer_enabled(device)) { 2454 cqr->status = DASD_CQR_FAILED; 2455 cqr->intrc = -EAGAIN; 2456 continue; 2457 } 2458 2459 /*Don't try to start requests if device is stopped*/ 2460 if (interruptible) { 2461 rc = wait_event_interruptible( 2462 generic_waitq, !device->stopped); 2463 if (rc == -ERESTARTSYS) { 2464 cqr->status = DASD_CQR_FAILED; 2465 cqr->intrc = rc; 2466 continue; 2467 } 2468 } else 2469 wait_event(generic_waitq, !(device->stopped)); 2470 2471 if (!cqr->callback) 2472 cqr->callback = dasd_wakeup_cb; 2473 cqr->callback_data = DASD_SLEEPON_START_TAG; 2474 dasd_add_request_tail(cqr); 2475 } 2476 2477 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2478 2479 rc = 0; 2480 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2481 /* 2482 * In some cases the 'File Protected' or 'Incorrect Length' 2483 * error might be 
expected and error recovery would be 2484 * unnecessary in these cases. Check if the corresponding suppress 2485 * bit is set. 2486 */ 2487 sense = dasd_get_sense(&cqr->irb); 2488 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2489 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2490 continue; 2491 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2492 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2493 continue; 2494 2495 /* 2496 * for alias devices simplify error recovery and 2497 * return to upper layer 2498 * do not skip ERP requests 2499 */ 2500 if (cqr->startdev != cqr->basedev && !cqr->refers && 2501 (cqr->status == DASD_CQR_TERMINATED || 2502 cqr->status == DASD_CQR_NEED_ERP)) 2503 return -EAGAIN; 2504 2505 /* normal recovery for basedev IO */ 2506 if (__dasd_sleep_on_erp(cqr)) 2507 /* handle erp first */ 2508 goto retry; 2509 } 2510 2511 return 0; 2512 } 2513 2514 /* 2515 * Queue a request to the tail of the device ccw_queue and wait for 2516 * its completion. 2517 */ 2518 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2519 { 2520 return _dasd_sleep_on(cqr, 0); 2521 } 2522 EXPORT_SYMBOL(dasd_sleep_on); 2523 2524 /* 2525 * Start requests from a ccw_queue and wait for their completion. 2526 */ 2527 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2528 { 2529 return _dasd_sleep_on_queue(ccw_queue, 0); 2530 } 2531 EXPORT_SYMBOL(dasd_sleep_on_queue); 2532 2533 /* 2534 * Start requests from a ccw_queue and wait interruptibly for their completion. 2535 */ 2536 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2537 { 2538 return _dasd_sleep_on_queue(ccw_queue, 1); 2539 } 2540 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2541 2542 /* 2543 * Queue a request to the tail of the device ccw_queue and wait 2544 * interruptibly for its completion. 2545 */ 2546 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2547 { 2548 return _dasd_sleep_on(cqr, 1); 2549 } 2550 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2551 2552 /* 2553 * Whoa nelly, now it gets really hairy. For some functions (e.g. steal lock 2554 * for eckd devices) the currently running request has to be terminated 2555 * and be put back to status queued, before the special request is added 2556 * to the head of the queue. Then the special request is waited on normally. 2557 */ 2558 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2559 { 2560 struct dasd_ccw_req *cqr; 2561 int rc; 2562 2563 if (list_empty(&device->ccw_queue)) 2564 return 0; 2565 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2566 rc = device->discipline->term_IO(cqr); 2567 if (!rc) 2568 /* 2569 * CQR terminated because a more important request is pending. 2570 * Undo decreasing of retry counter because this is 2571 * not an error case.
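 * (term_IO decrements cqr->retries for every termination attempt, cf.
 * dasd_term_IO earlier in this file, so the counter is restored here.)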
2572 */ 2573 cqr->retries++; 2574 return rc; 2575 } 2576 2577 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2578 { 2579 struct dasd_device *device; 2580 int rc; 2581 2582 device = cqr->startdev; 2583 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2584 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2585 cqr->status = DASD_CQR_FAILED; 2586 cqr->intrc = -EPERM; 2587 return -EIO; 2588 } 2589 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2590 rc = _dasd_term_running_cqr(device); 2591 if (rc) { 2592 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2593 return rc; 2594 } 2595 cqr->callback = dasd_wakeup_cb; 2596 cqr->callback_data = DASD_SLEEPON_START_TAG; 2597 cqr->status = DASD_CQR_QUEUED; 2598 /* 2599 * add new request as second 2600 * first the terminated cqr needs to be finished 2601 */ 2602 list_add(&cqr->devlist, device->ccw_queue.next); 2603 2604 /* let the bh start the request to keep them in order */ 2605 dasd_schedule_device_bh(device); 2606 2607 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2608 2609 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2610 2611 if (cqr->status == DASD_CQR_DONE) 2612 rc = 0; 2613 else if (cqr->intrc) 2614 rc = cqr->intrc; 2615 else 2616 rc = -EIO; 2617 2618 /* kick tasklets */ 2619 dasd_schedule_device_bh(device); 2620 if (device->block) 2621 dasd_schedule_block_bh(device->block); 2622 2623 return rc; 2624 } 2625 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2626 2627 /* 2628 * Cancels a request that was started with dasd_sleep_on_req. 2629 * This is useful to timeout requests. The request will be 2630 * terminated if it is currently in i/o. 2631 * Returns 0 if request termination was successful 2632 * negative error code if termination failed 2633 * Cancellation of a request is an asynchronous operation! The calling 2634 * function has to wait until the request is properly returned via callback. 2635 */ 2636 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2637 { 2638 struct dasd_device *device = cqr->startdev; 2639 int rc = 0; 2640 2641 switch (cqr->status) { 2642 case DASD_CQR_QUEUED: 2643 /* request was not started - just set to cleared */ 2644 cqr->status = DASD_CQR_CLEARED; 2645 break; 2646 case DASD_CQR_IN_IO: 2647 /* request in IO - terminate IO and release again */ 2648 rc = device->discipline->term_IO(cqr); 2649 if (rc) { 2650 dev_err(&device->cdev->dev, 2651 "Cancelling request %p failed with rc=%d\n", 2652 cqr, rc); 2653 } else { 2654 cqr->stopclk = get_tod_clock(); 2655 } 2656 break; 2657 default: /* already finished or clear pending - do nothing */ 2658 break; 2659 } 2660 dasd_schedule_device_bh(device); 2661 return rc; 2662 } 2663 2664 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2665 { 2666 struct dasd_device *device = cqr->startdev; 2667 unsigned long flags; 2668 int rc; 2669 2670 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2671 rc = __dasd_cancel_req(cqr); 2672 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2673 return rc; 2674 } 2675 2676 /* 2677 * SECTION: Operations of the dasd_block layer. 2678 */ 2679 2680 /* 2681 * Timeout function for dasd_block. This is used when the block layer 2682 * is waiting for something that may not come reliably, (e.g. 
a state 2683 * change interrupt) 2684 */ 2685 static void dasd_block_timeout(struct timer_list *t) 2686 { 2687 unsigned long flags; 2688 struct dasd_block *block; 2689 2690 block = from_timer(block, t, timer); 2691 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2692 /* re-activate request queue */ 2693 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2694 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2695 dasd_schedule_block_bh(block); 2696 blk_mq_run_hw_queues(block->request_queue, true); 2697 } 2698 2699 /* 2700 * Setup timeout for a dasd_block in jiffies. 2701 */ 2702 void dasd_block_set_timer(struct dasd_block *block, int expires) 2703 { 2704 if (expires == 0) 2705 del_timer(&block->timer); 2706 else 2707 mod_timer(&block->timer, jiffies + expires); 2708 } 2709 EXPORT_SYMBOL(dasd_block_set_timer); 2710 2711 /* 2712 * Clear timeout for a dasd_block. 2713 */ 2714 void dasd_block_clear_timer(struct dasd_block *block) 2715 { 2716 del_timer(&block->timer); 2717 } 2718 EXPORT_SYMBOL(dasd_block_clear_timer); 2719 2720 /* 2721 * Process finished error recovery ccw. 2722 */ 2723 static void __dasd_process_erp(struct dasd_device *device, 2724 struct dasd_ccw_req *cqr) 2725 { 2726 dasd_erp_fn_t erp_fn; 2727 2728 if (cqr->status == DASD_CQR_DONE) 2729 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2730 else 2731 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2732 erp_fn = device->discipline->erp_postaction(cqr); 2733 erp_fn(cqr); 2734 } 2735 2736 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2737 { 2738 struct request *req; 2739 blk_status_t error = BLK_STS_OK; 2740 unsigned int proc_bytes; 2741 int status; 2742 2743 req = (struct request *) cqr->callback_data; 2744 dasd_profile_end(cqr->block, cqr, req); 2745 2746 proc_bytes = cqr->proc_bytes; 2747 status = cqr->block->base->discipline->free_cp(cqr, req); 2748 if (status < 0) 2749 error = errno_to_blk_status(status); 2750 else if (status == 0) { 2751 switch (cqr->intrc) { 2752 case -EPERM: 2753 error = BLK_STS_NEXUS; 2754 break; 2755 case -ENOLINK: 2756 error = BLK_STS_TRANSPORT; 2757 break; 2758 case -ETIMEDOUT: 2759 error = BLK_STS_TIMEOUT; 2760 break; 2761 default: 2762 error = BLK_STS_IOERR; 2763 break; 2764 } 2765 } 2766 2767 /* 2768 * We need to take care for ETIMEDOUT errors here since the 2769 * complete callback does not get called in this case. 2770 * Take care of all errors here and avoid additional code to 2771 * transfer the error value to the complete callback. 2772 */ 2773 if (error) { 2774 blk_mq_end_request(req, error); 2775 blk_mq_run_hw_queues(req->q, true); 2776 } else { 2777 /* 2778 * Partial completed requests can happen with ESE devices. 2779 * During read we might have gotten a NRF error and have to 2780 * complete a request partially. 2781 */ 2782 if (proc_bytes) { 2783 blk_update_request(req, BLK_STS_OK, 2784 blk_rq_bytes(req) - proc_bytes); 2785 blk_mq_requeue_request(req, true); 2786 } else if (likely(!blk_should_fake_timeout(req->q))) { 2787 blk_mq_complete_request(req); 2788 } 2789 } 2790 } 2791 2792 /* 2793 * Process ccw request queue. 2794 */ 2795 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2796 struct list_head *final_queue) 2797 { 2798 struct list_head *l, *n; 2799 struct dasd_ccw_req *cqr; 2800 dasd_erp_fn_t erp_fn; 2801 unsigned long flags; 2802 struct dasd_device *base = block->base; 2803 2804 restart: 2805 /* Process request with final status. 
*/ 2806 list_for_each_safe(l, n, &block->ccw_queue) { 2807 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2808 if (cqr->status != DASD_CQR_DONE && 2809 cqr->status != DASD_CQR_FAILED && 2810 cqr->status != DASD_CQR_NEED_ERP && 2811 cqr->status != DASD_CQR_TERMINATED) 2812 continue; 2813 2814 if (cqr->status == DASD_CQR_TERMINATED) { 2815 base->discipline->handle_terminated_request(cqr); 2816 goto restart; 2817 } 2818 2819 /* Process requests that may be recovered */ 2820 if (cqr->status == DASD_CQR_NEED_ERP) { 2821 erp_fn = base->discipline->erp_action(cqr); 2822 if (IS_ERR(erp_fn(cqr))) 2823 continue; 2824 goto restart; 2825 } 2826 2827 /* log sense for fatal error */ 2828 if (cqr->status == DASD_CQR_FAILED) { 2829 dasd_log_sense(cqr, &cqr->irb); 2830 } 2831 2832 /* First of all call extended error reporting. */ 2833 if (dasd_eer_enabled(base) && 2834 cqr->status == DASD_CQR_FAILED) { 2835 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2836 2837 /* restart request */ 2838 cqr->status = DASD_CQR_FILLED; 2839 cqr->retries = 255; 2840 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2841 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2842 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2843 flags); 2844 goto restart; 2845 } 2846 2847 /* Process finished ERP request. */ 2848 if (cqr->refers) { 2849 __dasd_process_erp(base, cqr); 2850 goto restart; 2851 } 2852 2853 /* Rechain finished requests to final queue */ 2854 cqr->endclk = get_tod_clock(); 2855 list_move_tail(&cqr->blocklist, final_queue); 2856 } 2857 } 2858 2859 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2860 { 2861 dasd_schedule_block_bh(cqr->block); 2862 } 2863 2864 static void __dasd_block_start_head(struct dasd_block *block) 2865 { 2866 struct dasd_ccw_req *cqr; 2867 2868 if (list_empty(&block->ccw_queue)) 2869 return; 2870 /* We always begin with the first request on the queue, as some 2871 * of the previously started requests have to be enqueued on a 2872 * dasd_device again for error recovery. 2873 */ 2874 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2875 if (cqr->status != DASD_CQR_FILLED) 2876 continue; 2877 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2878 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2879 cqr->status = DASD_CQR_FAILED; 2880 cqr->intrc = -EPERM; 2881 dasd_schedule_block_bh(block); 2882 continue; 2883 } 2884 /* Non-temporary stop condition will trigger fail fast */ 2885 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2886 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2887 (!dasd_eer_enabled(block->base))) { 2888 cqr->status = DASD_CQR_FAILED; 2889 cqr->intrc = -ENOLINK; 2890 dasd_schedule_block_bh(block); 2891 continue; 2892 } 2893 /* Don't try to start requests if device is stopped */ 2894 if (block->base->stopped) 2895 return; 2896 2897 /* just a fail-safe check, should not happen */ 2898 if (!cqr->startdev) 2899 cqr->startdev = block->base; 2900 2901 /* make sure that the requests we submit find their way back */ 2902 cqr->callback = dasd_return_cqr_cb; 2903 2904 dasd_add_request_tail(cqr); 2905 } 2906 } 2907 2908 /* 2909 * Central dasd_block layer routine. Takes requests from the generic 2910 * block layer request queue, creates ccw requests, enqueues them on 2911 * a dasd_device and processes ccw requests that have been returned.
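 * The tasklet works in three steps: finish off requests on the block ccw
 * queue under queue_lock, call the callbacks of the finished requests under
 * the per-hctx dq->lock, and finally start the head of the queue again.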
2912 */ 2913 static void dasd_block_tasklet(unsigned long data) 2914 { 2915 struct dasd_block *block = (struct dasd_block *) data; 2916 struct list_head final_queue; 2917 struct list_head *l, *n; 2918 struct dasd_ccw_req *cqr; 2919 struct dasd_queue *dq; 2920 2921 atomic_set(&block->tasklet_scheduled, 0); 2922 INIT_LIST_HEAD(&final_queue); 2923 spin_lock_irq(&block->queue_lock); 2924 /* Finish off requests on ccw queue */ 2925 __dasd_process_block_ccw_queue(block, &final_queue); 2926 spin_unlock_irq(&block->queue_lock); 2927 2928 /* Now call the callback function of requests with final status */ 2929 list_for_each_safe(l, n, &final_queue) { 2930 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2931 dq = cqr->dq; 2932 spin_lock_irq(&dq->lock); 2933 list_del_init(&cqr->blocklist); 2934 __dasd_cleanup_cqr(cqr); 2935 spin_unlock_irq(&dq->lock); 2936 } 2937 2938 spin_lock_irq(&block->queue_lock); 2939 /* Now check if the head of the ccw queue needs to be started. */ 2940 __dasd_block_start_head(block); 2941 spin_unlock_irq(&block->queue_lock); 2942 2943 if (waitqueue_active(&shutdown_waitq)) 2944 wake_up(&shutdown_waitq); 2945 dasd_put_device(block->base); 2946 } 2947 2948 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2949 { 2950 wake_up(&dasd_flush_wq); 2951 } 2952 2953 /* 2954 * Requeue a request back to the block request queue 2955 * only works for block requests 2956 */ 2957 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2958 { 2959 struct dasd_block *block = cqr->block; 2960 struct request *req; 2961 2962 if (!block) 2963 return -EINVAL; 2964 /* 2965 * If the request is an ERP request there is nothing to requeue. 2966 * This will be done with the remaining original request. 2967 */ 2968 if (cqr->refers) 2969 return 0; 2970 spin_lock_irq(&cqr->dq->lock); 2971 req = (struct request *) cqr->callback_data; 2972 blk_mq_requeue_request(req, false); 2973 spin_unlock_irq(&cqr->dq->lock); 2974 2975 return 0; 2976 } 2977 2978 /* 2979 * Go through all request on the dasd_block request queue, cancel them 2980 * on the respective dasd_device, and return them to the generic 2981 * block layer. 2982 */ 2983 static int dasd_flush_block_queue(struct dasd_block *block) 2984 { 2985 struct dasd_ccw_req *cqr, *n; 2986 int rc, i; 2987 struct list_head flush_queue; 2988 unsigned long flags; 2989 2990 INIT_LIST_HEAD(&flush_queue); 2991 spin_lock_bh(&block->queue_lock); 2992 rc = 0; 2993 restart: 2994 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2995 /* if this request currently owned by a dasd_device cancel it */ 2996 if (cqr->status >= DASD_CQR_QUEUED) 2997 rc = dasd_cancel_req(cqr); 2998 if (rc < 0) 2999 break; 3000 /* Rechain request (including erp chain) so it won't be 3001 * touched by the dasd_block_tasklet anymore. 3002 * Replace the callback so we notice when the request 3003 * is returned from the dasd_device layer. 3004 */ 3005 cqr->callback = _dasd_wake_block_flush_cb; 3006 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 3007 list_move_tail(&cqr->blocklist, &flush_queue); 3008 if (i > 1) 3009 /* moved more than one request - need to restart */ 3010 goto restart; 3011 } 3012 spin_unlock_bh(&block->queue_lock); 3013 /* Now call the callback function of flushed requests */ 3014 restart_cb: 3015 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3016 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3017 /* Process finished ERP request. 
*/ 3018 if (cqr->refers) { 3019 spin_lock_bh(&block->queue_lock); 3020 __dasd_process_erp(block->base, cqr); 3021 spin_unlock_bh(&block->queue_lock); 3022 /* restart list_for_xx loop since dasd_process_erp 3023 * might remove multiple elements */ 3024 goto restart_cb; 3025 } 3026 /* call the callback function */ 3027 spin_lock_irqsave(&cqr->dq->lock, flags); 3028 cqr->endclk = get_tod_clock(); 3029 list_del_init(&cqr->blocklist); 3030 __dasd_cleanup_cqr(cqr); 3031 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3032 } 3033 return rc; 3034 } 3035 3036 /* 3037 * Schedules a call to dasd_block_tasklet over the block tasklet. 3038 */ 3039 void dasd_schedule_block_bh(struct dasd_block *block) 3040 { 3041 /* Protect against rescheduling. */ 3042 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3043 return; 3044 /* life cycle of block is bound to its base device */ 3045 dasd_get_device(block->base); 3046 tasklet_hi_schedule(&block->tasklet); 3047 } 3048 EXPORT_SYMBOL(dasd_schedule_block_bh); 3049 3050 3051 /* 3052 * SECTION: external block device operations 3053 * (request queue handling, open, release, etc.) 3054 */ 3055 3056 /* 3057 * DASD request queue function. Called by the blk-mq layer. 3058 */ 3059 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3060 const struct blk_mq_queue_data *qd) 3061 { 3062 struct dasd_block *block = hctx->queue->queuedata; 3063 struct dasd_queue *dq = hctx->driver_data; 3064 struct request *req = qd->rq; 3065 struct dasd_device *basedev; 3066 struct dasd_ccw_req *cqr; 3067 blk_status_t rc = BLK_STS_OK; 3068 3069 basedev = block->base; 3070 spin_lock_irq(&dq->lock); 3071 if (basedev->state < DASD_STATE_READY) { 3072 DBF_DEV_EVENT(DBF_ERR, basedev, 3073 "device not ready for request %p", req); 3074 rc = BLK_STS_IOERR; 3075 goto out; 3076 } 3077 3078 /* 3079 * If the device is stopped, do not fetch new requests, 3080 * unless failfast is active, which will let requests fail 3081 * immediately in __dasd_block_start_head(). 3082 */ 3083 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3084 DBF_DEV_EVENT(DBF_ERR, basedev, 3085 "device stopped request %p", req); 3086 rc = BLK_STS_RESOURCE; 3087 goto out; 3088 } 3089 3090 if (basedev->features & DASD_FEATURE_READONLY && 3091 rq_data_dir(req) == WRITE) { 3092 DBF_DEV_EVENT(DBF_ERR, basedev, 3093 "Rejecting write request %p", req); 3094 rc = BLK_STS_IOERR; 3095 goto out; 3096 } 3097 3098 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3099 (basedev->features & DASD_FEATURE_FAILFAST || 3100 blk_noretry_request(req))) { 3101 DBF_DEV_EVENT(DBF_ERR, basedev, 3102 "Rejecting failfast request %p", req); 3103 rc = BLK_STS_IOERR; 3104 goto out; 3105 } 3106 3107 cqr = basedev->discipline->build_cp(basedev, block, req); 3108 if (IS_ERR(cqr)) { 3109 if (PTR_ERR(cqr) == -EBUSY || 3110 PTR_ERR(cqr) == -ENOMEM || 3111 PTR_ERR(cqr) == -EAGAIN) { 3112 rc = BLK_STS_RESOURCE; 3113 goto out; 3114 } 3115 DBF_DEV_EVENT(DBF_ERR, basedev, 3116 "CCW creation failed (rc=%ld) on request %p", 3117 PTR_ERR(cqr), req); 3118 rc = BLK_STS_IOERR; 3119 goto out; 3120 } 3121 /* 3122 * Note: callback is set to dasd_return_cqr_cb in 3123 * __dasd_block_start_head to cover erp requests as well 3124 */ 3125 cqr->callback_data = req; 3126 cqr->status = DASD_CQR_FILLED; 3127 cqr->dq = dq; 3128 3129 blk_mq_start_request(req); 3130 spin_lock(&block->queue_lock); 3131 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3132 INIT_LIST_HEAD(&cqr->devlist); 3133 dasd_profile_start(block, cqr, req); 3134 dasd_schedule_block_bh(block);
3135 spin_unlock(&block->queue_lock); 3136 3137 out: 3138 spin_unlock_irq(&dq->lock); 3139 return rc; 3140 } 3141 3142 /* 3143 * Block timeout callback, called from the block layer 3144 * 3145 * Return values: 3146 * BLK_EH_RESET_TIMER if the request should be left running 3147 * BLK_EH_DONE if the request is handled or terminated 3148 * by the driver. 3149 */ 3150 enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved) 3151 { 3152 struct dasd_block *block = req->q->queuedata; 3153 struct dasd_device *device; 3154 struct dasd_ccw_req *cqr; 3155 unsigned long flags; 3156 int rc = 0; 3157 3158 cqr = blk_mq_rq_to_pdu(req); 3159 if (!cqr) 3160 return BLK_EH_DONE; 3161 3162 spin_lock_irqsave(&cqr->dq->lock, flags); 3163 device = cqr->startdev ? cqr->startdev : block->base; 3164 if (!device->blk_timeout) { 3165 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3166 return BLK_EH_RESET_TIMER; 3167 } 3168 DBF_DEV_EVENT(DBF_WARNING, device, 3169 " dasd_times_out cqr %p status %x", 3170 cqr, cqr->status); 3171 3172 spin_lock(&block->queue_lock); 3173 spin_lock(get_ccwdev_lock(device->cdev)); 3174 cqr->retries = -1; 3175 cqr->intrc = -ETIMEDOUT; 3176 if (cqr->status >= DASD_CQR_QUEUED) { 3177 rc = __dasd_cancel_req(cqr); 3178 } else if (cqr->status == DASD_CQR_FILLED || 3179 cqr->status == DASD_CQR_NEED_ERP) { 3180 cqr->status = DASD_CQR_TERMINATED; 3181 } else if (cqr->status == DASD_CQR_IN_ERP) { 3182 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3183 3184 list_for_each_entry_safe(searchcqr, nextcqr, 3185 &block->ccw_queue, blocklist) { 3186 tmpcqr = searchcqr; 3187 while (tmpcqr->refers) 3188 tmpcqr = tmpcqr->refers; 3189 if (tmpcqr != cqr) 3190 continue; 3191 /* searchcqr is an ERP request for cqr */ 3192 searchcqr->retries = -1; 3193 searchcqr->intrc = -ETIMEDOUT; 3194 if (searchcqr->status >= DASD_CQR_QUEUED) { 3195 rc = __dasd_cancel_req(searchcqr); 3196 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3197 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3198 searchcqr->status = DASD_CQR_TERMINATED; 3199 rc = 0; 3200 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3201 /* 3202 * Shouldn't happen; most recent ERP 3203 * request is at the front of queue 3204 */ 3205 continue; 3206 } 3207 break; 3208 } 3209 } 3210 spin_unlock(get_ccwdev_lock(device->cdev)); 3211 dasd_schedule_block_bh(block); 3212 spin_unlock(&block->queue_lock); 3213 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3214 3215 return rc ? BLK_EH_RESET_TIMER : BLK_EH_DONE; 3216 } 3217 3218 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3219 unsigned int idx) 3220 { 3221 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3222 3223 if (!dq) 3224 return -ENOMEM; 3225 3226 spin_lock_init(&dq->lock); 3227 hctx->driver_data = dq; 3228 3229 return 0; 3230 } 3231 3232 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3233 { 3234 kfree(hctx->driver_data); 3235 hctx->driver_data = NULL; 3236 } 3237 3238 static void dasd_request_done(struct request *req) 3239 { 3240 blk_mq_end_request(req, 0); 3241 blk_mq_run_hw_queues(req->q, true); 3242 } 3243 3244 static struct blk_mq_ops dasd_mq_ops = { 3245 .queue_rq = do_dasd_request, 3246 .complete = dasd_request_done, 3247 .timeout = dasd_times_out, 3248 .init_hctx = dasd_init_hctx, 3249 .exit_hctx = dasd_exit_hctx, 3250 }; 3251 3252 /* 3253 * Allocate and initialize request queue and default I/O scheduler. 
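 * The blk-mq tag set is sized from the module parameters queue_depth and
 * nr_hw_queues; every hardware context gets its own dasd_queue with a
 * private lock via dasd_init_hctx().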
3254 */ 3255 static int dasd_alloc_queue(struct dasd_block *block) 3256 { 3257 int rc; 3258 3259 block->tag_set.ops = &dasd_mq_ops; 3260 block->tag_set.cmd_size = sizeof(struct dasd_ccw_req); 3261 block->tag_set.nr_hw_queues = nr_hw_queues; 3262 block->tag_set.queue_depth = queue_depth; 3263 block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; 3264 block->tag_set.numa_node = NUMA_NO_NODE; 3265 3266 rc = blk_mq_alloc_tag_set(&block->tag_set); 3267 if (rc) 3268 return rc; 3269 3270 block->request_queue = blk_mq_init_queue(&block->tag_set); 3271 if (IS_ERR(block->request_queue)) 3272 return PTR_ERR(block->request_queue); 3273 3274 block->request_queue->queuedata = block; 3275 3276 return 0; 3277 } 3278 3279 /* 3280 * Deactivate and free request queue. 3281 */ 3282 static void dasd_free_queue(struct dasd_block *block) 3283 { 3284 if (block->request_queue) { 3285 blk_cleanup_queue(block->request_queue); 3286 blk_mq_free_tag_set(&block->tag_set); 3287 block->request_queue = NULL; 3288 } 3289 } 3290 3291 static int dasd_open(struct block_device *bdev, fmode_t mode) 3292 { 3293 struct dasd_device *base; 3294 int rc; 3295 3296 base = dasd_device_from_gendisk(bdev->bd_disk); 3297 if (!base) 3298 return -ENODEV; 3299 3300 atomic_inc(&base->block->open_count); 3301 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3302 rc = -ENODEV; 3303 goto unlock; 3304 } 3305 3306 if (!try_module_get(base->discipline->owner)) { 3307 rc = -EINVAL; 3308 goto unlock; 3309 } 3310 3311 if (dasd_probeonly) { 3312 dev_info(&base->cdev->dev, 3313 "Accessing the DASD failed because it is in " 3314 "probeonly mode\n"); 3315 rc = -EPERM; 3316 goto out; 3317 } 3318 3319 if (base->state <= DASD_STATE_BASIC) { 3320 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3321 " Cannot open unrecognized device"); 3322 rc = -ENODEV; 3323 goto out; 3324 } 3325 3326 if ((mode & FMODE_WRITE) && 3327 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3328 (base->features & DASD_FEATURE_READONLY))) { 3329 rc = -EROFS; 3330 goto out; 3331 } 3332 3333 dasd_put_device(base); 3334 return 0; 3335 3336 out: 3337 module_put(base->discipline->owner); 3338 unlock: 3339 atomic_dec(&base->block->open_count); 3340 dasd_put_device(base); 3341 return rc; 3342 } 3343 3344 static void dasd_release(struct gendisk *disk, fmode_t mode) 3345 { 3346 struct dasd_device *base = dasd_device_from_gendisk(disk); 3347 if (base) { 3348 atomic_dec(&base->block->open_count); 3349 module_put(base->discipline->owner); 3350 dasd_put_device(base); 3351 } 3352 } 3353 3354 /* 3355 * Return disk geometry. 
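 * The partition start is reported in device blocks: the 512-byte sector
 * offset from the block layer is shifted right by s2b_shift (the
 * sector-to-block shift of the device).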
3356 */ 3357 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3358 { 3359 struct dasd_device *base; 3360 3361 base = dasd_device_from_gendisk(bdev->bd_disk); 3362 if (!base) 3363 return -ENODEV; 3364 3365 if (!base->discipline || 3366 !base->discipline->fill_geometry) { 3367 dasd_put_device(base); 3368 return -EINVAL; 3369 } 3370 base->discipline->fill_geometry(base->block, geo); 3371 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3372 dasd_put_device(base); 3373 return 0; 3374 } 3375 3376 const struct block_device_operations 3377 dasd_device_operations = { 3378 .owner = THIS_MODULE, 3379 .open = dasd_open, 3380 .release = dasd_release, 3381 .ioctl = dasd_ioctl, 3382 .compat_ioctl = dasd_ioctl, 3383 .getgeo = dasd_getgeo, 3384 .set_read_only = dasd_set_read_only, 3385 }; 3386 3387 /******************************************************************************* 3388 * end of block device operations 3389 */ 3390 3391 static void 3392 dasd_exit(void) 3393 { 3394 #ifdef CONFIG_PROC_FS 3395 dasd_proc_exit(); 3396 #endif 3397 dasd_eer_exit(); 3398 kmem_cache_destroy(dasd_page_cache); 3399 dasd_page_cache = NULL; 3400 dasd_gendisk_exit(); 3401 dasd_devmap_exit(); 3402 if (dasd_debug_area != NULL) { 3403 debug_unregister(dasd_debug_area); 3404 dasd_debug_area = NULL; 3405 } 3406 dasd_statistics_removeroot(); 3407 } 3408 3409 /* 3410 * SECTION: common functions for ccw_driver use 3411 */ 3412 3413 /* 3414 * Is the device read-only? 3415 * Note that this function does not report the setting of the 3416 * readonly device attribute, but how it is configured in z/VM. 3417 */ 3418 int dasd_device_is_ro(struct dasd_device *device) 3419 { 3420 struct ccw_dev_id dev_id; 3421 struct diag210 diag_data; 3422 int rc; 3423 3424 if (!MACHINE_IS_VM) 3425 return 0; 3426 ccw_device_get_id(device->cdev, &dev_id); 3427 memset(&diag_data, 0, sizeof(diag_data)); 3428 diag_data.vrdcdvno = dev_id.devno; 3429 diag_data.vrdclen = sizeof(diag_data); 3430 rc = diag210(&diag_data); 3431 if (rc == 0 || rc == 2) { 3432 return diag_data.vrdcvfla & 0x80; 3433 } else { 3434 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3435 dev_id.devno, rc); 3436 return 0; 3437 } 3438 } 3439 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3440 3441 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3442 { 3443 struct ccw_device *cdev = data; 3444 int ret; 3445 3446 ret = ccw_device_set_online(cdev); 3447 if (ret) 3448 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3449 dev_name(&cdev->dev), ret); 3450 } 3451 3452 /* 3453 * Initial attempt at a probe function. this can be simplified once 3454 * the other detection code is gone. 3455 */ 3456 int dasd_generic_probe(struct ccw_device *cdev) 3457 { 3458 int ret; 3459 3460 ret = dasd_add_sysfs_files(cdev); 3461 if (ret) { 3462 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 3463 "dasd_generic_probe: could not add " 3464 "sysfs entries"); 3465 return ret; 3466 } 3467 cdev->handler = &dasd_int_handler; 3468 3469 /* 3470 * Automatically online either all dasd devices (dasd_autodetect) 3471 * or all devices specified with dasd= parameters during 3472 * initial probe. 
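 * The online processing itself runs via async_schedule() so that the probe
 * does not block while the device is being set online.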
*/ 3474 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) || 3475 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3476 async_schedule(dasd_generic_auto_online, cdev); 3477 return 0; 3478 } 3479 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3480 3481 void dasd_generic_free_discipline(struct dasd_device *device) 3482 { 3483 /* Forget the discipline information. */ 3484 if (device->discipline) { 3485 if (device->discipline->uncheck_device) 3486 device->discipline->uncheck_device(device); 3487 module_put(device->discipline->owner); 3488 device->discipline = NULL; 3489 } 3490 if (device->base_discipline) { 3491 module_put(device->base_discipline->owner); 3492 device->base_discipline = NULL; 3493 } 3494 } 3495 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3496 3497 /* 3498 * This will one day be called from a global not_oper handler. 3499 * It is also used by driver_unregister during module unload. 3500 */ 3501 void dasd_generic_remove(struct ccw_device *cdev) 3502 { 3503 struct dasd_device *device; 3504 struct dasd_block *block; 3505 3506 cdev->handler = NULL; 3507 3508 device = dasd_device_from_cdev(cdev); 3509 if (IS_ERR(device)) { 3510 dasd_remove_sysfs_files(cdev); 3511 return; 3512 } 3513 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3514 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3515 /* Already doing offline processing */ 3516 dasd_put_device(device); 3517 dasd_remove_sysfs_files(cdev); 3518 return; 3519 } 3520 /* 3521 * This device is removed unconditionally. Set the offline 3522 * flag to prevent dasd_open from opening it while it is 3523 * not quite shut down yet. 3524 */ 3525 dasd_set_target_state(device, DASD_STATE_NEW); 3526 /* dasd_delete_device destroys the device reference. */ 3527 block = device->block; 3528 dasd_delete_device(device); 3529 /* 3530 * The life cycle of the block is bound to the device, so delete it 3531 * after the device has been safely removed. 3532 */ 3533 if (block) 3534 dasd_free_block(block); 3535 3536 dasd_remove_sysfs_files(cdev); 3537 } 3538 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3539 3540 /* 3541 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3542 * the device is detected for the first time and is supposed to be used 3543 * or the user has started activation through sysfs. 3544 */ 3545 int dasd_generic_set_online(struct ccw_device *cdev, 3546 struct dasd_discipline *base_discipline) 3547 { 3548 struct dasd_discipline *discipline; 3549 struct dasd_device *device; 3550 int rc; 3551 3552 /* first online clears initial online feature flag */ 3553 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3554 device = dasd_create_device(cdev); 3555 if (IS_ERR(device)) 3556 return PTR_ERR(device); 3557 3558 discipline = base_discipline; 3559 if (device->features & DASD_FEATURE_USEDIAG) { 3560 if (!dasd_diag_discipline_pointer) { 3561 /* Try to load the required module. */ 3562 rc = request_module(DASD_DIAG_MOD); 3563 if (rc) { 3564 pr_warn("%s Setting the DASD online failed " 3565 "because the required module %s " 3566 "could not be loaded (rc=%d)\n", 3567 dev_name(&cdev->dev), DASD_DIAG_MOD, 3568 rc); 3569 dasd_delete_device(device); 3570 return -ENODEV; 3571 } 3572 } 3573 /* Module init could have failed, so check again here after 3574 * request_module().
*/ 3575 if (!dasd_diag_discipline_pointer) { 3576 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3577 dev_name(&cdev->dev)); 3578 dasd_delete_device(device); 3579 return -ENODEV; 3580 } 3581 discipline = dasd_diag_discipline_pointer; 3582 } 3583 if (!try_module_get(base_discipline->owner)) { 3584 dasd_delete_device(device); 3585 return -EINVAL; 3586 } 3587 if (!try_module_get(discipline->owner)) { 3588 module_put(base_discipline->owner); 3589 dasd_delete_device(device); 3590 return -EINVAL; 3591 } 3592 device->base_discipline = base_discipline; 3593 device->discipline = discipline; 3594 3595 /* check_device will allocate block device if necessary */ 3596 rc = discipline->check_device(device); 3597 if (rc) { 3598 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3599 dev_name(&cdev->dev), discipline->name, rc); 3600 module_put(discipline->owner); 3601 module_put(base_discipline->owner); 3602 dasd_delete_device(device); 3603 return rc; 3604 } 3605 3606 dasd_set_target_state(device, DASD_STATE_ONLINE); 3607 if (device->state <= DASD_STATE_KNOWN) { 3608 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3609 dev_name(&cdev->dev)); 3610 rc = -ENODEV; 3611 dasd_set_target_state(device, DASD_STATE_NEW); 3612 if (device->block) 3613 dasd_free_block(device->block); 3614 dasd_delete_device(device); 3615 } else 3616 pr_debug("dasd_generic device %s found\n", 3617 dev_name(&cdev->dev)); 3618 3619 wait_event(dasd_init_waitq, _wait_for_device(device)); 3620 3621 dasd_put_device(device); 3622 return rc; 3623 } 3624 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3625 3626 int dasd_generic_set_offline(struct ccw_device *cdev) 3627 { 3628 struct dasd_device *device; 3629 struct dasd_block *block; 3630 int max_count, open_count, rc; 3631 unsigned long flags; 3632 3633 rc = 0; 3634 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3635 device = dasd_device_from_cdev_locked(cdev); 3636 if (IS_ERR(device)) { 3637 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3638 return PTR_ERR(device); 3639 } 3640 3641 /* 3642 * We must make sure that this device is currently not in use. 3643 * The open_count is increased for every opener, that includes 3644 * the blkdev_get in dasd_scan_partitions. We are only interested 3645 * in the other openers. 3646 */ 3647 if (device->block) { 3648 max_count = device->block->bdev ? 0 : -1; 3649 open_count = atomic_read(&device->block->open_count); 3650 if (open_count > max_count) { 3651 if (open_count > 0) 3652 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3653 dev_name(&cdev->dev), open_count); 3654 else 3655 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3656 dev_name(&cdev->dev)); 3657 rc = -EBUSY; 3658 goto out_err; 3659 } 3660 } 3661 3662 /* 3663 * Test if the offline processing is already running and exit if so. 
3664 * If a safe offline is being processed, this can only be a normal 3665 * offline, which is allowed to overtake the safe offline and to 3666 * cancel any I/O we no longer want to wait for. 3667 */ 3668 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3669 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3670 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3671 &device->flags); 3672 } else { 3673 rc = -EBUSY; 3674 goto out_err; 3675 } 3676 } 3677 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3678 3679 /* 3680 * If safe_offline is requested, set the safe_offline_running flag and 3681 * clear safe_offline so that a subsequent call to normal offline 3682 * can overtake the safe_offline processing. 3683 */ 3684 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3685 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3686 /* need to unlock here to wait for outstanding I/O */ 3687 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3688 /* 3689 * If we want to set the device safe offline, all I/O operations 3690 * should be finished before continuing the offline process, 3691 * so sync the bdev first and then wait for our queues to become 3692 * empty. 3693 */ 3694 if (device->block) { 3695 rc = fsync_bdev(device->block->bdev); 3696 if (rc != 0) 3697 goto interrupted; 3698 } 3699 dasd_schedule_device_bh(device); 3700 rc = wait_event_interruptible(shutdown_waitq, 3701 _wait_for_empty_queues(device)); 3702 if (rc != 0) 3703 goto interrupted; 3704 3705 /* 3706 * Check if a normal offline process overtook the offline 3707 * processing; in this case simply do nothing besides returning 3708 * that we got interrupted. 3709 * Otherwise mark safe offline as no longer running and 3710 * continue with normal offline. 3711 */ 3712 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3713 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3714 rc = -ERESTARTSYS; 3715 goto out_err; 3716 } 3717 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3718 } 3719 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3720 3721 dasd_set_target_state(device, DASD_STATE_NEW); 3722 /* dasd_delete_device destroys the device reference. */ 3723 block = device->block; 3724 dasd_delete_device(device); 3725 /* 3726 * The life cycle of the block is bound to the device, so delete it 3727 * after the device has been safely removed. 3728 */ 3729 if (block) 3730 dasd_free_block(block); 3731 3732 return 0; 3733 3734 interrupted: 3735 /* interrupted by signal */ 3736 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3737 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3738 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3739 out_err: 3740 dasd_put_device(device); 3741 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3742 return rc; 3743 } 3744 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3745 3746 int dasd_generic_last_path_gone(struct dasd_device *device) 3747 { 3748 struct dasd_ccw_req *cqr; 3749 3750 dev_warn(&device->cdev->dev, "No operational channel path is left " 3751 "for the device\n"); 3752 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3753 /* First of all call extended error reporting. */ 3754 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3755 3756 if (device->state < DASD_STATE_BASIC) 3757 return 0; 3758 /* Device is active. We want to keep it.
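 * Requests that are currently in I/O or waiting for a clear interrupt are
 * reset to DASD_CQR_QUEUED, and the device is stopped with
 * DASD_STOPPED_DC_WAIT until a path becomes operational again (see
 * dasd_generic_path_operational).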
*/ 3759 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3760 if ((cqr->status == DASD_CQR_IN_IO) || 3761 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3762 cqr->status = DASD_CQR_QUEUED; 3763 cqr->retries++; 3764 } 3765 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3766 dasd_device_clear_timer(device); 3767 dasd_schedule_device_bh(device); 3768 return 1; 3769 } 3770 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3771 3772 int dasd_generic_path_operational(struct dasd_device *device) 3773 { 3774 dev_info(&device->cdev->dev, "A channel path to the device has become " 3775 "operational\n"); 3776 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3777 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3778 dasd_schedule_device_bh(device); 3779 if (device->block) { 3780 dasd_schedule_block_bh(device->block); 3781 if (device->block->request_queue) 3782 blk_mq_run_hw_queues(device->block->request_queue, 3783 true); 3784 } 3785 3786 if (!device->stopped) 3787 wake_up(&generic_waitq); 3788 3789 return 1; 3790 } 3791 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3792 3793 int dasd_generic_notify(struct ccw_device *cdev, int event) 3794 { 3795 struct dasd_device *device; 3796 int ret; 3797 3798 device = dasd_device_from_cdev_locked(cdev); 3799 if (IS_ERR(device)) 3800 return 0; 3801 ret = 0; 3802 switch (event) { 3803 case CIO_GONE: 3804 case CIO_BOXED: 3805 case CIO_NO_PATH: 3806 dasd_path_no_path(device); 3807 ret = dasd_generic_last_path_gone(device); 3808 break; 3809 case CIO_OPER: 3810 ret = 1; 3811 if (dasd_path_get_opm(device)) 3812 ret = dasd_generic_path_operational(device); 3813 break; 3814 } 3815 dasd_put_device(device); 3816 return ret; 3817 } 3818 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3819 3820 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3821 { 3822 struct dasd_device *device; 3823 int chp, oldopm, hpfpm, ifccpm; 3824 3825 device = dasd_device_from_cdev_locked(cdev); 3826 if (IS_ERR(device)) 3827 return; 3828 3829 oldopm = dasd_path_get_opm(device); 3830 for (chp = 0; chp < 8; chp++) { 3831 if (path_event[chp] & PE_PATH_GONE) { 3832 dasd_path_notoper(device, chp); 3833 } 3834 if (path_event[chp] & PE_PATH_AVAILABLE) { 3835 dasd_path_available(device, chp); 3836 dasd_schedule_device_bh(device); 3837 } 3838 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3839 if (!dasd_path_is_operational(device, chp) && 3840 !dasd_path_need_verify(device, chp)) { 3841 /* 3842 * we can not establish a pathgroup on an 3843 * unavailable path, so trigger a path 3844 * verification first 3845 */ 3846 dasd_path_available(device, chp); 3847 dasd_schedule_device_bh(device); 3848 } 3849 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3850 "Pathgroup re-established\n"); 3851 if (device->discipline->kick_validate) 3852 device->discipline->kick_validate(device); 3853 } 3854 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3855 dasd_path_fcsec_update(device, chp); 3856 dasd_schedule_device_bh(device); 3857 } 3858 } 3859 hpfpm = dasd_path_get_hpfpm(device); 3860 ifccpm = dasd_path_get_ifccpm(device); 3861 if (!dasd_path_get_opm(device) && hpfpm) { 3862 /* 3863 * device has no operational paths but at least one path is 3864 * disabled due to HPF errors 3865 * disable HPF at all and use the path(s) again 3866 */ 3867 if (device->discipline->disable_hpf) 3868 device->discipline->disable_hpf(device); 3869 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3870 dasd_path_set_tbvpm(device, hpfpm); 3871 dasd_schedule_device_bh(device); 3872 
		dasd_schedule_requeue(device);
	} else if (!dasd_path_get_opm(device) && ifccpm) {
		/*
		 * The device has no operational paths, but at least one path
		 * is disabled due to IFCC errors. Trigger path verification
		 * on the paths with IFCC errors.
		 */
		dasd_path_set_tbvpm(device, ifccpm);
		dasd_schedule_device_bh(device);
	}
	if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) {
		dev_warn(&device->cdev->dev,
			 "No verified channel paths remain for the device\n");
		DBF_DEV_EVENT(DBF_WARNING, device,
			      "%s", "last verified path gone");
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);
		dasd_device_set_stop_bits(device,
					  DASD_STOPPED_DC_WAIT);
	}
	dasd_put_device(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_path_event);

int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm)
{
	if (!dasd_path_get_opm(device) && lpm) {
		dasd_path_set_opm(device, lpm);
		dasd_generic_path_operational(device);
	} else
		dasd_path_add_opm(device, lpm);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_verify_path);

void dasd_generic_space_exhaust(struct dasd_device *device,
				struct dasd_ccw_req *cqr)
{
	dasd_eer_write(device, NULL, DASD_EER_NOSPC);

	if (device->state < DASD_STATE_BASIC)
		return;

	if (cqr->status == DASD_CQR_IN_IO ||
	    cqr->status == DASD_CQR_CLEAR_PENDING) {
		cqr->status = DASD_CQR_QUEUED;
		cqr->retries++;
	}
	dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust);

void dasd_generic_space_avail(struct dasd_device *device)
{
	dev_info(&device->cdev->dev, "Extent pool space is available\n");
	DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available");

	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC);
	dasd_schedule_device_bh(device);

	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->request_queue)
			blk_mq_run_hw_queues(device->block->request_queue, true);
	}
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_generic_space_avail);

/*
 * Clear active requests and requeue them to the block layer if possible.
 */
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check the status and move the request to the requeue queue. */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

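	/*
	 * Second pass, run without the ccwdev lock: wait for terminated
	 * requests to leave the CLEAR_PENDING state, hand block layer
	 * requests back to blk-mq and free any ERP requests chained to
	 * them via cqr->refers.
	 */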
	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * Requeueing to the block layer only works for block device
		 * requests; internal device requests are skipped here and
		 * spliced back to the device queue below.
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid block
		 * device, no need to check again. All ERP requests
		 * (cqr->refers) have a cqr->block pointer copied from the
		 * original cqr.
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * If requests remain, they are internal requests and go back to the
	 * device queue.
	 */
	if (!list_empty(&requeue_queue)) {
		/* splice the remaining requests back to the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10 * HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
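
/*
 * Illustrative sketch of how a discipline might call
 * dasd_generic_read_dev_chars() to read the device characteristics during
 * online processing; the buffer and the magic value below are placeholders
 * and depend on the calling discipline:
 *
 *	char rdc_buffer[64];
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
 *					 rdc_buffer, sizeof(rdc_buffer));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Reading device characteristics failed, rc=%d\n", rc);
 */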

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);