// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
				   unsigned int);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Bring the device from KNOWN to BASIC: allocate the gendisk and set up
 * the debugfs entries and the debug area.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Bring the device back from BASIC to KNOWN: terminate any running i/o,
 * flush the request queue and release the debug area and debugfs entries.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
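 *
 * (Summary derived from the helpers above: the states are walked in the
 * order NEW -> KNOWN -> BASIC -> READY -> ONLINE, with UNFMT entered as a
 * side state when the initial analysis fails. dasd_increase_state() and
 * dasd_decrease_state() move the device one step at a time towards
 * device->target.)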
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until the state change is complete.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
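	 * (Note: the channel queue length sampled above and below is capped
	 * at 31 so that it always fits into the 32-entry dasd_io_nr_req
	 * histogram.)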
	 */
	device = cqr->startdev;
	if (!device->profile.data)
		return;

	spin_lock(get_ccwdev_lock(device->cdev));
	counter = 1; /* request is not yet queued on the start device */
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock(&device->profile.lock);
	device->profile.data->dasd_io_nr_req[counter]++;
	if (rq_data_dir(req) == READ)
		device->profile.data->dasd_read_nr_req[counter]++;
	spin_unlock(&device->profile.lock);
}

/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index) \
{ \
	for (index = 0; index < 31 && value >> (2+index); index++) \
		; \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
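
/*
 * Typical caller pattern (illustrative sketch only, not code from this file;
 * the magic value and the sizes are whatever the calling discipline uses):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	if (IS_ERR(cqr))
 *		return cqr;
 *	ccw = cqr->cpaddr;	/@ fill in the channel program here @/
 *	...			/@ dasd_sfree_request() releases it later @/
 */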
EXPORT_SYMBOL(dasd_smalloc_request);

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the amount of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		/* this is equivalent to CC=3 for SSCH, report this to EER */
		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->gdp)
			blk_mq_run_hw_queues(device->block->gdp->queue, true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
		(irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
		 irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	if (sense[1] & SNS1_NO_REC_FOUND)
		return 1;

	if ((sense[1] & SNS1_INV_TRACK_FORMAT) &&
	    scsw_is_tm(&irb->scsw) &&
	    !(sense[2] & SNS2_ENV_DATA_PRESENT))
		return 1;

	return 0;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
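 *
 * (Overview, summarizing the code below: an IRB error pointer is treated as
 * a killed request, error and unsolicited interrupts are handed to the
 * discipline for sense evaluation, ESE devices may get a format request
 * queued on demand, and a successfully completed request immediately starts
 * the next queued request as a fast path.)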
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int it_suppressed = 0;
	struct request *req;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			it_suppressed = (sense[1] & SNS1_INV_TRACK_FORMAT) &&
				!(sense[2] & SNS2_ENV_DATA_PRESENT) &&
				test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(it_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		req = dasd_get_callback_data(cqr);
		if (!req) {
			cqr->status = DASD_CQR_ERROR;
			return;
		}
		if (rq_data_dir(req) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else { /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change) {
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status */
		snprintf(errorstring, ERRORLENGTH, "12 %p %02x", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_cqr(device, cqr);
			spin_unlock_bh(&block->queue_lock);
		}
	}
}

/*
 * check if device should be autoquiesced due to too many timeouts
 */
static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device,
						    struct dasd_ccw_req *cqr)
{
	if ((device->default_retries - cqr->retries) >= device->aq_timeouts)
		dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS);
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
1977 */ 1978 static void __dasd_device_check_expire(struct dasd_device *device) 1979 { 1980 struct dasd_ccw_req *cqr; 1981 1982 if (list_empty(&device->ccw_queue)) 1983 return; 1984 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1985 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1986 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1987 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1988 /* 1989 * IO in safe offline processing should not 1990 * run out of retries 1991 */ 1992 cqr->retries++; 1993 } 1994 if (device->discipline->term_IO(cqr) != 0) { 1995 /* Hmpf, try again in 5 sec */ 1996 dev_err(&device->cdev->dev, 1997 "cqr %p timed out (%lus) but cannot be " 1998 "ended, retrying in 5 s\n", 1999 cqr, (cqr->expires/HZ)); 2000 cqr->expires += 5*HZ; 2001 dasd_device_set_timer(device, 5*HZ); 2002 } else { 2003 dev_err(&device->cdev->dev, 2004 "cqr %p timed out (%lus), %i retries " 2005 "remaining\n", cqr, (cqr->expires/HZ), 2006 cqr->retries); 2007 } 2008 __dasd_device_check_autoquiesce_timeout(device, cqr); 2009 } 2010 } 2011 2012 /* 2013 * Return 1 when the device is not eligible for I/O. 2014 */ 2015 static int __dasd_device_is_unusable(struct dasd_device *device, 2016 struct dasd_ccw_req *cqr) 2017 { 2018 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2019 2020 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2021 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2022 /* 2023 * the DASD is being set offline, 2024 * but it is not a safe offline where we have to allow I/O 2025 */ 2026 return 1; 2027 } 2028 if (device->stopped) { 2029 if (device->stopped & mask) { 2030 /* stopped, and this CQR will not change that. */ 2031 return 1; 2032 } 2033 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2034 /* The CQR is not able to change the device to an 2035 * operational state. */ 2036 return 1; 2037 } 2038 /* CQR required to get device operational. */ 2039 } 2040 return 0; 2041 } 2042 2043 /* 2044 * Take a look at the first request on the ccw queue and check 2045 * if it needs to be started.
2046 */ 2047 static void __dasd_device_start_head(struct dasd_device *device) 2048 { 2049 struct dasd_ccw_req *cqr; 2050 int rc; 2051 2052 if (list_empty(&device->ccw_queue)) 2053 return; 2054 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2055 if (cqr->status != DASD_CQR_QUEUED) 2056 return; 2057 /* if device is not usable return request to upper layer */ 2058 if (__dasd_device_is_unusable(device, cqr)) { 2059 cqr->intrc = -EAGAIN; 2060 cqr->status = DASD_CQR_CLEARED; 2061 dasd_schedule_device_bh(device); 2062 return; 2063 } 2064 2065 rc = device->discipline->start_IO(cqr); 2066 if (rc == 0) 2067 dasd_device_set_timer(device, cqr->expires); 2068 else if (rc == -EACCES) { 2069 dasd_schedule_device_bh(device); 2070 } else 2071 /* Hmpf, try again in 1/2 sec */ 2072 dasd_device_set_timer(device, 50); 2073 } 2074 2075 static void __dasd_device_check_path_events(struct dasd_device *device) 2076 { 2077 __u8 tbvpm, fcsecpm; 2078 int rc; 2079 2080 tbvpm = dasd_path_get_tbvpm(device); 2081 fcsecpm = dasd_path_get_fcsecpm(device); 2082 2083 if (!tbvpm && !fcsecpm) 2084 return; 2085 2086 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2087 return; 2088 2089 dasd_path_clear_all_verify(device); 2090 dasd_path_clear_all_fcsec(device); 2091 2092 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2093 if (rc) { 2094 dasd_path_add_tbvpm(device, tbvpm); 2095 dasd_path_add_fcsecpm(device, fcsecpm); 2096 dasd_device_set_timer(device, 50); 2097 } 2098 }; 2099 2100 /* 2101 * Go through all request on the dasd_device request queue, 2102 * terminate them on the cdev if necessary, and return them to the 2103 * submitting layer via callback. 2104 * Note: 2105 * Make sure that all 'submitting layers' still exist when 2106 * this function is called!. In other words, when 'device' is a base 2107 * device then all block layer requests must have been removed before 2108 * via dasd_flush_block_queue. 2109 */ 2110 int dasd_flush_device_queue(struct dasd_device *device) 2111 { 2112 struct dasd_ccw_req *cqr, *n; 2113 int rc; 2114 struct list_head flush_queue; 2115 2116 INIT_LIST_HEAD(&flush_queue); 2117 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2118 rc = 0; 2119 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2120 /* Check status and move request to flush_queue */ 2121 switch (cqr->status) { 2122 case DASD_CQR_IN_IO: 2123 rc = device->discipline->term_IO(cqr); 2124 if (rc) { 2125 /* unable to terminate requeust */ 2126 dev_err(&device->cdev->dev, 2127 "Flushing the DASD request queue " 2128 "failed for request %p\n", cqr); 2129 /* stop flush processing */ 2130 goto finished; 2131 } 2132 break; 2133 case DASD_CQR_QUEUED: 2134 cqr->stopclk = get_tod_clock(); 2135 cqr->status = DASD_CQR_CLEARED; 2136 break; 2137 default: /* no need to modify the others */ 2138 break; 2139 } 2140 list_move_tail(&cqr->devlist, &flush_queue); 2141 } 2142 finished: 2143 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2144 /* 2145 * After this point all requests must be in state CLEAR_PENDING, 2146 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2147 * one of the others. 
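/*
 * Editor's note (illustrative sketch): __dasd_device_check_path_events() above
 * hands the "to be verified" and FC-security path masks to the discipline's
 * pe_handler and, if that returns non-zero, restores the masks and retries
 * after a short delay (50 jiffies).  A minimal handler therefore only has to
 * kick off the verification work; example_start_verification() is a
 * hypothetical helper.
 */
#if 0
static int example_pe_handler(struct dasd_device *device,
			      __u8 tbvpm, __u8 fcsecpm)
{
	/* non-zero return means: could not start, please retry later */
	return example_start_verification(device, tbvpm | fcsecpm);
}
#endif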
2148 */ 2149 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2150 wait_event(dasd_flush_wq, 2151 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2152 /* 2153 * Now set each request back to TERMINATED, DONE or NEED_ERP 2154 * and call the callback function of flushed requests 2155 */ 2156 __dasd_device_process_final_queue(device, &flush_queue); 2157 return rc; 2158 } 2159 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2160 2161 /* 2162 * Acquire the device lock and process queues for the device. 2163 */ 2164 static void dasd_device_tasklet(unsigned long data) 2165 { 2166 struct dasd_device *device = (struct dasd_device *) data; 2167 struct list_head final_queue; 2168 2169 atomic_set (&device->tasklet_scheduled, 0); 2170 INIT_LIST_HEAD(&final_queue); 2171 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2172 /* Check expire time of first request on the ccw queue. */ 2173 __dasd_device_check_expire(device); 2174 /* find final requests on ccw queue */ 2175 __dasd_device_process_ccw_queue(device, &final_queue); 2176 __dasd_device_check_path_events(device); 2177 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2178 /* Now call the callback function of requests with final status */ 2179 __dasd_device_process_final_queue(device, &final_queue); 2180 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2181 /* Now check if the head of the ccw queue needs to be started. */ 2182 __dasd_device_start_head(device); 2183 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2184 if (waitqueue_active(&shutdown_waitq)) 2185 wake_up(&shutdown_waitq); 2186 dasd_put_device(device); 2187 } 2188 2189 /* 2190 * Schedules a call to dasd_tasklet over the device tasklet. 2191 */ 2192 void dasd_schedule_device_bh(struct dasd_device *device) 2193 { 2194 /* Protect against rescheduling. */ 2195 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2196 return; 2197 dasd_get_device(device); 2198 tasklet_hi_schedule(&device->tasklet); 2199 } 2200 EXPORT_SYMBOL(dasd_schedule_device_bh); 2201 2202 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2203 { 2204 device->stopped |= bits; 2205 } 2206 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2207 2208 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2209 { 2210 device->stopped &= ~bits; 2211 if (!device->stopped) 2212 wake_up(&generic_waitq); 2213 } 2214 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2215 2216 /* 2217 * Queue a request to the head of the device ccw_queue. 2218 * Start the I/O if possible. 2219 */ 2220 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2221 { 2222 struct dasd_device *device; 2223 unsigned long flags; 2224 2225 device = cqr->startdev; 2226 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2227 cqr->status = DASD_CQR_QUEUED; 2228 list_add(&cqr->devlist, &device->ccw_queue); 2229 /* let the bh start the request to keep them in order */ 2230 dasd_schedule_device_bh(device); 2231 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2232 } 2233 EXPORT_SYMBOL(dasd_add_request_head); 2234 2235 /* 2236 * Queue a request to the tail of the device ccw_queue. 2237 * Start the I/O if possible. 
2238 */ 2239 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2240 { 2241 struct dasd_device *device; 2242 unsigned long flags; 2243 2244 device = cqr->startdev; 2245 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2246 cqr->status = DASD_CQR_QUEUED; 2247 list_add_tail(&cqr->devlist, &device->ccw_queue); 2248 /* let the bh start the request to keep them in order */ 2249 dasd_schedule_device_bh(device); 2250 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2251 } 2252 EXPORT_SYMBOL(dasd_add_request_tail); 2253 2254 /* 2255 * Wakeup helper for the 'sleep_on' functions. 2256 */ 2257 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2258 { 2259 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2260 cqr->callback_data = DASD_SLEEPON_END_TAG; 2261 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2262 wake_up(&generic_waitq); 2263 } 2264 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2265 2266 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2267 { 2268 struct dasd_device *device; 2269 int rc; 2270 2271 device = cqr->startdev; 2272 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2273 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2274 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2275 return rc; 2276 } 2277 2278 /* 2279 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2280 */ 2281 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2282 { 2283 struct dasd_device *device; 2284 dasd_erp_fn_t erp_fn; 2285 2286 if (cqr->status == DASD_CQR_FILLED) 2287 return 0; 2288 device = cqr->startdev; 2289 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2290 if (cqr->status == DASD_CQR_TERMINATED) { 2291 device->discipline->handle_terminated_request(cqr); 2292 return 1; 2293 } 2294 if (cqr->status == DASD_CQR_NEED_ERP) { 2295 erp_fn = device->discipline->erp_action(cqr); 2296 erp_fn(cqr); 2297 return 1; 2298 } 2299 if (cqr->status == DASD_CQR_FAILED) 2300 dasd_log_sense(cqr, &cqr->irb); 2301 if (cqr->refers) { 2302 __dasd_process_erp(device, cqr); 2303 return 1; 2304 } 2305 } 2306 return 0; 2307 } 2308 2309 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2310 { 2311 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2312 if (cqr->refers) /* erp is not done yet */ 2313 return 1; 2314 return ((cqr->status != DASD_CQR_DONE) && 2315 (cqr->status != DASD_CQR_FAILED)); 2316 } else 2317 return (cqr->status == DASD_CQR_FILLED); 2318 } 2319 2320 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2321 { 2322 struct dasd_device *device; 2323 int rc; 2324 struct list_head ccw_queue; 2325 struct dasd_ccw_req *cqr; 2326 2327 INIT_LIST_HEAD(&ccw_queue); 2328 maincqr->status = DASD_CQR_FILLED; 2329 device = maincqr->startdev; 2330 list_add(&maincqr->blocklist, &ccw_queue); 2331 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2332 cqr = list_first_entry(&ccw_queue, 2333 struct dasd_ccw_req, blocklist)) { 2334 2335 if (__dasd_sleep_on_erp(cqr)) 2336 continue; 2337 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2338 continue; 2339 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2340 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2341 cqr->status = DASD_CQR_FAILED; 2342 cqr->intrc = -EPERM; 2343 continue; 2344 } 2345 /* Non-temporary stop condition will trigger fail fast */ 2346 if (device->stopped & ~DASD_STOPPED_PENDING && 2347 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2348 !dasd_eer_enabled(device) && device->aq_mask == 0) { 2349 cqr->status = DASD_CQR_FAILED; 2350 
cqr->intrc = -ENOLINK; 2351 continue; 2352 } 2353 /* 2354 * Don't try to start requests if device is in 2355 * offline processing, it might wait forever 2356 */ 2357 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2358 cqr->status = DASD_CQR_FAILED; 2359 cqr->intrc = -ENODEV; 2360 continue; 2361 } 2362 /* 2363 * Don't try to start requests if device is stopped 2364 * except path verification requests 2365 */ 2366 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2367 if (interruptible) { 2368 rc = wait_event_interruptible( 2369 generic_waitq, !(device->stopped)); 2370 if (rc == -ERESTARTSYS) { 2371 cqr->status = DASD_CQR_FAILED; 2372 maincqr->intrc = rc; 2373 continue; 2374 } 2375 } else 2376 wait_event(generic_waitq, !(device->stopped)); 2377 } 2378 if (!cqr->callback) 2379 cqr->callback = dasd_wakeup_cb; 2380 2381 cqr->callback_data = DASD_SLEEPON_START_TAG; 2382 dasd_add_request_tail(cqr); 2383 if (interruptible) { 2384 rc = wait_event_interruptible( 2385 generic_waitq, _wait_for_wakeup(cqr)); 2386 if (rc == -ERESTARTSYS) { 2387 dasd_cancel_req(cqr); 2388 /* wait (non-interruptible) for final status */ 2389 wait_event(generic_waitq, 2390 _wait_for_wakeup(cqr)); 2391 cqr->status = DASD_CQR_FAILED; 2392 maincqr->intrc = rc; 2393 continue; 2394 } 2395 } else 2396 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2397 } 2398 2399 maincqr->endclk = get_tod_clock(); 2400 if ((maincqr->status != DASD_CQR_DONE) && 2401 (maincqr->intrc != -ERESTARTSYS)) 2402 dasd_log_sense(maincqr, &maincqr->irb); 2403 if (maincqr->status == DASD_CQR_DONE) 2404 rc = 0; 2405 else if (maincqr->intrc) 2406 rc = maincqr->intrc; 2407 else 2408 rc = -EIO; 2409 return rc; 2410 } 2411 2412 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2413 { 2414 struct dasd_ccw_req *cqr; 2415 2416 list_for_each_entry(cqr, ccw_queue, blocklist) { 2417 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2418 return 0; 2419 } 2420 2421 return 1; 2422 } 2423 2424 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2425 { 2426 struct dasd_device *device; 2427 struct dasd_ccw_req *cqr, *n; 2428 u8 *sense = NULL; 2429 int rc; 2430 2431 retry: 2432 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2433 device = cqr->startdev; 2434 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2435 continue; 2436 2437 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2438 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2439 cqr->status = DASD_CQR_FAILED; 2440 cqr->intrc = -EPERM; 2441 continue; 2442 } 2443 /*Non-temporary stop condition will trigger fail fast*/ 2444 if (device->stopped & ~DASD_STOPPED_PENDING && 2445 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2446 !dasd_eer_enabled(device)) { 2447 cqr->status = DASD_CQR_FAILED; 2448 cqr->intrc = -EAGAIN; 2449 continue; 2450 } 2451 2452 /*Don't try to start requests if device is stopped*/ 2453 if (interruptible) { 2454 rc = wait_event_interruptible( 2455 generic_waitq, !device->stopped); 2456 if (rc == -ERESTARTSYS) { 2457 cqr->status = DASD_CQR_FAILED; 2458 cqr->intrc = rc; 2459 continue; 2460 } 2461 } else 2462 wait_event(generic_waitq, !(device->stopped)); 2463 2464 if (!cqr->callback) 2465 cqr->callback = dasd_wakeup_cb; 2466 cqr->callback_data = DASD_SLEEPON_START_TAG; 2467 dasd_add_request_tail(cqr); 2468 } 2469 2470 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2471 2472 rc = 0; 2473 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2474 /* 2475 * In some cases certain errors might be expected and 2476 
* error recovery would be unnecessary in these cases. 2477 * Check if the according suppress bit is set. 2478 */ 2479 sense = dasd_get_sense(&cqr->irb); 2480 if (sense && (sense[1] & SNS1_INV_TRACK_FORMAT) && 2481 !(sense[2] & SNS2_ENV_DATA_PRESENT) && 2482 test_bit(DASD_CQR_SUPPRESS_IT, &cqr->flags)) 2483 continue; 2484 if (sense && (sense[1] & SNS1_NO_REC_FOUND) && 2485 test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags)) 2486 continue; 2487 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2488 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2489 continue; 2490 2491 /* 2492 * for alias devices simplify error recovery and 2493 * return to upper layer 2494 * do not skip ERP requests 2495 */ 2496 if (cqr->startdev != cqr->basedev && !cqr->refers && 2497 (cqr->status == DASD_CQR_TERMINATED || 2498 cqr->status == DASD_CQR_NEED_ERP)) 2499 return -EAGAIN; 2500 2501 /* normal recovery for basedev IO */ 2502 if (__dasd_sleep_on_erp(cqr)) 2503 /* handle erp first */ 2504 goto retry; 2505 } 2506 2507 return 0; 2508 } 2509 2510 /* 2511 * Queue a request to the tail of the device ccw_queue and wait for 2512 * it's completion. 2513 */ 2514 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2515 { 2516 return _dasd_sleep_on(cqr, 0); 2517 } 2518 EXPORT_SYMBOL(dasd_sleep_on); 2519 2520 /* 2521 * Start requests from a ccw_queue and wait for their completion. 2522 */ 2523 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2524 { 2525 return _dasd_sleep_on_queue(ccw_queue, 0); 2526 } 2527 EXPORT_SYMBOL(dasd_sleep_on_queue); 2528 2529 /* 2530 * Start requests from a ccw_queue and wait interruptible for their completion. 2531 */ 2532 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2533 { 2534 return _dasd_sleep_on_queue(ccw_queue, 1); 2535 } 2536 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2537 2538 /* 2539 * Queue a request to the tail of the device ccw_queue and wait 2540 * interruptible for it's completion. 2541 */ 2542 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2543 { 2544 return _dasd_sleep_on(cqr, 1); 2545 } 2546 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2547 2548 /* 2549 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2550 * for eckd devices) the currently running request has to be terminated 2551 * and be put back to status queued, before the special request is added 2552 * to the head of the queue. Then the special request is waited on normally. 2553 */ 2554 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2555 { 2556 struct dasd_ccw_req *cqr; 2557 int rc; 2558 2559 if (list_empty(&device->ccw_queue)) 2560 return 0; 2561 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2562 rc = device->discipline->term_IO(cqr); 2563 if (!rc) 2564 /* 2565 * CQR terminated because a more important request is pending. 2566 * Undo decreasing of retry counter because this is 2567 * not an error case. 
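/*
 * Editor's note (illustrative sketch, not part of the driver): typical
 * synchronous submission through the sleep_on interfaces above.  A single
 * request goes through dasd_sleep_on(); several requests can be chained via
 * their blocklist member and handed to dasd_sleep_on_queue().  The builder
 * example_build_cqr() is hypothetical; it is assumed to return a
 * DASD_CQR_FILLED request allocated with dasd_smalloc_request(), as
 * dasd_generic_build_rdc() near the end of this file does.
 */
#if 0
static int example_sync_io(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	cqr = example_build_cqr(device);	/* hypothetical builder */
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	rc = dasd_sleep_on(cqr);	/* blocks until a final state */

	dasd_sfree_request(cqr, cqr->memdev);
	return rc;
}

static int example_sync_io_many(struct dasd_device *device, int count)
{
	struct dasd_ccw_req *cqr, *n;
	LIST_HEAD(ccw_queue);
	int i, rc;

	for (i = 0; i < count; i++) {
		cqr = example_build_cqr(device);	/* hypothetical builder */
		if (IS_ERR(cqr)) {
			rc = PTR_ERR(cqr);
			goto out_free;
		}
		list_add_tail(&cqr->blocklist, &ccw_queue);
	}
	rc = dasd_sleep_on_queue(&ccw_queue);
out_free:
	list_for_each_entry_safe(cqr, n, &ccw_queue, blocklist) {
		list_del_init(&cqr->blocklist);
		dasd_sfree_request(cqr, cqr->memdev);
	}
	return rc;
}
#endif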
2568 */ 2569 cqr->retries++; 2570 return rc; 2571 } 2572 2573 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2574 { 2575 struct dasd_device *device; 2576 int rc; 2577 2578 device = cqr->startdev; 2579 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2580 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2581 cqr->status = DASD_CQR_FAILED; 2582 cqr->intrc = -EPERM; 2583 return -EIO; 2584 } 2585 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2586 rc = _dasd_term_running_cqr(device); 2587 if (rc) { 2588 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2589 return rc; 2590 } 2591 cqr->callback = dasd_wakeup_cb; 2592 cqr->callback_data = DASD_SLEEPON_START_TAG; 2593 cqr->status = DASD_CQR_QUEUED; 2594 /* 2595 * add new request as second 2596 * first the terminated cqr needs to be finished 2597 */ 2598 list_add(&cqr->devlist, device->ccw_queue.next); 2599 2600 /* let the bh start the request to keep them in order */ 2601 dasd_schedule_device_bh(device); 2602 2603 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2604 2605 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2606 2607 if (cqr->status == DASD_CQR_DONE) 2608 rc = 0; 2609 else if (cqr->intrc) 2610 rc = cqr->intrc; 2611 else 2612 rc = -EIO; 2613 2614 /* kick tasklets */ 2615 dasd_schedule_device_bh(device); 2616 if (device->block) 2617 dasd_schedule_block_bh(device->block); 2618 2619 return rc; 2620 } 2621 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2622 2623 /* 2624 * Cancels a request that was started with dasd_sleep_on_req. 2625 * This is useful to timeout requests. The request will be 2626 * terminated if it is currently in i/o. 2627 * Returns 0 if request termination was successful 2628 * negative error code if termination failed 2629 * Cancellation of a request is an asynchronous operation! The calling 2630 * function has to wait until the request is properly returned via callback. 2631 */ 2632 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2633 { 2634 struct dasd_device *device = cqr->startdev; 2635 int rc = 0; 2636 2637 switch (cqr->status) { 2638 case DASD_CQR_QUEUED: 2639 /* request was not started - just set to cleared */ 2640 cqr->status = DASD_CQR_CLEARED; 2641 break; 2642 case DASD_CQR_IN_IO: 2643 /* request in IO - terminate IO and release again */ 2644 rc = device->discipline->term_IO(cqr); 2645 if (rc) { 2646 dev_err(&device->cdev->dev, 2647 "Cancelling request %p failed with rc=%d\n", 2648 cqr, rc); 2649 } else { 2650 cqr->stopclk = get_tod_clock(); 2651 } 2652 break; 2653 default: /* already finished or clear pending - do nothing */ 2654 break; 2655 } 2656 dasd_schedule_device_bh(device); 2657 return rc; 2658 } 2659 2660 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2661 { 2662 struct dasd_device *device = cqr->startdev; 2663 unsigned long flags; 2664 int rc; 2665 2666 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2667 rc = __dasd_cancel_req(cqr); 2668 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2669 return rc; 2670 } 2671 2672 /* 2673 * SECTION: Operations of the dasd_block layer. 2674 */ 2675 2676 /* 2677 * Timeout function for dasd_block. This is used when the block layer 2678 * is waiting for something that may not come reliably, (e.g. 
a state 2679 * change interrupt) 2680 */ 2681 static void dasd_block_timeout(struct timer_list *t) 2682 { 2683 unsigned long flags; 2684 struct dasd_block *block; 2685 2686 block = from_timer(block, t, timer); 2687 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2688 /* re-activate request queue */ 2689 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2690 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2691 dasd_schedule_block_bh(block); 2692 blk_mq_run_hw_queues(block->gdp->queue, true); 2693 } 2694 2695 /* 2696 * Setup timeout for a dasd_block in jiffies. 2697 */ 2698 void dasd_block_set_timer(struct dasd_block *block, int expires) 2699 { 2700 if (expires == 0) 2701 del_timer(&block->timer); 2702 else 2703 mod_timer(&block->timer, jiffies + expires); 2704 } 2705 EXPORT_SYMBOL(dasd_block_set_timer); 2706 2707 /* 2708 * Clear timeout for a dasd_block. 2709 */ 2710 void dasd_block_clear_timer(struct dasd_block *block) 2711 { 2712 del_timer(&block->timer); 2713 } 2714 EXPORT_SYMBOL(dasd_block_clear_timer); 2715 2716 /* 2717 * Process finished error recovery ccw. 2718 */ 2719 static void __dasd_process_erp(struct dasd_device *device, 2720 struct dasd_ccw_req *cqr) 2721 { 2722 dasd_erp_fn_t erp_fn; 2723 2724 if (cqr->status == DASD_CQR_DONE) 2725 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2726 else 2727 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2728 erp_fn = device->discipline->erp_postaction(cqr); 2729 erp_fn(cqr); 2730 } 2731 2732 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2733 { 2734 struct request *req; 2735 blk_status_t error = BLK_STS_OK; 2736 unsigned int proc_bytes; 2737 int status; 2738 2739 req = (struct request *) cqr->callback_data; 2740 dasd_profile_end(cqr->block, cqr, req); 2741 2742 proc_bytes = cqr->proc_bytes; 2743 status = cqr->block->base->discipline->free_cp(cqr, req); 2744 if (status < 0) 2745 error = errno_to_blk_status(status); 2746 else if (status == 0) { 2747 switch (cqr->intrc) { 2748 case -EPERM: 2749 /* 2750 * DASD doesn't implement SCSI/NVMe reservations, but it 2751 * implements a locking scheme similar to them. We 2752 * return this error when we no longer have the lock. 2753 */ 2754 error = BLK_STS_RESV_CONFLICT; 2755 break; 2756 case -ENOLINK: 2757 error = BLK_STS_TRANSPORT; 2758 break; 2759 case -ETIMEDOUT: 2760 error = BLK_STS_TIMEOUT; 2761 break; 2762 default: 2763 error = BLK_STS_IOERR; 2764 break; 2765 } 2766 } 2767 2768 /* 2769 * We need to take care for ETIMEDOUT errors here since the 2770 * complete callback does not get called in this case. 2771 * Take care of all errors here and avoid additional code to 2772 * transfer the error value to the complete callback. 2773 */ 2774 if (error) { 2775 blk_mq_end_request(req, error); 2776 blk_mq_run_hw_queues(req->q, true); 2777 } else { 2778 /* 2779 * Partial completed requests can happen with ESE devices. 2780 * During read we might have gotten a NRF error and have to 2781 * complete a request partially. 2782 */ 2783 if (proc_bytes) { 2784 blk_update_request(req, BLK_STS_OK, proc_bytes); 2785 blk_mq_requeue_request(req, true); 2786 } else if (likely(!blk_should_fake_timeout(req->q))) { 2787 blk_mq_complete_request(req); 2788 } 2789 } 2790 } 2791 2792 /* 2793 * Process ccw request queue. 
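/*
 * Editor's note (illustrative sketch): the typical producer of the block
 * timeout above is recovery code that wants to pause the device briefly; it
 * sets DASD_STOPPED_PENDING and arms the block timer so dasd_block_timeout()
 * can lift the stop bit and restart the queues later (compare the ERP code in
 * dasd_3990_erp.c).  Roughly:
 */
#if 0
static void example_postpone_block(struct dasd_block *block, int expires)
{
	unsigned long flags;

	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	dasd_device_set_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);

	dasd_block_set_timer(block, expires);	/* expires in jiffies */
}
#endif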
2794 */ 2795 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2796 struct list_head *final_queue) 2797 { 2798 struct list_head *l, *n; 2799 struct dasd_ccw_req *cqr; 2800 dasd_erp_fn_t erp_fn; 2801 unsigned long flags; 2802 struct dasd_device *base = block->base; 2803 2804 restart: 2805 /* Process request with final status. */ 2806 list_for_each_safe(l, n, &block->ccw_queue) { 2807 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2808 if (cqr->status != DASD_CQR_DONE && 2809 cqr->status != DASD_CQR_FAILED && 2810 cqr->status != DASD_CQR_NEED_ERP && 2811 cqr->status != DASD_CQR_TERMINATED) 2812 continue; 2813 2814 if (cqr->status == DASD_CQR_TERMINATED) { 2815 base->discipline->handle_terminated_request(cqr); 2816 goto restart; 2817 } 2818 2819 /* Process requests that may be recovered */ 2820 if (cqr->status == DASD_CQR_NEED_ERP) { 2821 erp_fn = base->discipline->erp_action(cqr); 2822 if (IS_ERR(erp_fn(cqr))) 2823 continue; 2824 goto restart; 2825 } 2826 2827 /* log sense for fatal error */ 2828 if (cqr->status == DASD_CQR_FAILED) { 2829 dasd_log_sense(cqr, &cqr->irb); 2830 } 2831 2832 /* 2833 * First call extended error reporting and check for autoquiesce 2834 */ 2835 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2836 if (cqr->status == DASD_CQR_FAILED && 2837 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { 2838 cqr->status = DASD_CQR_FILLED; 2839 cqr->retries = 255; 2840 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2841 goto restart; 2842 } 2843 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2844 2845 /* Process finished ERP request. */ 2846 if (cqr->refers) { 2847 __dasd_process_erp(base, cqr); 2848 goto restart; 2849 } 2850 2851 /* Rechain finished requests to final queue */ 2852 cqr->endclk = get_tod_clock(); 2853 list_move_tail(&cqr->blocklist, final_queue); 2854 } 2855 } 2856 2857 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2858 { 2859 dasd_schedule_block_bh(cqr->block); 2860 } 2861 2862 static void __dasd_block_start_head(struct dasd_block *block) 2863 { 2864 struct dasd_ccw_req *cqr; 2865 2866 if (list_empty(&block->ccw_queue)) 2867 return; 2868 /* We allways begin with the first requests on the queue, as some 2869 * of previously started requests have to be enqueued on a 2870 * dasd_device again for error recovery. 2871 */ 2872 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2873 if (cqr->status != DASD_CQR_FILLED) 2874 continue; 2875 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2876 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2877 cqr->status = DASD_CQR_FAILED; 2878 cqr->intrc = -EPERM; 2879 dasd_schedule_block_bh(block); 2880 continue; 2881 } 2882 /* Non-temporary stop condition will trigger fail fast */ 2883 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2884 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2885 !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) { 2886 cqr->status = DASD_CQR_FAILED; 2887 cqr->intrc = -ENOLINK; 2888 dasd_schedule_block_bh(block); 2889 continue; 2890 } 2891 /* Don't try to start requests if device is stopped */ 2892 if (block->base->stopped) 2893 return; 2894 2895 /* just a fail safe check, should not happen */ 2896 if (!cqr->startdev) 2897 cqr->startdev = block->base; 2898 2899 /* make sure that the requests we submit find their way back */ 2900 cqr->callback = dasd_return_cqr_cb; 2901 2902 dasd_add_request_tail(cqr); 2903 } 2904 } 2905 2906 /* 2907 * Central dasd_block layer routine. 
Takes requests from the generic 2908 * block layer request queue, creates ccw requests, enqueues them on 2909 * a dasd_device and processes ccw requests that have been returned. 2910 */ 2911 static void dasd_block_tasklet(unsigned long data) 2912 { 2913 struct dasd_block *block = (struct dasd_block *) data; 2914 struct list_head final_queue; 2915 struct list_head *l, *n; 2916 struct dasd_ccw_req *cqr; 2917 struct dasd_queue *dq; 2918 2919 atomic_set(&block->tasklet_scheduled, 0); 2920 INIT_LIST_HEAD(&final_queue); 2921 spin_lock_irq(&block->queue_lock); 2922 /* Finish off requests on ccw queue */ 2923 __dasd_process_block_ccw_queue(block, &final_queue); 2924 spin_unlock_irq(&block->queue_lock); 2925 2926 /* Now call the callback function of requests with final status */ 2927 list_for_each_safe(l, n, &final_queue) { 2928 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2929 dq = cqr->dq; 2930 spin_lock_irq(&dq->lock); 2931 list_del_init(&cqr->blocklist); 2932 __dasd_cleanup_cqr(cqr); 2933 spin_unlock_irq(&dq->lock); 2934 } 2935 2936 spin_lock_irq(&block->queue_lock); 2937 /* Now check if the head of the ccw queue needs to be started. */ 2938 __dasd_block_start_head(block); 2939 spin_unlock_irq(&block->queue_lock); 2940 2941 if (waitqueue_active(&shutdown_waitq)) 2942 wake_up(&shutdown_waitq); 2943 dasd_put_device(block->base); 2944 } 2945 2946 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2947 { 2948 wake_up(&dasd_flush_wq); 2949 } 2950 2951 /* 2952 * Requeue a request back to the block request queue 2953 * only works for block requests 2954 */ 2955 static void _dasd_requeue_request(struct dasd_ccw_req *cqr) 2956 { 2957 struct request *req; 2958 2959 /* 2960 * If the request is an ERP request there is nothing to requeue. 2961 * This will be done with the remaining original request. 2962 */ 2963 if (cqr->refers) 2964 return; 2965 spin_lock_irq(&cqr->dq->lock); 2966 req = (struct request *) cqr->callback_data; 2967 blk_mq_requeue_request(req, true); 2968 spin_unlock_irq(&cqr->dq->lock); 2969 2970 return; 2971 } 2972 2973 static int _dasd_requests_to_flushqueue(struct dasd_block *block, 2974 struct list_head *flush_queue) 2975 { 2976 struct dasd_ccw_req *cqr, *n; 2977 unsigned long flags; 2978 int rc, i; 2979 2980 spin_lock_irqsave(&block->queue_lock, flags); 2981 rc = 0; 2982 restart: 2983 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2984 /* if this request currently owned by a dasd_device cancel it */ 2985 if (cqr->status >= DASD_CQR_QUEUED) 2986 rc = dasd_cancel_req(cqr); 2987 if (rc < 0) 2988 break; 2989 /* Rechain request (including erp chain) so it won't be 2990 * touched by the dasd_block_tasklet anymore. 2991 * Replace the callback so we notice when the request 2992 * is returned from the dasd_device layer. 2993 */ 2994 cqr->callback = _dasd_wake_block_flush_cb; 2995 for (i = 0; cqr; cqr = cqr->refers, i++) 2996 list_move_tail(&cqr->blocklist, flush_queue); 2997 if (i > 1) 2998 /* moved more than one request - need to restart */ 2999 goto restart; 3000 } 3001 spin_unlock_irqrestore(&block->queue_lock, flags); 3002 3003 return rc; 3004 } 3005 3006 /* 3007 * Go through all request on the dasd_block request queue, cancel them 3008 * on the respective dasd_device, and return them to the generic 3009 * block layer. 
3010 */ 3011 static int dasd_flush_block_queue(struct dasd_block *block) 3012 { 3013 struct dasd_ccw_req *cqr, *n; 3014 struct list_head flush_queue; 3015 unsigned long flags; 3016 int rc; 3017 3018 INIT_LIST_HEAD(&flush_queue); 3019 rc = _dasd_requests_to_flushqueue(block, &flush_queue); 3020 3021 /* Now call the callback function of flushed requests */ 3022 restart_cb: 3023 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3024 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3025 /* Process finished ERP request. */ 3026 if (cqr->refers) { 3027 spin_lock_bh(&block->queue_lock); 3028 __dasd_process_erp(block->base, cqr); 3029 spin_unlock_bh(&block->queue_lock); 3030 /* restart list_for_xx loop since dasd_process_erp 3031 * might remove multiple elements */ 3032 goto restart_cb; 3033 } 3034 /* call the callback function */ 3035 spin_lock_irqsave(&cqr->dq->lock, flags); 3036 cqr->endclk = get_tod_clock(); 3037 list_del_init(&cqr->blocklist); 3038 __dasd_cleanup_cqr(cqr); 3039 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3040 } 3041 return rc; 3042 } 3043 3044 /* 3045 * Schedules a call to dasd_tasklet over the device tasklet. 3046 */ 3047 void dasd_schedule_block_bh(struct dasd_block *block) 3048 { 3049 /* Protect against rescheduling. */ 3050 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3051 return; 3052 /* life cycle of block is bound to it's base device */ 3053 dasd_get_device(block->base); 3054 tasklet_hi_schedule(&block->tasklet); 3055 } 3056 EXPORT_SYMBOL(dasd_schedule_block_bh); 3057 3058 3059 /* 3060 * SECTION: external block device operations 3061 * (request queue handling, open, release, etc.) 3062 */ 3063 3064 /* 3065 * Dasd request queue function. Called from ll_rw_blk.c 3066 */ 3067 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3068 const struct blk_mq_queue_data *qd) 3069 { 3070 struct dasd_block *block = hctx->queue->queuedata; 3071 struct dasd_queue *dq = hctx->driver_data; 3072 struct request *req = qd->rq; 3073 struct dasd_device *basedev; 3074 struct dasd_ccw_req *cqr; 3075 blk_status_t rc = BLK_STS_OK; 3076 3077 basedev = block->base; 3078 spin_lock_irq(&dq->lock); 3079 if (basedev->state < DASD_STATE_READY || 3080 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3081 DBF_DEV_EVENT(DBF_ERR, basedev, 3082 "device not ready for request %p", req); 3083 rc = BLK_STS_IOERR; 3084 goto out; 3085 } 3086 3087 /* 3088 * if device is stopped do not fetch new requests 3089 * except failfast is active which will let requests fail 3090 * immediately in __dasd_block_start_head() 3091 */ 3092 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3093 DBF_DEV_EVENT(DBF_ERR, basedev, 3094 "device stopped request %p", req); 3095 rc = BLK_STS_RESOURCE; 3096 goto out; 3097 } 3098 3099 if (basedev->features & DASD_FEATURE_READONLY && 3100 rq_data_dir(req) == WRITE) { 3101 DBF_DEV_EVENT(DBF_ERR, basedev, 3102 "Rejecting write request %p", req); 3103 rc = BLK_STS_IOERR; 3104 goto out; 3105 } 3106 3107 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3108 (basedev->features & DASD_FEATURE_FAILFAST || 3109 blk_noretry_request(req))) { 3110 DBF_DEV_EVENT(DBF_ERR, basedev, 3111 "Rejecting failfast request %p", req); 3112 rc = BLK_STS_IOERR; 3113 goto out; 3114 } 3115 3116 cqr = basedev->discipline->build_cp(basedev, block, req); 3117 if (IS_ERR(cqr)) { 3118 if (PTR_ERR(cqr) == -EBUSY || 3119 PTR_ERR(cqr) == -ENOMEM || 3120 PTR_ERR(cqr) == -EAGAIN) { 3121 rc = BLK_STS_RESOURCE; 3122 goto out; 3123 } 3124 
DBF_DEV_EVENT(DBF_ERR, basedev, 3125 "CCW creation failed (rc=%ld) on request %p", 3126 PTR_ERR(cqr), req); 3127 rc = BLK_STS_IOERR; 3128 goto out; 3129 } 3130 /* 3131 * Note: callback is set to dasd_return_cqr_cb in 3132 * __dasd_block_start_head to cover erp requests as well 3133 */ 3134 cqr->callback_data = req; 3135 cqr->status = DASD_CQR_FILLED; 3136 cqr->dq = dq; 3137 3138 blk_mq_start_request(req); 3139 spin_lock(&block->queue_lock); 3140 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3141 INIT_LIST_HEAD(&cqr->devlist); 3142 dasd_profile_start(block, cqr, req); 3143 dasd_schedule_block_bh(block); 3144 spin_unlock(&block->queue_lock); 3145 3146 out: 3147 spin_unlock_irq(&dq->lock); 3148 return rc; 3149 } 3150 3151 /* 3152 * Block timeout callback, called from the block layer 3153 * 3154 * Return values: 3155 * BLK_EH_RESET_TIMER if the request should be left running 3156 * BLK_EH_DONE if the request is handled or terminated 3157 * by the driver. 3158 */ 3159 enum blk_eh_timer_return dasd_times_out(struct request *req) 3160 { 3161 struct dasd_block *block = req->q->queuedata; 3162 struct dasd_device *device; 3163 struct dasd_ccw_req *cqr; 3164 unsigned long flags; 3165 int rc = 0; 3166 3167 cqr = blk_mq_rq_to_pdu(req); 3168 if (!cqr) 3169 return BLK_EH_DONE; 3170 3171 spin_lock_irqsave(&cqr->dq->lock, flags); 3172 device = cqr->startdev ? cqr->startdev : block->base; 3173 if (!device->blk_timeout) { 3174 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3175 return BLK_EH_RESET_TIMER; 3176 } 3177 DBF_DEV_EVENT(DBF_WARNING, device, 3178 " dasd_times_out cqr %p status %x", 3179 cqr, cqr->status); 3180 3181 spin_lock(&block->queue_lock); 3182 spin_lock(get_ccwdev_lock(device->cdev)); 3183 cqr->retries = -1; 3184 cqr->intrc = -ETIMEDOUT; 3185 if (cqr->status >= DASD_CQR_QUEUED) { 3186 rc = __dasd_cancel_req(cqr); 3187 } else if (cqr->status == DASD_CQR_FILLED || 3188 cqr->status == DASD_CQR_NEED_ERP) { 3189 cqr->status = DASD_CQR_TERMINATED; 3190 } else if (cqr->status == DASD_CQR_IN_ERP) { 3191 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3192 3193 list_for_each_entry_safe(searchcqr, nextcqr, 3194 &block->ccw_queue, blocklist) { 3195 tmpcqr = searchcqr; 3196 while (tmpcqr->refers) 3197 tmpcqr = tmpcqr->refers; 3198 if (tmpcqr != cqr) 3199 continue; 3200 /* searchcqr is an ERP request for cqr */ 3201 searchcqr->retries = -1; 3202 searchcqr->intrc = -ETIMEDOUT; 3203 if (searchcqr->status >= DASD_CQR_QUEUED) { 3204 rc = __dasd_cancel_req(searchcqr); 3205 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3206 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3207 searchcqr->status = DASD_CQR_TERMINATED; 3208 rc = 0; 3209 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3210 /* 3211 * Shouldn't happen; most recent ERP 3212 * request is at the front of queue 3213 */ 3214 continue; 3215 } 3216 break; 3217 } 3218 } 3219 spin_unlock(get_ccwdev_lock(device->cdev)); 3220 dasd_schedule_block_bh(block); 3221 spin_unlock(&block->queue_lock); 3222 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3223 3224 return rc ? 
BLK_EH_RESET_TIMER : BLK_EH_DONE; 3225 } 3226 3227 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3228 unsigned int idx) 3229 { 3230 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3231 3232 if (!dq) 3233 return -ENOMEM; 3234 3235 spin_lock_init(&dq->lock); 3236 hctx->driver_data = dq; 3237 3238 return 0; 3239 } 3240 3241 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3242 { 3243 kfree(hctx->driver_data); 3244 hctx->driver_data = NULL; 3245 } 3246 3247 static void dasd_request_done(struct request *req) 3248 { 3249 blk_mq_end_request(req, 0); 3250 blk_mq_run_hw_queues(req->q, true); 3251 } 3252 3253 struct blk_mq_ops dasd_mq_ops = { 3254 .queue_rq = do_dasd_request, 3255 .complete = dasd_request_done, 3256 .timeout = dasd_times_out, 3257 .init_hctx = dasd_init_hctx, 3258 .exit_hctx = dasd_exit_hctx, 3259 }; 3260 3261 static int dasd_open(struct gendisk *disk, blk_mode_t mode) 3262 { 3263 struct dasd_device *base; 3264 int rc; 3265 3266 base = dasd_device_from_gendisk(disk); 3267 if (!base) 3268 return -ENODEV; 3269 3270 atomic_inc(&base->block->open_count); 3271 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3272 rc = -ENODEV; 3273 goto unlock; 3274 } 3275 3276 if (!try_module_get(base->discipline->owner)) { 3277 rc = -EINVAL; 3278 goto unlock; 3279 } 3280 3281 if (dasd_probeonly) { 3282 dev_info(&base->cdev->dev, 3283 "Accessing the DASD failed because it is in " 3284 "probeonly mode\n"); 3285 rc = -EPERM; 3286 goto out; 3287 } 3288 3289 if (base->state <= DASD_STATE_BASIC) { 3290 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3291 " Cannot open unrecognized device"); 3292 rc = -ENODEV; 3293 goto out; 3294 } 3295 if ((mode & BLK_OPEN_WRITE) && 3296 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3297 (base->features & DASD_FEATURE_READONLY))) { 3298 rc = -EROFS; 3299 goto out; 3300 } 3301 dasd_put_device(base); 3302 return 0; 3303 3304 out: 3305 module_put(base->discipline->owner); 3306 unlock: 3307 atomic_dec(&base->block->open_count); 3308 dasd_put_device(base); 3309 return rc; 3310 } 3311 3312 static void dasd_release(struct gendisk *disk) 3313 { 3314 struct dasd_device *base = dasd_device_from_gendisk(disk); 3315 if (base) { 3316 atomic_dec(&base->block->open_count); 3317 module_put(base->discipline->owner); 3318 dasd_put_device(base); 3319 } 3320 } 3321 3322 /* 3323 * Return disk geometry. 
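/*
 * Editor's note (illustrative sketch): dasd_mq_ops above is plugged into
 * blk-mq by the gendisk setup code (dasd_gendisk_alloc() in dasd_genhd.c).
 * The essential wiring, with hypothetical values for the queue geometry,
 * looks roughly like the snippet below; cmd_size reserves the per-request
 * dasd_ccw_req that dasd_times_out() retrieves via blk_mq_rq_to_pdu().
 */
#if 0
	struct blk_mq_tag_set *tag_set = &block->tag_set;
	int rc;

	tag_set->ops = &dasd_mq_ops;
	tag_set->cmd_size = sizeof(struct dasd_ccw_req);
	tag_set->nr_hw_queues = nr_hw_queues;	/* hypothetical tuning value */
	tag_set->queue_depth = queue_depth;	/* hypothetical tuning value */
	tag_set->numa_node = NUMA_NO_NODE;
	rc = blk_mq_alloc_tag_set(tag_set);
	/* the gendisk and request queue are then allocated against tag_set */
#endif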
3324 */ 3325 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3326 { 3327 struct dasd_device *base; 3328 3329 base = dasd_device_from_gendisk(bdev->bd_disk); 3330 if (!base) 3331 return -ENODEV; 3332 3333 if (!base->discipline || 3334 !base->discipline->fill_geometry) { 3335 dasd_put_device(base); 3336 return -EINVAL; 3337 } 3338 base->discipline->fill_geometry(base->block, geo); 3339 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3340 dasd_put_device(base); 3341 return 0; 3342 } 3343 3344 const struct block_device_operations 3345 dasd_device_operations = { 3346 .owner = THIS_MODULE, 3347 .open = dasd_open, 3348 .release = dasd_release, 3349 .ioctl = dasd_ioctl, 3350 .compat_ioctl = dasd_ioctl, 3351 .getgeo = dasd_getgeo, 3352 .set_read_only = dasd_set_read_only, 3353 }; 3354 3355 /******************************************************************************* 3356 * end of block device operations 3357 */ 3358 3359 static void 3360 dasd_exit(void) 3361 { 3362 #ifdef CONFIG_PROC_FS 3363 dasd_proc_exit(); 3364 #endif 3365 dasd_eer_exit(); 3366 kmem_cache_destroy(dasd_page_cache); 3367 dasd_page_cache = NULL; 3368 dasd_gendisk_exit(); 3369 dasd_devmap_exit(); 3370 if (dasd_debug_area != NULL) { 3371 debug_unregister(dasd_debug_area); 3372 dasd_debug_area = NULL; 3373 } 3374 dasd_statistics_removeroot(); 3375 } 3376 3377 /* 3378 * SECTION: common functions for ccw_driver use 3379 */ 3380 3381 /* 3382 * Is the device read-only? 3383 * Note that this function does not report the setting of the 3384 * readonly device attribute, but how it is configured in z/VM. 3385 */ 3386 int dasd_device_is_ro(struct dasd_device *device) 3387 { 3388 struct ccw_dev_id dev_id; 3389 struct diag210 diag_data; 3390 int rc; 3391 3392 if (!MACHINE_IS_VM) 3393 return 0; 3394 ccw_device_get_id(device->cdev, &dev_id); 3395 memset(&diag_data, 0, sizeof(diag_data)); 3396 diag_data.vrdcdvno = dev_id.devno; 3397 diag_data.vrdclen = sizeof(diag_data); 3398 rc = diag210(&diag_data); 3399 if (rc == 0 || rc == 2) { 3400 return diag_data.vrdcvfla & 0x80; 3401 } else { 3402 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3403 dev_id.devno, rc); 3404 return 0; 3405 } 3406 } 3407 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3408 3409 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3410 { 3411 struct ccw_device *cdev = data; 3412 int ret; 3413 3414 ret = ccw_device_set_online(cdev); 3415 if (ret) 3416 dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); 3417 } 3418 3419 /* 3420 * Initial attempt at a probe function. this can be simplified once 3421 * the other detection code is gone. 3422 */ 3423 int dasd_generic_probe(struct ccw_device *cdev) 3424 { 3425 cdev->handler = &dasd_int_handler; 3426 3427 /* 3428 * Automatically online either all dasd devices (dasd_autodetect) 3429 * or all devices specified with dasd= parameters during 3430 * initial probe. 3431 */ 3432 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3433 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3434 async_schedule(dasd_generic_auto_online, cdev); 3435 return 0; 3436 } 3437 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3438 3439 void dasd_generic_free_discipline(struct dasd_device *device) 3440 { 3441 /* Forget the discipline information. 
*/ 3442 if (device->discipline) { 3443 if (device->discipline->uncheck_device) 3444 device->discipline->uncheck_device(device); 3445 module_put(device->discipline->owner); 3446 device->discipline = NULL; 3447 } 3448 if (device->base_discipline) { 3449 module_put(device->base_discipline->owner); 3450 device->base_discipline = NULL; 3451 } 3452 } 3453 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3454 3455 /* 3456 * This will one day be called from a global not_oper handler. 3457 * It is also used by driver_unregister during module unload. 3458 */ 3459 void dasd_generic_remove(struct ccw_device *cdev) 3460 { 3461 struct dasd_device *device; 3462 struct dasd_block *block; 3463 3464 device = dasd_device_from_cdev(cdev); 3465 if (IS_ERR(device)) 3466 return; 3467 3468 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3469 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3470 /* Already doing offline processing */ 3471 dasd_put_device(device); 3472 return; 3473 } 3474 /* 3475 * This device is removed unconditionally. Set offline 3476 * flag to prevent dasd_open from opening it while it is 3477 * no quite down yet. 3478 */ 3479 dasd_set_target_state(device, DASD_STATE_NEW); 3480 cdev->handler = NULL; 3481 /* dasd_delete_device destroys the device reference. */ 3482 block = device->block; 3483 dasd_delete_device(device); 3484 /* 3485 * life cycle of block is bound to device, so delete it after 3486 * device was safely removed 3487 */ 3488 if (block) 3489 dasd_free_block(block); 3490 } 3491 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3492 3493 /* 3494 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3495 * the device is detected for the first time and is supposed to be used 3496 * or the user has started activation through sysfs. 3497 */ 3498 int dasd_generic_set_online(struct ccw_device *cdev, 3499 struct dasd_discipline *base_discipline) 3500 { 3501 struct dasd_discipline *discipline; 3502 struct dasd_device *device; 3503 struct device *dev; 3504 int rc; 3505 3506 dev = &cdev->dev; 3507 3508 /* first online clears initial online feature flag */ 3509 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3510 device = dasd_create_device(cdev); 3511 if (IS_ERR(device)) 3512 return PTR_ERR(device); 3513 3514 discipline = base_discipline; 3515 if (device->features & DASD_FEATURE_USEDIAG) { 3516 if (!dasd_diag_discipline_pointer) { 3517 /* Try to load the required module. */ 3518 rc = request_module(DASD_DIAG_MOD); 3519 if (rc) { 3520 dev_warn(dev, "Setting the DASD online failed " 3521 "because the required module %s " 3522 "could not be loaded (rc=%d)\n", 3523 DASD_DIAG_MOD, rc); 3524 dasd_delete_device(device); 3525 return -ENODEV; 3526 } 3527 } 3528 /* Module init could have failed, so check again here after 3529 * request_module(). 
*/ 3530 if (!dasd_diag_discipline_pointer) { 3531 dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); 3532 dasd_delete_device(device); 3533 return -ENODEV; 3534 } 3535 discipline = dasd_diag_discipline_pointer; 3536 } 3537 if (!try_module_get(base_discipline->owner)) { 3538 dasd_delete_device(device); 3539 return -EINVAL; 3540 } 3541 device->base_discipline = base_discipline; 3542 if (!try_module_get(discipline->owner)) { 3543 dasd_delete_device(device); 3544 return -EINVAL; 3545 } 3546 device->discipline = discipline; 3547 3548 /* check_device will allocate block device if necessary */ 3549 rc = discipline->check_device(device); 3550 if (rc) { 3551 dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", 3552 discipline->name, rc); 3553 dasd_delete_device(device); 3554 return rc; 3555 } 3556 3557 dasd_set_target_state(device, DASD_STATE_ONLINE); 3558 if (device->state <= DASD_STATE_KNOWN) { 3559 dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); 3560 rc = -ENODEV; 3561 dasd_set_target_state(device, DASD_STATE_NEW); 3562 if (device->block) 3563 dasd_free_block(device->block); 3564 dasd_delete_device(device); 3565 } else { 3566 dev_dbg(dev, "dasd_generic device found\n"); 3567 } 3568 3569 wait_event(dasd_init_waitq, _wait_for_device(device)); 3570 3571 dasd_put_device(device); 3572 return rc; 3573 } 3574 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3575 3576 int dasd_generic_set_offline(struct ccw_device *cdev) 3577 { 3578 int max_count, open_count, rc; 3579 struct dasd_device *device; 3580 struct dasd_block *block; 3581 unsigned long flags; 3582 struct device *dev; 3583 3584 dev = &cdev->dev; 3585 3586 rc = 0; 3587 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3588 device = dasd_device_from_cdev_locked(cdev); 3589 if (IS_ERR(device)) { 3590 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3591 return PTR_ERR(device); 3592 } 3593 3594 /* 3595 * We must make sure that this device is currently not in use. 3596 * The open_count is increased for every opener, that includes 3597 * the blkdev_get in dasd_scan_partitions. We are only interested 3598 * in the other openers. 3599 */ 3600 if (device->block) { 3601 max_count = device->block->bdev ? 0 : -1; 3602 open_count = atomic_read(&device->block->open_count); 3603 if (open_count > max_count) { 3604 if (open_count > 0) 3605 dev_warn(dev, "The DASD cannot be set offline with open count %i\n", 3606 open_count); 3607 else 3608 dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); 3609 rc = -EBUSY; 3610 goto out_err; 3611 } 3612 } 3613 3614 /* 3615 * Test if the offline processing is already running and exit if so. 
3616 * If a safe offline is being processed this could only be a normal 3617 * offline that should be able to overtake the safe offline and 3618 * cancel any I/O we do not want to wait for any longer 3619 */ 3620 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3621 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3622 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3623 &device->flags); 3624 } else { 3625 rc = -EBUSY; 3626 goto out_err; 3627 } 3628 } 3629 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3630 3631 /* 3632 * if safe_offline is called set safe_offline_running flag and 3633 * clear safe_offline so that a call to normal offline 3634 * can overrun safe_offline processing 3635 */ 3636 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3637 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3638 /* need to unlock here to wait for outstanding I/O */ 3639 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3640 /* 3641 * If we want to set the device safe offline all IO operations 3642 * should be finished before continuing the offline process 3643 * so sync bdev first and then wait for our queues to become 3644 * empty 3645 */ 3646 if (device->block) 3647 bdev_mark_dead(device->block->bdev, false); 3648 dasd_schedule_device_bh(device); 3649 rc = wait_event_interruptible(shutdown_waitq, 3650 _wait_for_empty_queues(device)); 3651 if (rc != 0) 3652 goto interrupted; 3653 3654 /* 3655 * check if a normal offline process overtook the offline 3656 * processing in this case simply do nothing beside returning 3657 * that we got interrupted 3658 * otherwise mark safe offline as not running any longer and 3659 * continue with normal offline 3660 */ 3661 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3662 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3663 rc = -ERESTARTSYS; 3664 goto out_err; 3665 } 3666 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3667 } 3668 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3669 3670 dasd_set_target_state(device, DASD_STATE_NEW); 3671 /* dasd_delete_device destroys the device reference. */ 3672 block = device->block; 3673 dasd_delete_device(device); 3674 /* 3675 * life cycle of block is bound to device, so delete it after 3676 * device was safely removed 3677 */ 3678 if (block) 3679 dasd_free_block(block); 3680 3681 return 0; 3682 3683 interrupted: 3684 /* interrupted by signal */ 3685 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3686 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3687 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3688 out_err: 3689 dasd_put_device(device); 3690 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3691 return rc; 3692 } 3693 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3694 3695 int dasd_generic_last_path_gone(struct dasd_device *device) 3696 { 3697 struct dasd_ccw_req *cqr; 3698 3699 dev_warn(&device->cdev->dev, "No operational channel path is left " 3700 "for the device\n"); 3701 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3702 /* First call extended error reporting and check for autoquiesce. */ 3703 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); 3704 3705 if (device->state < DASD_STATE_BASIC) 3706 return 0; 3707 /* Device is active. We want to keep it. 
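/*
 * Editor's note (illustrative sketch): the generic online/offline helpers are
 * wired into a discipline's ccw_driver; set_online is usually a thin wrapper
 * that passes the discipline's own dasd_discipline structure (compare
 * dasd_eckd.c and dasd_fba.c).  All example_* names are hypothetical.
 */
#if 0
static int example_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &example_discipline);
}

static struct ccw_driver example_dasd_driver = {
	.probe	     = dasd_generic_probe,
	.remove	     = dasd_generic_remove,
	.set_online  = example_set_online,
	.set_offline = dasd_generic_set_offline,
};
#endif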
*/ 3708 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3709 if ((cqr->status == DASD_CQR_IN_IO) || 3710 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3711 cqr->status = DASD_CQR_QUEUED; 3712 cqr->retries++; 3713 } 3714 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3715 dasd_device_clear_timer(device); 3716 dasd_schedule_device_bh(device); 3717 return 1; 3718 } 3719 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3720 3721 int dasd_generic_path_operational(struct dasd_device *device) 3722 { 3723 dev_info(&device->cdev->dev, "A channel path to the device has become " 3724 "operational\n"); 3725 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3726 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3727 dasd_schedule_device_bh(device); 3728 if (device->block) { 3729 dasd_schedule_block_bh(device->block); 3730 if (device->block->gdp) 3731 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3732 } 3733 3734 if (!device->stopped) 3735 wake_up(&generic_waitq); 3736 3737 return 1; 3738 } 3739 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3740 3741 int dasd_generic_notify(struct ccw_device *cdev, int event) 3742 { 3743 struct dasd_device *device; 3744 int ret; 3745 3746 device = dasd_device_from_cdev_locked(cdev); 3747 if (IS_ERR(device)) 3748 return 0; 3749 ret = 0; 3750 switch (event) { 3751 case CIO_GONE: 3752 case CIO_BOXED: 3753 case CIO_NO_PATH: 3754 dasd_path_no_path(device); 3755 ret = dasd_generic_last_path_gone(device); 3756 break; 3757 case CIO_OPER: 3758 ret = 1; 3759 if (dasd_path_get_opm(device)) 3760 ret = dasd_generic_path_operational(device); 3761 break; 3762 } 3763 dasd_put_device(device); 3764 return ret; 3765 } 3766 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3767 3768 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3769 { 3770 struct dasd_device *device; 3771 int chp, oldopm, hpfpm, ifccpm; 3772 3773 device = dasd_device_from_cdev_locked(cdev); 3774 if (IS_ERR(device)) 3775 return; 3776 3777 oldopm = dasd_path_get_opm(device); 3778 for (chp = 0; chp < 8; chp++) { 3779 if (path_event[chp] & PE_PATH_GONE) { 3780 dasd_path_notoper(device, chp); 3781 } 3782 if (path_event[chp] & PE_PATH_AVAILABLE) { 3783 dasd_path_available(device, chp); 3784 dasd_schedule_device_bh(device); 3785 } 3786 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3787 if (!dasd_path_is_operational(device, chp) && 3788 !dasd_path_need_verify(device, chp)) { 3789 /* 3790 * we can not establish a pathgroup on an 3791 * unavailable path, so trigger a path 3792 * verification first 3793 */ 3794 dasd_path_available(device, chp); 3795 dasd_schedule_device_bh(device); 3796 } 3797 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3798 "Pathgroup re-established\n"); 3799 if (device->discipline->kick_validate) 3800 device->discipline->kick_validate(device); 3801 } 3802 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3803 dasd_path_fcsec_update(device, chp); 3804 dasd_schedule_device_bh(device); 3805 } 3806 } 3807 hpfpm = dasd_path_get_hpfpm(device); 3808 ifccpm = dasd_path_get_ifccpm(device); 3809 if (!dasd_path_get_opm(device) && hpfpm) { 3810 /* 3811 * device has no operational paths but at least one path is 3812 * disabled due to HPF errors 3813 * disable HPF at all and use the path(s) again 3814 */ 3815 if (device->discipline->disable_hpf) 3816 device->discipline->disable_hpf(device); 3817 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3818 dasd_path_set_tbvpm(device, hpfpm); 3819 dasd_schedule_device_bh(device); 3820 dasd_schedule_requeue(device); 3821 } 
else if (!dasd_path_get_opm(device) && ifccpm) { 3822 /* 3823 * device has no operational paths but at least one path is 3824 * disabled due to IFCC errors 3825 * trigger path verification on paths with IFCC errors 3826 */ 3827 dasd_path_set_tbvpm(device, ifccpm); 3828 dasd_schedule_device_bh(device); 3829 } 3830 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { 3831 dev_warn(&device->cdev->dev, 3832 "No verified channel paths remain for the device\n"); 3833 DBF_DEV_EVENT(DBF_WARNING, device, 3834 "%s", "last verified path gone"); 3835 /* First call extended error reporting and check for autoquiesce. */ 3836 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); 3837 dasd_device_set_stop_bits(device, 3838 DASD_STOPPED_DC_WAIT); 3839 } 3840 dasd_put_device(device); 3841 } 3842 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3843 3844 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3845 { 3846 if (!dasd_path_get_opm(device) && lpm) { 3847 dasd_path_set_opm(device, lpm); 3848 dasd_generic_path_operational(device); 3849 } else 3850 dasd_path_add_opm(device, lpm); 3851 return 0; 3852 } 3853 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3854 3855 void dasd_generic_space_exhaust(struct dasd_device *device, 3856 struct dasd_ccw_req *cqr) 3857 { 3858 /* First call extended error reporting and check for autoquiesce. */ 3859 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC); 3860 3861 if (device->state < DASD_STATE_BASIC) 3862 return; 3863 3864 if (cqr->status == DASD_CQR_IN_IO || 3865 cqr->status == DASD_CQR_CLEAR_PENDING) { 3866 cqr->status = DASD_CQR_QUEUED; 3867 cqr->retries++; 3868 } 3869 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); 3870 dasd_device_clear_timer(device); 3871 dasd_schedule_device_bh(device); 3872 } 3873 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); 3874 3875 void dasd_generic_space_avail(struct dasd_device *device) 3876 { 3877 dev_info(&device->cdev->dev, "Extent pool space is available\n"); 3878 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); 3879 3880 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); 3881 dasd_schedule_device_bh(device); 3882 3883 if (device->block) { 3884 dasd_schedule_block_bh(device->block); 3885 if (device->block->gdp) 3886 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3887 } 3888 if (!device->stopped) 3889 wake_up(&generic_waitq); 3890 } 3891 EXPORT_SYMBOL_GPL(dasd_generic_space_avail); 3892 3893 /* 3894 * clear active requests and requeue them to block layer if possible 3895 */ 3896 int dasd_generic_requeue_all_requests(struct dasd_device *device) 3897 { 3898 struct dasd_block *block = device->block; 3899 struct list_head requeue_queue; 3900 struct dasd_ccw_req *cqr, *n; 3901 int rc; 3902 3903 if (!block) 3904 return 0; 3905 3906 INIT_LIST_HEAD(&requeue_queue); 3907 rc = _dasd_requests_to_flushqueue(block, &requeue_queue); 3908 3909 /* Now call the callback function of flushed requests */ 3910 restart_cb: 3911 list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { 3912 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3913 /* Process finished ERP request. 
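/*
 * Editor's note (illustrative sketch): the space handling helpers above are
 * driven by the discipline for thin-provisioned (ESE) volumes; its sense and
 * attention evaluation decides which of the two to call.
 * example_handle_space_event() and its out_of_space argument are hypothetical.
 */
#if 0
static void example_handle_space_event(struct dasd_device *device,
				       struct dasd_ccw_req *cqr,
				       int out_of_space)
{
	if (out_of_space)
		/* stop the device and put the interrupted request back */
		dasd_generic_space_exhaust(device, cqr);
	else
		/* extent pool space is back, resume I/O */
		dasd_generic_space_avail(device);
}
#endif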
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart the list_for_each_entry_safe loop since
			 * __dasd_process_erp might remove multiple elements
			 */
			goto restart_cb;
		}
		_dasd_requeue_request(cqr);
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* Queue the call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static int dasd_handle_autoquiesce(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   unsigned int reason)
{
	/* In any case, write an EER message with the reason. */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, cqr, reason);

	if (!test_bit(reason, &device->aq_mask))
		return 0;

	/* Notify EER about the autoquiesce. */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);

	dev_info(&device->cdev->dev,
		 "The DASD has been put in the quiesce state\n");
	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);

	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
		dasd_schedule_requeue(device);

	return 1;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)virt_to_phys(cqr->data);
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * In command mode and transport mode we need to look for sense
 * data in different places.  The sense data itself is always an
 * array of 32 bytes, so we can unify the sense data access for
 * both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2:	/* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);
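
/*
 * Illustrative sketch (not part of the driver, therefore compiled out):
 * a rough idea of how a discipline module might use the generic helpers
 * above.  The structure example_rdc_data, the magic value, and the
 * command-reject bit check are placeholders, not the real ECKD/FBA
 * definitions.
 */
#if 0
struct example_rdc_data {
	__u8 data[64];			/* hypothetical RDC payload layout */
};

static int example_read_characteristics(struct dasd_device *device)
{
	struct example_rdc_data rdc;
	int rc;

	/* Build and run a Read Device Characteristics request, then copy
	 * the result into rdc on success. */
	rc = dasd_generic_read_dev_chars(device, 0x45584D50 /* placeholder magic */,
					 &rdc, sizeof(rdc));
	if (rc)
		dev_warn(&device->cdev->dev,
			 "Reading device characteristics failed, rc=%d\n", rc);
	return rc;
}

static void example_check_sense(struct dasd_device *device, struct irb *irb)
{
	/* dasd_get_sense() hides the command/transport mode difference and
	 * returns the 32-byte sense array or NULL. */
	char *sense = dasd_get_sense(irb);

	if (sense && (sense[0] & 0x80))	/* placeholder: command reject bit */
		dev_err(&device->cdev->dev,
			"example: request was rejected by the device\n");
}
#endif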