// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);
static int dasd_handle_autoquiesce(struct dasd_device *, struct dasd_ccw_req *,
				   unsigned int);
/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;
/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}
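/*
 * Illustration (not part of the driver flow): the three chunk pools set up
 * above exist so that no memory allocation happens in the I/O path. A
 * discipline typically builds its channel program from them through the
 * request allocators defined later in this file, roughly like this
 * (sketch only; see dasd_eckd.c for real callers):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device, NULL);
 *	if (IS_ERR(cqr))
 *		return cqr;
 *	// fill cqr->cpaddr and cqr->data, submit, and later release it with
 *	dasd_sfree_request(cqr, device);
 */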
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);

	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to dasd_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until the target state is reached.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
	device = cqr->startdev;
	if (!device->profile.data)
		return;

	spin_lock(get_ccwdev_lock(device->cdev));
	counter = 1; /* request is not yet queued on the start device */
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	spin_unlock(get_ccwdev_lock(device->cdev));

	spin_lock(&device->profile.lock);
	device->profile.data->dasd_io_nr_req[counter]++;
	if (rq_data_dir(req) == READ)
		device->profile.data->dasd_read_nr_req[counter]++;
	spin_unlock(&device->profile.lock);
}
/*
 * Add profiling information for cqr after execution.
 */

#define dasd_profile_counter(value, index)			       \
{								       \
	for (index = 0; index < 31 && value >> (2+index); index++)    \
		;						       \
}

static void dasd_profile_end_add_data(struct dasd_profile_info *data,
				      int is_alias,
				      int is_tpm,
				      int is_read,
				      long sectors,
				      int sectors_ind,
				      int tottime_ind,
				      int tottimeps_ind,
				      int strtime_ind,
				      int irqtime_ind,
				      int irqtimeps_ind,
				      int endtime_ind)
{
	/* in case of an overflow, reset the whole profile */
	if (data->dasd_io_reqs == UINT_MAX) {
		memset(data, 0, sizeof(*data));
		ktime_get_real_ts64(&data->starttod);
	}
	data->dasd_io_reqs++;
	data->dasd_io_sects += sectors;
	if (is_alias)
		data->dasd_io_alias++;
	if (is_tpm)
		data->dasd_io_tpm++;

	data->dasd_io_secs[sectors_ind]++;
	data->dasd_io_times[tottime_ind]++;
	data->dasd_io_timps[tottimeps_ind]++;
	data->dasd_io_time1[strtime_ind]++;
	data->dasd_io_time2[irqtime_ind]++;
	data->dasd_io_time2ps[irqtimeps_ind]++;
	data->dasd_io_time3[endtime_ind]++;

	if (is_read) {
		data->dasd_read_reqs++;
		data->dasd_read_sects += sectors;
		if (is_alias)
			data->dasd_read_alias++;
		if (is_tpm)
			data->dasd_read_tpm++;
		data->dasd_read_secs[sectors_ind]++;
		data->dasd_read_times[tottime_ind]++;
		data->dasd_read_time1[strtime_ind]++;
		data->dasd_read_time2[irqtime_ind]++;
		data->dasd_read_time3[endtime_ind]++;
	}
}

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}
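/*
 * The "statistics" attributes created further below accept the keywords
 * parsed in dasd_stats_write(). Assuming debugfs is mounted in the usual
 * place, the global profile can for example be driven from user space with:
 *
 *	echo on    > /sys/kernel/debug/dasd/global/statistics
 *	echo reset > /sys/kernel/debug/dasd/global/statistics
 *	echo off   > /sys/kernel/debug/dasd/global/statistics
 *
 * and read back with cat; per-device and per-disk files of the same name
 * live in the corresponding subdirectories of the dasd debugfs root.
 */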
static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;

	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif				/* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}
struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
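/*
 * dasd_fmalloc_request() below is the format counterpart of
 * dasd_smalloc_request(): it carves the request out of the ese_chunks pool
 * instead of ccw_chunks, which is what the on-demand format requests for
 * thin-provisioned (ESE) volumes are built from. A request taken from one
 * pool must be released with the matching free function, e.g. (sketch):
 *
 *	cqr = dasd_fmalloc_request(magic, cplength, datasize, device);
 *	...
 *	dasd_ffree_request(cqr, device);
 */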
struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);
/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the amount of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		/* this is equivalent to CC=3 for SSCH report this to EER */
		dasd_handle_autoquiesce(device, cqr, DASD_EER_STARTIO);
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->gdp)
			blk_mq_run_hw_queues(device->block->gdp->queue, true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	struct request *req;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}
*/ 1787 if (cqr->devlist.next != &device->ccw_queue) { 1788 next = list_entry(cqr->devlist.next, 1789 struct dasd_ccw_req, devlist); 1790 } 1791 } else { /* error */ 1792 /* check for HPF error 1793 * call discipline function to requeue all requests 1794 * and disable HPF accordingly 1795 */ 1796 if (cqr->cpmode && dasd_check_hpf_error(irb) && 1797 device->discipline->handle_hpf_error) 1798 device->discipline->handle_hpf_error(device, irb); 1799 /* 1800 * If we don't want complex ERP for this request, then just 1801 * reset this and retry it in the fastpath 1802 */ 1803 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1804 cqr->retries > 0) { 1805 if (cqr->lpm == dasd_path_get_opm(device)) 1806 DBF_DEV_EVENT(DBF_DEBUG, device, 1807 "default ERP in fastpath " 1808 "(%i retries left)", 1809 cqr->retries); 1810 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) 1811 cqr->lpm = dasd_path_get_opm(device); 1812 cqr->status = DASD_CQR_QUEUED; 1813 next = cqr; 1814 } else 1815 cqr->status = DASD_CQR_ERROR; 1816 } 1817 if (next && (next->status == DASD_CQR_QUEUED) && 1818 (!device->stopped)) { 1819 if (device->discipline->start_IO(next) == 0) 1820 expires = next->expires; 1821 } 1822 if (expires != 0) 1823 dasd_device_set_timer(device, expires); 1824 else 1825 dasd_device_clear_timer(device); 1826 dasd_schedule_device_bh(device); 1827 } 1828 EXPORT_SYMBOL(dasd_int_handler); 1829 1830 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1831 { 1832 struct dasd_device *device; 1833 1834 device = dasd_device_from_cdev_locked(cdev); 1835 1836 if (IS_ERR(device)) 1837 goto out; 1838 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1839 device->state != device->target || 1840 !device->discipline->check_for_device_change){ 1841 dasd_put_device(device); 1842 goto out; 1843 } 1844 if (device->discipline->dump_sense_dbf) 1845 device->discipline->dump_sense_dbf(device, irb, "uc"); 1846 device->discipline->check_for_device_change(device, NULL, irb); 1847 dasd_put_device(device); 1848 out: 1849 return UC_TODO_RETRY; 1850 } 1851 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1852 1853 /* 1854 * If we have an error on a dasd_block layer request then we cancel 1855 * and return all further requests from the same dasd_block as well. 1856 */ 1857 static void __dasd_device_recovery(struct dasd_device *device, 1858 struct dasd_ccw_req *ref_cqr) 1859 { 1860 struct list_head *l, *n; 1861 struct dasd_ccw_req *cqr; 1862 1863 /* 1864 * only requeue request that came from the dasd_block layer 1865 */ 1866 if (!ref_cqr->block) 1867 return; 1868 1869 list_for_each_safe(l, n, &device->ccw_queue) { 1870 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1871 if (cqr->status == DASD_CQR_QUEUED && 1872 ref_cqr->block == cqr->block) { 1873 cqr->status = DASD_CQR_CLEARED; 1874 } 1875 } 1876 }; 1877 1878 /* 1879 * Remove those ccw requests from the queue that need to be returned 1880 * to the upper layer. 1881 */ 1882 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1883 struct list_head *final_queue) 1884 { 1885 struct list_head *l, *n; 1886 struct dasd_ccw_req *cqr; 1887 1888 /* Process request with final status. */ 1889 list_for_each_safe(l, n, &device->ccw_queue) { 1890 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1891 1892 /* Skip any non-final request. 
*/ 1893 if (cqr->status == DASD_CQR_QUEUED || 1894 cqr->status == DASD_CQR_IN_IO || 1895 cqr->status == DASD_CQR_CLEAR_PENDING) 1896 continue; 1897 if (cqr->status == DASD_CQR_ERROR) { 1898 __dasd_device_recovery(device, cqr); 1899 } 1900 /* Rechain finished requests to final queue */ 1901 list_move_tail(&cqr->devlist, final_queue); 1902 } 1903 } 1904 1905 static void __dasd_process_cqr(struct dasd_device *device, 1906 struct dasd_ccw_req *cqr) 1907 { 1908 char errorstring[ERRORLENGTH]; 1909 1910 switch (cqr->status) { 1911 case DASD_CQR_SUCCESS: 1912 cqr->status = DASD_CQR_DONE; 1913 break; 1914 case DASD_CQR_ERROR: 1915 cqr->status = DASD_CQR_NEED_ERP; 1916 break; 1917 case DASD_CQR_CLEARED: 1918 cqr->status = DASD_CQR_TERMINATED; 1919 break; 1920 default: 1921 /* internal error 12 - wrong cqr status*/ 1922 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1923 dev_err(&device->cdev->dev, 1924 "An error occurred in the DASD device driver, " 1925 "reason=%s\n", errorstring); 1926 BUG(); 1927 } 1928 if (cqr->callback) 1929 cqr->callback(cqr, cqr->callback_data); 1930 } 1931 1932 /* 1933 * the cqrs from the final queue are returned to the upper layer 1934 * by setting a dasd_block state and calling the callback function 1935 */ 1936 static void __dasd_device_process_final_queue(struct dasd_device *device, 1937 struct list_head *final_queue) 1938 { 1939 struct list_head *l, *n; 1940 struct dasd_ccw_req *cqr; 1941 struct dasd_block *block; 1942 1943 list_for_each_safe(l, n, final_queue) { 1944 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1945 list_del_init(&cqr->devlist); 1946 block = cqr->block; 1947 if (!block) { 1948 __dasd_process_cqr(device, cqr); 1949 } else { 1950 spin_lock_bh(&block->queue_lock); 1951 __dasd_process_cqr(device, cqr); 1952 spin_unlock_bh(&block->queue_lock); 1953 } 1954 } 1955 } 1956 1957 /* 1958 * check if device should be autoquiesced due to too many timeouts 1959 */ 1960 static void __dasd_device_check_autoquiesce_timeout(struct dasd_device *device, 1961 struct dasd_ccw_req *cqr) 1962 { 1963 if ((device->default_retries - cqr->retries) >= device->aq_timeouts) 1964 dasd_handle_autoquiesce(device, cqr, DASD_EER_TIMEOUTS); 1965 } 1966 1967 /* 1968 * Take a look at the first request on the ccw queue and check 1969 * if it reached its expire time. If so, terminate the IO. 
1970 */ 1971 static void __dasd_device_check_expire(struct dasd_device *device) 1972 { 1973 struct dasd_ccw_req *cqr; 1974 1975 if (list_empty(&device->ccw_queue)) 1976 return; 1977 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1978 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1979 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1980 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1981 /* 1982 * IO in safe offline processing should not 1983 * run out of retries 1984 */ 1985 cqr->retries++; 1986 } 1987 if (device->discipline->term_IO(cqr) != 0) { 1988 /* Hmpf, try again in 5 sec */ 1989 dev_err(&device->cdev->dev, 1990 "cqr %p timed out (%lus) but cannot be " 1991 "ended, retrying in 5 s\n", 1992 cqr, (cqr->expires/HZ)); 1993 cqr->expires += 5*HZ; 1994 dasd_device_set_timer(device, 5*HZ); 1995 } else { 1996 dev_err(&device->cdev->dev, 1997 "cqr %p timed out (%lus), %i retries " 1998 "remaining\n", cqr, (cqr->expires/HZ), 1999 cqr->retries); 2000 } 2001 __dasd_device_check_autoquiesce_timeout(device, cqr); 2002 } 2003 } 2004 2005 /* 2006 * return 1 when device is not eligible for IO 2007 */ 2008 static int __dasd_device_is_unusable(struct dasd_device *device, 2009 struct dasd_ccw_req *cqr) 2010 { 2011 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 2012 2013 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2014 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2015 /* 2016 * dasd is being set offline 2017 * but it is no safe offline where we have to allow I/O 2018 */ 2019 return 1; 2020 } 2021 if (device->stopped) { 2022 if (device->stopped & mask) { 2023 /* stopped and CQR will not change that. */ 2024 return 1; 2025 } 2026 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2027 /* CQR is not able to change device to 2028 * operational. */ 2029 return 1; 2030 } 2031 /* CQR required to get device operational. */ 2032 } 2033 return 0; 2034 } 2035 2036 /* 2037 * Take a look at the first request on the ccw queue and check 2038 * if it needs to be started. 
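 * Only a request in QUEUED state is started.  If the device is not
 * usable for it (offline, or stopped by a condition the request cannot
 * clear) the request is handed back to the upper layer as CLEARED with
 * intrc -EAGAIN.  A start_IO() failure other than -EACCES simply
 * re-arms the device timer for a short retry; -EACCES reschedules the
 * device tasklet.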
2039 */ 2040 static void __dasd_device_start_head(struct dasd_device *device) 2041 { 2042 struct dasd_ccw_req *cqr; 2043 int rc; 2044 2045 if (list_empty(&device->ccw_queue)) 2046 return; 2047 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2048 if (cqr->status != DASD_CQR_QUEUED) 2049 return; 2050 /* if device is not usable return request to upper layer */ 2051 if (__dasd_device_is_unusable(device, cqr)) { 2052 cqr->intrc = -EAGAIN; 2053 cqr->status = DASD_CQR_CLEARED; 2054 dasd_schedule_device_bh(device); 2055 return; 2056 } 2057 2058 rc = device->discipline->start_IO(cqr); 2059 if (rc == 0) 2060 dasd_device_set_timer(device, cqr->expires); 2061 else if (rc == -EACCES) { 2062 dasd_schedule_device_bh(device); 2063 } else 2064 /* Hmpf, try again in 1/2 sec */ 2065 dasd_device_set_timer(device, 50); 2066 } 2067 2068 static void __dasd_device_check_path_events(struct dasd_device *device) 2069 { 2070 __u8 tbvpm, fcsecpm; 2071 int rc; 2072 2073 tbvpm = dasd_path_get_tbvpm(device); 2074 fcsecpm = dasd_path_get_fcsecpm(device); 2075 2076 if (!tbvpm && !fcsecpm) 2077 return; 2078 2079 if (device->stopped & ~(DASD_STOPPED_DC_WAIT)) 2080 return; 2081 2082 dasd_path_clear_all_verify(device); 2083 dasd_path_clear_all_fcsec(device); 2084 2085 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm); 2086 if (rc) { 2087 dasd_path_add_tbvpm(device, tbvpm); 2088 dasd_path_add_fcsecpm(device, fcsecpm); 2089 dasd_device_set_timer(device, 50); 2090 } 2091 }; 2092 2093 /* 2094 * Go through all request on the dasd_device request queue, 2095 * terminate them on the cdev if necessary, and return them to the 2096 * submitting layer via callback. 2097 * Note: 2098 * Make sure that all 'submitting layers' still exist when 2099 * this function is called!. In other words, when 'device' is a base 2100 * device then all block layer requests must have been removed before 2101 * via dasd_flush_block_queue. 2102 */ 2103 int dasd_flush_device_queue(struct dasd_device *device) 2104 { 2105 struct dasd_ccw_req *cqr, *n; 2106 int rc; 2107 struct list_head flush_queue; 2108 2109 INIT_LIST_HEAD(&flush_queue); 2110 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2111 rc = 0; 2112 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2113 /* Check status and move request to flush_queue */ 2114 switch (cqr->status) { 2115 case DASD_CQR_IN_IO: 2116 rc = device->discipline->term_IO(cqr); 2117 if (rc) { 2118 /* unable to terminate requeust */ 2119 dev_err(&device->cdev->dev, 2120 "Flushing the DASD request queue " 2121 "failed for request %p\n", cqr); 2122 /* stop flush processing */ 2123 goto finished; 2124 } 2125 break; 2126 case DASD_CQR_QUEUED: 2127 cqr->stopclk = get_tod_clock(); 2128 cqr->status = DASD_CQR_CLEARED; 2129 break; 2130 default: /* no need to modify the others */ 2131 break; 2132 } 2133 list_move_tail(&cqr->devlist, &flush_queue); 2134 } 2135 finished: 2136 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2137 /* 2138 * After this point all requests must be in state CLEAR_PENDING, 2139 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 2140 * one of the others. 
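 * A request stays in CLEAR_PENDING until the clear interrupt arrives;
 * the interrupt handler then moves it to CLEARED and wakes
 * dasd_flush_wq, so the wait below cannot get stuck.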
2141 */ 2142 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2143 wait_event(dasd_flush_wq, 2144 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2145 /* 2146 * Now set each request back to TERMINATED, DONE or NEED_ERP 2147 * and call the callback function of flushed requests 2148 */ 2149 __dasd_device_process_final_queue(device, &flush_queue); 2150 return rc; 2151 } 2152 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2153 2154 /* 2155 * Acquire the device lock and process queues for the device. 2156 */ 2157 static void dasd_device_tasklet(unsigned long data) 2158 { 2159 struct dasd_device *device = (struct dasd_device *) data; 2160 struct list_head final_queue; 2161 2162 atomic_set (&device->tasklet_scheduled, 0); 2163 INIT_LIST_HEAD(&final_queue); 2164 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2165 /* Check expire time of first request on the ccw queue. */ 2166 __dasd_device_check_expire(device); 2167 /* find final requests on ccw queue */ 2168 __dasd_device_process_ccw_queue(device, &final_queue); 2169 __dasd_device_check_path_events(device); 2170 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2171 /* Now call the callback function of requests with final status */ 2172 __dasd_device_process_final_queue(device, &final_queue); 2173 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2174 /* Now check if the head of the ccw queue needs to be started. */ 2175 __dasd_device_start_head(device); 2176 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2177 if (waitqueue_active(&shutdown_waitq)) 2178 wake_up(&shutdown_waitq); 2179 dasd_put_device(device); 2180 } 2181 2182 /* 2183 * Schedules a call to dasd_tasklet over the device tasklet. 2184 */ 2185 void dasd_schedule_device_bh(struct dasd_device *device) 2186 { 2187 /* Protect against rescheduling. */ 2188 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2189 return; 2190 dasd_get_device(device); 2191 tasklet_hi_schedule(&device->tasklet); 2192 } 2193 EXPORT_SYMBOL(dasd_schedule_device_bh); 2194 2195 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2196 { 2197 device->stopped |= bits; 2198 } 2199 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2200 2201 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2202 { 2203 device->stopped &= ~bits; 2204 if (!device->stopped) 2205 wake_up(&generic_waitq); 2206 } 2207 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2208 2209 /* 2210 * Queue a request to the head of the device ccw_queue. 2211 * Start the I/O if possible. 2212 */ 2213 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2214 { 2215 struct dasd_device *device; 2216 unsigned long flags; 2217 2218 device = cqr->startdev; 2219 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2220 cqr->status = DASD_CQR_QUEUED; 2221 list_add(&cqr->devlist, &device->ccw_queue); 2222 /* let the bh start the request to keep them in order */ 2223 dasd_schedule_device_bh(device); 2224 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2225 } 2226 EXPORT_SYMBOL(dasd_add_request_head); 2227 2228 /* 2229 * Queue a request to the tail of the device ccw_queue. 2230 * Start the I/O if possible. 
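 * The request is only marked QUEUED here; the device tasklet performs
 * the actual start_IO() so that requests are started strictly in queue
 * order.  Typical synchronous callers follow the pattern used by the
 * sleep_on functions below, roughly:
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));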
2231 */ 2232 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2233 { 2234 struct dasd_device *device; 2235 unsigned long flags; 2236 2237 device = cqr->startdev; 2238 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2239 cqr->status = DASD_CQR_QUEUED; 2240 list_add_tail(&cqr->devlist, &device->ccw_queue); 2241 /* let the bh start the request to keep them in order */ 2242 dasd_schedule_device_bh(device); 2243 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2244 } 2245 EXPORT_SYMBOL(dasd_add_request_tail); 2246 2247 /* 2248 * Wakeup helper for the 'sleep_on' functions. 2249 */ 2250 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2251 { 2252 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2253 cqr->callback_data = DASD_SLEEPON_END_TAG; 2254 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2255 wake_up(&generic_waitq); 2256 } 2257 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2258 2259 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2260 { 2261 struct dasd_device *device; 2262 int rc; 2263 2264 device = cqr->startdev; 2265 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2266 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2267 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2268 return rc; 2269 } 2270 2271 /* 2272 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2273 */ 2274 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2275 { 2276 struct dasd_device *device; 2277 dasd_erp_fn_t erp_fn; 2278 2279 if (cqr->status == DASD_CQR_FILLED) 2280 return 0; 2281 device = cqr->startdev; 2282 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2283 if (cqr->status == DASD_CQR_TERMINATED) { 2284 device->discipline->handle_terminated_request(cqr); 2285 return 1; 2286 } 2287 if (cqr->status == DASD_CQR_NEED_ERP) { 2288 erp_fn = device->discipline->erp_action(cqr); 2289 erp_fn(cqr); 2290 return 1; 2291 } 2292 if (cqr->status == DASD_CQR_FAILED) 2293 dasd_log_sense(cqr, &cqr->irb); 2294 if (cqr->refers) { 2295 __dasd_process_erp(device, cqr); 2296 return 1; 2297 } 2298 } 2299 return 0; 2300 } 2301 2302 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2303 { 2304 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2305 if (cqr->refers) /* erp is not done yet */ 2306 return 1; 2307 return ((cqr->status != DASD_CQR_DONE) && 2308 (cqr->status != DASD_CQR_FAILED)); 2309 } else 2310 return (cqr->status == DASD_CQR_FILLED); 2311 } 2312 2313 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2314 { 2315 struct dasd_device *device; 2316 int rc; 2317 struct list_head ccw_queue; 2318 struct dasd_ccw_req *cqr; 2319 2320 INIT_LIST_HEAD(&ccw_queue); 2321 maincqr->status = DASD_CQR_FILLED; 2322 device = maincqr->startdev; 2323 list_add(&maincqr->blocklist, &ccw_queue); 2324 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2325 cqr = list_first_entry(&ccw_queue, 2326 struct dasd_ccw_req, blocklist)) { 2327 2328 if (__dasd_sleep_on_erp(cqr)) 2329 continue; 2330 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2331 continue; 2332 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2333 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2334 cqr->status = DASD_CQR_FAILED; 2335 cqr->intrc = -EPERM; 2336 continue; 2337 } 2338 /* Non-temporary stop condition will trigger fail fast */ 2339 if (device->stopped & ~DASD_STOPPED_PENDING && 2340 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2341 !dasd_eer_enabled(device) && device->aq_mask == 0) { 2342 cqr->status = DASD_CQR_FAILED; 2343 
cqr->intrc = -ENOLINK; 2344 continue; 2345 } 2346 /* 2347 * Don't try to start requests if device is in 2348 * offline processing, it might wait forever 2349 */ 2350 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2351 cqr->status = DASD_CQR_FAILED; 2352 cqr->intrc = -ENODEV; 2353 continue; 2354 } 2355 /* 2356 * Don't try to start requests if device is stopped 2357 * except path verification requests 2358 */ 2359 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2360 if (interruptible) { 2361 rc = wait_event_interruptible( 2362 generic_waitq, !(device->stopped)); 2363 if (rc == -ERESTARTSYS) { 2364 cqr->status = DASD_CQR_FAILED; 2365 maincqr->intrc = rc; 2366 continue; 2367 } 2368 } else 2369 wait_event(generic_waitq, !(device->stopped)); 2370 } 2371 if (!cqr->callback) 2372 cqr->callback = dasd_wakeup_cb; 2373 2374 cqr->callback_data = DASD_SLEEPON_START_TAG; 2375 dasd_add_request_tail(cqr); 2376 if (interruptible) { 2377 rc = wait_event_interruptible( 2378 generic_waitq, _wait_for_wakeup(cqr)); 2379 if (rc == -ERESTARTSYS) { 2380 dasd_cancel_req(cqr); 2381 /* wait (non-interruptible) for final status */ 2382 wait_event(generic_waitq, 2383 _wait_for_wakeup(cqr)); 2384 cqr->status = DASD_CQR_FAILED; 2385 maincqr->intrc = rc; 2386 continue; 2387 } 2388 } else 2389 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2390 } 2391 2392 maincqr->endclk = get_tod_clock(); 2393 if ((maincqr->status != DASD_CQR_DONE) && 2394 (maincqr->intrc != -ERESTARTSYS)) 2395 dasd_log_sense(maincqr, &maincqr->irb); 2396 if (maincqr->status == DASD_CQR_DONE) 2397 rc = 0; 2398 else if (maincqr->intrc) 2399 rc = maincqr->intrc; 2400 else 2401 rc = -EIO; 2402 return rc; 2403 } 2404 2405 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2406 { 2407 struct dasd_ccw_req *cqr; 2408 2409 list_for_each_entry(cqr, ccw_queue, blocklist) { 2410 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2411 return 0; 2412 } 2413 2414 return 1; 2415 } 2416 2417 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2418 { 2419 struct dasd_device *device; 2420 struct dasd_ccw_req *cqr, *n; 2421 u8 *sense = NULL; 2422 int rc; 2423 2424 retry: 2425 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2426 device = cqr->startdev; 2427 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2428 continue; 2429 2430 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2431 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2432 cqr->status = DASD_CQR_FAILED; 2433 cqr->intrc = -EPERM; 2434 continue; 2435 } 2436 /*Non-temporary stop condition will trigger fail fast*/ 2437 if (device->stopped & ~DASD_STOPPED_PENDING && 2438 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2439 !dasd_eer_enabled(device)) { 2440 cqr->status = DASD_CQR_FAILED; 2441 cqr->intrc = -EAGAIN; 2442 continue; 2443 } 2444 2445 /*Don't try to start requests if device is stopped*/ 2446 if (interruptible) { 2447 rc = wait_event_interruptible( 2448 generic_waitq, !device->stopped); 2449 if (rc == -ERESTARTSYS) { 2450 cqr->status = DASD_CQR_FAILED; 2451 cqr->intrc = rc; 2452 continue; 2453 } 2454 } else 2455 wait_event(generic_waitq, !(device->stopped)); 2456 2457 if (!cqr->callback) 2458 cqr->callback = dasd_wakeup_cb; 2459 cqr->callback_data = DASD_SLEEPON_START_TAG; 2460 dasd_add_request_tail(cqr); 2461 } 2462 2463 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2464 2465 rc = 0; 2466 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2467 /* 2468 * In some cases the 'File Protected' or 'Incorrect 
Length' 2469 * error might be expected and error recovery would be 2470 * unnecessary in these cases. Check if the according suppress 2471 * bit is set. 2472 */ 2473 sense = dasd_get_sense(&cqr->irb); 2474 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2475 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2476 continue; 2477 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2478 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2479 continue; 2480 2481 /* 2482 * for alias devices simplify error recovery and 2483 * return to upper layer 2484 * do not skip ERP requests 2485 */ 2486 if (cqr->startdev != cqr->basedev && !cqr->refers && 2487 (cqr->status == DASD_CQR_TERMINATED || 2488 cqr->status == DASD_CQR_NEED_ERP)) 2489 return -EAGAIN; 2490 2491 /* normal recovery for basedev IO */ 2492 if (__dasd_sleep_on_erp(cqr)) 2493 /* handle erp first */ 2494 goto retry; 2495 } 2496 2497 return 0; 2498 } 2499 2500 /* 2501 * Queue a request to the tail of the device ccw_queue and wait for 2502 * it's completion. 2503 */ 2504 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2505 { 2506 return _dasd_sleep_on(cqr, 0); 2507 } 2508 EXPORT_SYMBOL(dasd_sleep_on); 2509 2510 /* 2511 * Start requests from a ccw_queue and wait for their completion. 2512 */ 2513 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2514 { 2515 return _dasd_sleep_on_queue(ccw_queue, 0); 2516 } 2517 EXPORT_SYMBOL(dasd_sleep_on_queue); 2518 2519 /* 2520 * Start requests from a ccw_queue and wait interruptible for their completion. 2521 */ 2522 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2523 { 2524 return _dasd_sleep_on_queue(ccw_queue, 1); 2525 } 2526 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2527 2528 /* 2529 * Queue a request to the tail of the device ccw_queue and wait 2530 * interruptible for it's completion. 2531 */ 2532 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2533 { 2534 return _dasd_sleep_on(cqr, 1); 2535 } 2536 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2537 2538 /* 2539 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2540 * for eckd devices) the currently running request has to be terminated 2541 * and be put back to status queued, before the special request is added 2542 * to the head of the queue. Then the special request is waited on normally. 2543 */ 2544 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2545 { 2546 struct dasd_ccw_req *cqr; 2547 int rc; 2548 2549 if (list_empty(&device->ccw_queue)) 2550 return 0; 2551 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2552 rc = device->discipline->term_IO(cqr); 2553 if (!rc) 2554 /* 2555 * CQR terminated because a more important request is pending. 2556 * Undo decreasing of retry counter because this is 2557 * not an error case. 
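 * If term_IO() fails, the error is returned to the caller, which gives
 * up rather than queueing the special request.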
2558 */ 2559 cqr->retries++; 2560 return rc; 2561 } 2562 2563 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2564 { 2565 struct dasd_device *device; 2566 int rc; 2567 2568 device = cqr->startdev; 2569 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2570 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2571 cqr->status = DASD_CQR_FAILED; 2572 cqr->intrc = -EPERM; 2573 return -EIO; 2574 } 2575 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2576 rc = _dasd_term_running_cqr(device); 2577 if (rc) { 2578 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2579 return rc; 2580 } 2581 cqr->callback = dasd_wakeup_cb; 2582 cqr->callback_data = DASD_SLEEPON_START_TAG; 2583 cqr->status = DASD_CQR_QUEUED; 2584 /* 2585 * add new request as second 2586 * first the terminated cqr needs to be finished 2587 */ 2588 list_add(&cqr->devlist, device->ccw_queue.next); 2589 2590 /* let the bh start the request to keep them in order */ 2591 dasd_schedule_device_bh(device); 2592 2593 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2594 2595 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2596 2597 if (cqr->status == DASD_CQR_DONE) 2598 rc = 0; 2599 else if (cqr->intrc) 2600 rc = cqr->intrc; 2601 else 2602 rc = -EIO; 2603 2604 /* kick tasklets */ 2605 dasd_schedule_device_bh(device); 2606 if (device->block) 2607 dasd_schedule_block_bh(device->block); 2608 2609 return rc; 2610 } 2611 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2612 2613 /* 2614 * Cancels a request that was started with dasd_sleep_on_req. 2615 * This is useful to timeout requests. The request will be 2616 * terminated if it is currently in i/o. 2617 * Returns 0 if request termination was successful 2618 * negative error code if termination failed 2619 * Cancellation of a request is an asynchronous operation! The calling 2620 * function has to wait until the request is properly returned via callback. 2621 */ 2622 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2623 { 2624 struct dasd_device *device = cqr->startdev; 2625 int rc = 0; 2626 2627 switch (cqr->status) { 2628 case DASD_CQR_QUEUED: 2629 /* request was not started - just set to cleared */ 2630 cqr->status = DASD_CQR_CLEARED; 2631 break; 2632 case DASD_CQR_IN_IO: 2633 /* request in IO - terminate IO and release again */ 2634 rc = device->discipline->term_IO(cqr); 2635 if (rc) { 2636 dev_err(&device->cdev->dev, 2637 "Cancelling request %p failed with rc=%d\n", 2638 cqr, rc); 2639 } else { 2640 cqr->stopclk = get_tod_clock(); 2641 } 2642 break; 2643 default: /* already finished or clear pending - do nothing */ 2644 break; 2645 } 2646 dasd_schedule_device_bh(device); 2647 return rc; 2648 } 2649 2650 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2651 { 2652 struct dasd_device *device = cqr->startdev; 2653 unsigned long flags; 2654 int rc; 2655 2656 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2657 rc = __dasd_cancel_req(cqr); 2658 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2659 return rc; 2660 } 2661 2662 /* 2663 * SECTION: Operations of the dasd_block layer. 2664 */ 2665 2666 /* 2667 * Timeout function for dasd_block. This is used when the block layer 2668 * is waiting for something that may not come reliably, (e.g. 
a state 2669 * change interrupt) 2670 */ 2671 static void dasd_block_timeout(struct timer_list *t) 2672 { 2673 unsigned long flags; 2674 struct dasd_block *block; 2675 2676 block = from_timer(block, t, timer); 2677 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2678 /* re-activate request queue */ 2679 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2680 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2681 dasd_schedule_block_bh(block); 2682 blk_mq_run_hw_queues(block->gdp->queue, true); 2683 } 2684 2685 /* 2686 * Setup timeout for a dasd_block in jiffies. 2687 */ 2688 void dasd_block_set_timer(struct dasd_block *block, int expires) 2689 { 2690 if (expires == 0) 2691 del_timer(&block->timer); 2692 else 2693 mod_timer(&block->timer, jiffies + expires); 2694 } 2695 EXPORT_SYMBOL(dasd_block_set_timer); 2696 2697 /* 2698 * Clear timeout for a dasd_block. 2699 */ 2700 void dasd_block_clear_timer(struct dasd_block *block) 2701 { 2702 del_timer(&block->timer); 2703 } 2704 EXPORT_SYMBOL(dasd_block_clear_timer); 2705 2706 /* 2707 * Process finished error recovery ccw. 2708 */ 2709 static void __dasd_process_erp(struct dasd_device *device, 2710 struct dasd_ccw_req *cqr) 2711 { 2712 dasd_erp_fn_t erp_fn; 2713 2714 if (cqr->status == DASD_CQR_DONE) 2715 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2716 else 2717 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2718 erp_fn = device->discipline->erp_postaction(cqr); 2719 erp_fn(cqr); 2720 } 2721 2722 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2723 { 2724 struct request *req; 2725 blk_status_t error = BLK_STS_OK; 2726 unsigned int proc_bytes; 2727 int status; 2728 2729 req = (struct request *) cqr->callback_data; 2730 dasd_profile_end(cqr->block, cqr, req); 2731 2732 proc_bytes = cqr->proc_bytes; 2733 status = cqr->block->base->discipline->free_cp(cqr, req); 2734 if (status < 0) 2735 error = errno_to_blk_status(status); 2736 else if (status == 0) { 2737 switch (cqr->intrc) { 2738 case -EPERM: 2739 /* 2740 * DASD doesn't implement SCSI/NVMe reservations, but it 2741 * implements a locking scheme similar to them. We 2742 * return this error when we no longer have the lock. 2743 */ 2744 error = BLK_STS_RESV_CONFLICT; 2745 break; 2746 case -ENOLINK: 2747 error = BLK_STS_TRANSPORT; 2748 break; 2749 case -ETIMEDOUT: 2750 error = BLK_STS_TIMEOUT; 2751 break; 2752 default: 2753 error = BLK_STS_IOERR; 2754 break; 2755 } 2756 } 2757 2758 /* 2759 * We need to take care for ETIMEDOUT errors here since the 2760 * complete callback does not get called in this case. 2761 * Take care of all errors here and avoid additional code to 2762 * transfer the error value to the complete callback. 2763 */ 2764 if (error) { 2765 blk_mq_end_request(req, error); 2766 blk_mq_run_hw_queues(req->q, true); 2767 } else { 2768 /* 2769 * Partial completed requests can happen with ESE devices. 2770 * During read we might have gotten a NRF error and have to 2771 * complete a request partially. 2772 */ 2773 if (proc_bytes) { 2774 blk_update_request(req, BLK_STS_OK, proc_bytes); 2775 blk_mq_requeue_request(req, true); 2776 } else if (likely(!blk_should_fake_timeout(req->q))) { 2777 blk_mq_complete_request(req); 2778 } 2779 } 2780 } 2781 2782 /* 2783 * Process ccw request queue. 
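 * Walk block->ccw_queue and deal with requests that reached a final
 * state on the device: hand TERMINATED requests back to the
 * discipline, start error recovery for NEED_ERP, log sense data and
 * check for autoquiesce on FAILED requests (which are re-queued with
 * fresh retries when autoquiesce triggers), unwind finished ERP
 * requests, and move everything that is really done to the final
 * queue.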
2784 */ 2785 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2786 struct list_head *final_queue) 2787 { 2788 struct list_head *l, *n; 2789 struct dasd_ccw_req *cqr; 2790 dasd_erp_fn_t erp_fn; 2791 unsigned long flags; 2792 struct dasd_device *base = block->base; 2793 2794 restart: 2795 /* Process request with final status. */ 2796 list_for_each_safe(l, n, &block->ccw_queue) { 2797 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2798 if (cqr->status != DASD_CQR_DONE && 2799 cqr->status != DASD_CQR_FAILED && 2800 cqr->status != DASD_CQR_NEED_ERP && 2801 cqr->status != DASD_CQR_TERMINATED) 2802 continue; 2803 2804 if (cqr->status == DASD_CQR_TERMINATED) { 2805 base->discipline->handle_terminated_request(cqr); 2806 goto restart; 2807 } 2808 2809 /* Process requests that may be recovered */ 2810 if (cqr->status == DASD_CQR_NEED_ERP) { 2811 erp_fn = base->discipline->erp_action(cqr); 2812 if (IS_ERR(erp_fn(cqr))) 2813 continue; 2814 goto restart; 2815 } 2816 2817 /* log sense for fatal error */ 2818 if (cqr->status == DASD_CQR_FAILED) { 2819 dasd_log_sense(cqr, &cqr->irb); 2820 } 2821 2822 /* 2823 * First call extended error reporting and check for autoquiesce 2824 */ 2825 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2826 if (cqr->status == DASD_CQR_FAILED && 2827 dasd_handle_autoquiesce(base, cqr, DASD_EER_FATALERROR)) { 2828 cqr->status = DASD_CQR_FILLED; 2829 cqr->retries = 255; 2830 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2831 goto restart; 2832 } 2833 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); 2834 2835 /* Process finished ERP request. */ 2836 if (cqr->refers) { 2837 __dasd_process_erp(base, cqr); 2838 goto restart; 2839 } 2840 2841 /* Rechain finished requests to final queue */ 2842 cqr->endclk = get_tod_clock(); 2843 list_move_tail(&cqr->blocklist, final_queue); 2844 } 2845 } 2846 2847 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2848 { 2849 dasd_schedule_block_bh(cqr->block); 2850 } 2851 2852 static void __dasd_block_start_head(struct dasd_block *block) 2853 { 2854 struct dasd_ccw_req *cqr; 2855 2856 if (list_empty(&block->ccw_queue)) 2857 return; 2858 /* We allways begin with the first requests on the queue, as some 2859 * of previously started requests have to be enqueued on a 2860 * dasd_device again for error recovery. 2861 */ 2862 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2863 if (cqr->status != DASD_CQR_FILLED) 2864 continue; 2865 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2866 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2867 cqr->status = DASD_CQR_FAILED; 2868 cqr->intrc = -EPERM; 2869 dasd_schedule_block_bh(block); 2870 continue; 2871 } 2872 /* Non-temporary stop condition will trigger fail fast */ 2873 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2874 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2875 !dasd_eer_enabled(block->base) && block->base->aq_mask == 0) { 2876 cqr->status = DASD_CQR_FAILED; 2877 cqr->intrc = -ENOLINK; 2878 dasd_schedule_block_bh(block); 2879 continue; 2880 } 2881 /* Don't try to start requests if device is stopped */ 2882 if (block->base->stopped) 2883 return; 2884 2885 /* just a fail safe check, should not happen */ 2886 if (!cqr->startdev) 2887 cqr->startdev = block->base; 2888 2889 /* make sure that the requests we submit find their way back */ 2890 cqr->callback = dasd_return_cqr_cb; 2891 2892 dasd_add_request_tail(cqr); 2893 } 2894 } 2895 2896 /* 2897 * Central dasd_block layer routine. 
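 * Runs in tasklet (softirq) context.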
Takes requests from the generic 2898 * block layer request queue, creates ccw requests, enqueues them on 2899 * a dasd_device and processes ccw requests that have been returned. 2900 */ 2901 static void dasd_block_tasklet(unsigned long data) 2902 { 2903 struct dasd_block *block = (struct dasd_block *) data; 2904 struct list_head final_queue; 2905 struct list_head *l, *n; 2906 struct dasd_ccw_req *cqr; 2907 struct dasd_queue *dq; 2908 2909 atomic_set(&block->tasklet_scheduled, 0); 2910 INIT_LIST_HEAD(&final_queue); 2911 spin_lock_irq(&block->queue_lock); 2912 /* Finish off requests on ccw queue */ 2913 __dasd_process_block_ccw_queue(block, &final_queue); 2914 spin_unlock_irq(&block->queue_lock); 2915 2916 /* Now call the callback function of requests with final status */ 2917 list_for_each_safe(l, n, &final_queue) { 2918 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2919 dq = cqr->dq; 2920 spin_lock_irq(&dq->lock); 2921 list_del_init(&cqr->blocklist); 2922 __dasd_cleanup_cqr(cqr); 2923 spin_unlock_irq(&dq->lock); 2924 } 2925 2926 spin_lock_irq(&block->queue_lock); 2927 /* Now check if the head of the ccw queue needs to be started. */ 2928 __dasd_block_start_head(block); 2929 spin_unlock_irq(&block->queue_lock); 2930 2931 if (waitqueue_active(&shutdown_waitq)) 2932 wake_up(&shutdown_waitq); 2933 dasd_put_device(block->base); 2934 } 2935 2936 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2937 { 2938 wake_up(&dasd_flush_wq); 2939 } 2940 2941 /* 2942 * Requeue a request back to the block request queue 2943 * only works for block requests 2944 */ 2945 static void _dasd_requeue_request(struct dasd_ccw_req *cqr) 2946 { 2947 struct request *req; 2948 2949 /* 2950 * If the request is an ERP request there is nothing to requeue. 2951 * This will be done with the remaining original request. 2952 */ 2953 if (cqr->refers) 2954 return; 2955 spin_lock_irq(&cqr->dq->lock); 2956 req = (struct request *) cqr->callback_data; 2957 blk_mq_requeue_request(req, true); 2958 spin_unlock_irq(&cqr->dq->lock); 2959 2960 return; 2961 } 2962 2963 static int _dasd_requests_to_flushqueue(struct dasd_block *block, 2964 struct list_head *flush_queue) 2965 { 2966 struct dasd_ccw_req *cqr, *n; 2967 unsigned long flags; 2968 int rc, i; 2969 2970 spin_lock_irqsave(&block->queue_lock, flags); 2971 rc = 0; 2972 restart: 2973 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2974 /* if this request currently owned by a dasd_device cancel it */ 2975 if (cqr->status >= DASD_CQR_QUEUED) 2976 rc = dasd_cancel_req(cqr); 2977 if (rc < 0) 2978 break; 2979 /* Rechain request (including erp chain) so it won't be 2980 * touched by the dasd_block_tasklet anymore. 2981 * Replace the callback so we notice when the request 2982 * is returned from the dasd_device layer. 2983 */ 2984 cqr->callback = _dasd_wake_block_flush_cb; 2985 for (i = 0; cqr; cqr = cqr->refers, i++) 2986 list_move_tail(&cqr->blocklist, flush_queue); 2987 if (i > 1) 2988 /* moved more than one request - need to restart */ 2989 goto restart; 2990 } 2991 spin_unlock_irqrestore(&block->queue_lock, flags); 2992 2993 return rc; 2994 } 2995 2996 /* 2997 * Go through all request on the dasd_block request queue, cancel them 2998 * on the respective dasd_device, and return them to the generic 2999 * block layer. 
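 * Requests (and their ERP chains) are first cancelled on the device
 * and collected on a private flush queue under the block queue lock;
 * their callbacks are then invoked outside that lock once the device
 * layer has returned them, with finished ERP requests unwound first.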
3000 */ 3001 static int dasd_flush_block_queue(struct dasd_block *block) 3002 { 3003 struct dasd_ccw_req *cqr, *n; 3004 struct list_head flush_queue; 3005 unsigned long flags; 3006 int rc; 3007 3008 INIT_LIST_HEAD(&flush_queue); 3009 rc = _dasd_requests_to_flushqueue(block, &flush_queue); 3010 3011 /* Now call the callback function of flushed requests */ 3012 restart_cb: 3013 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 3014 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3015 /* Process finished ERP request. */ 3016 if (cqr->refers) { 3017 spin_lock_bh(&block->queue_lock); 3018 __dasd_process_erp(block->base, cqr); 3019 spin_unlock_bh(&block->queue_lock); 3020 /* restart list_for_xx loop since dasd_process_erp 3021 * might remove multiple elements */ 3022 goto restart_cb; 3023 } 3024 /* call the callback function */ 3025 spin_lock_irqsave(&cqr->dq->lock, flags); 3026 cqr->endclk = get_tod_clock(); 3027 list_del_init(&cqr->blocklist); 3028 __dasd_cleanup_cqr(cqr); 3029 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3030 } 3031 return rc; 3032 } 3033 3034 /* 3035 * Schedules a call to dasd_tasklet over the device tasklet. 3036 */ 3037 void dasd_schedule_block_bh(struct dasd_block *block) 3038 { 3039 /* Protect against rescheduling. */ 3040 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3041 return; 3042 /* life cycle of block is bound to it's base device */ 3043 dasd_get_device(block->base); 3044 tasklet_hi_schedule(&block->tasklet); 3045 } 3046 EXPORT_SYMBOL(dasd_schedule_block_bh); 3047 3048 3049 /* 3050 * SECTION: external block device operations 3051 * (request queue handling, open, release, etc.) 3052 */ 3053 3054 /* 3055 * Dasd request queue function. Called from ll_rw_blk.c 3056 */ 3057 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3058 const struct blk_mq_queue_data *qd) 3059 { 3060 struct dasd_block *block = hctx->queue->queuedata; 3061 struct dasd_queue *dq = hctx->driver_data; 3062 struct request *req = qd->rq; 3063 struct dasd_device *basedev; 3064 struct dasd_ccw_req *cqr; 3065 blk_status_t rc = BLK_STS_OK; 3066 3067 basedev = block->base; 3068 spin_lock_irq(&dq->lock); 3069 if (basedev->state < DASD_STATE_READY || 3070 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3071 DBF_DEV_EVENT(DBF_ERR, basedev, 3072 "device not ready for request %p", req); 3073 rc = BLK_STS_IOERR; 3074 goto out; 3075 } 3076 3077 /* 3078 * if device is stopped do not fetch new requests 3079 * except failfast is active which will let requests fail 3080 * immediately in __dasd_block_start_head() 3081 */ 3082 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3083 DBF_DEV_EVENT(DBF_ERR, basedev, 3084 "device stopped request %p", req); 3085 rc = BLK_STS_RESOURCE; 3086 goto out; 3087 } 3088 3089 if (basedev->features & DASD_FEATURE_READONLY && 3090 rq_data_dir(req) == WRITE) { 3091 DBF_DEV_EVENT(DBF_ERR, basedev, 3092 "Rejecting write request %p", req); 3093 rc = BLK_STS_IOERR; 3094 goto out; 3095 } 3096 3097 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3098 (basedev->features & DASD_FEATURE_FAILFAST || 3099 blk_noretry_request(req))) { 3100 DBF_DEV_EVENT(DBF_ERR, basedev, 3101 "Rejecting failfast request %p", req); 3102 rc = BLK_STS_IOERR; 3103 goto out; 3104 } 3105 3106 cqr = basedev->discipline->build_cp(basedev, block, req); 3107 if (IS_ERR(cqr)) { 3108 if (PTR_ERR(cqr) == -EBUSY || 3109 PTR_ERR(cqr) == -ENOMEM || 3110 PTR_ERR(cqr) == -EAGAIN) { 3111 rc = BLK_STS_RESOURCE; 3112 goto out; 3113 } 3114 
DBF_DEV_EVENT(DBF_ERR, basedev, 3115 "CCW creation failed (rc=%ld) on request %p", 3116 PTR_ERR(cqr), req); 3117 rc = BLK_STS_IOERR; 3118 goto out; 3119 } 3120 /* 3121 * Note: callback is set to dasd_return_cqr_cb in 3122 * __dasd_block_start_head to cover erp requests as well 3123 */ 3124 cqr->callback_data = req; 3125 cqr->status = DASD_CQR_FILLED; 3126 cqr->dq = dq; 3127 3128 blk_mq_start_request(req); 3129 spin_lock(&block->queue_lock); 3130 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3131 INIT_LIST_HEAD(&cqr->devlist); 3132 dasd_profile_start(block, cqr, req); 3133 dasd_schedule_block_bh(block); 3134 spin_unlock(&block->queue_lock); 3135 3136 out: 3137 spin_unlock_irq(&dq->lock); 3138 return rc; 3139 } 3140 3141 /* 3142 * Block timeout callback, called from the block layer 3143 * 3144 * Return values: 3145 * BLK_EH_RESET_TIMER if the request should be left running 3146 * BLK_EH_DONE if the request is handled or terminated 3147 * by the driver. 3148 */ 3149 enum blk_eh_timer_return dasd_times_out(struct request *req) 3150 { 3151 struct dasd_block *block = req->q->queuedata; 3152 struct dasd_device *device; 3153 struct dasd_ccw_req *cqr; 3154 unsigned long flags; 3155 int rc = 0; 3156 3157 cqr = blk_mq_rq_to_pdu(req); 3158 if (!cqr) 3159 return BLK_EH_DONE; 3160 3161 spin_lock_irqsave(&cqr->dq->lock, flags); 3162 device = cqr->startdev ? cqr->startdev : block->base; 3163 if (!device->blk_timeout) { 3164 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3165 return BLK_EH_RESET_TIMER; 3166 } 3167 DBF_DEV_EVENT(DBF_WARNING, device, 3168 " dasd_times_out cqr %p status %x", 3169 cqr, cqr->status); 3170 3171 spin_lock(&block->queue_lock); 3172 spin_lock(get_ccwdev_lock(device->cdev)); 3173 cqr->retries = -1; 3174 cqr->intrc = -ETIMEDOUT; 3175 if (cqr->status >= DASD_CQR_QUEUED) { 3176 rc = __dasd_cancel_req(cqr); 3177 } else if (cqr->status == DASD_CQR_FILLED || 3178 cqr->status == DASD_CQR_NEED_ERP) { 3179 cqr->status = DASD_CQR_TERMINATED; 3180 } else if (cqr->status == DASD_CQR_IN_ERP) { 3181 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3182 3183 list_for_each_entry_safe(searchcqr, nextcqr, 3184 &block->ccw_queue, blocklist) { 3185 tmpcqr = searchcqr; 3186 while (tmpcqr->refers) 3187 tmpcqr = tmpcqr->refers; 3188 if (tmpcqr != cqr) 3189 continue; 3190 /* searchcqr is an ERP request for cqr */ 3191 searchcqr->retries = -1; 3192 searchcqr->intrc = -ETIMEDOUT; 3193 if (searchcqr->status >= DASD_CQR_QUEUED) { 3194 rc = __dasd_cancel_req(searchcqr); 3195 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3196 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3197 searchcqr->status = DASD_CQR_TERMINATED; 3198 rc = 0; 3199 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3200 /* 3201 * Shouldn't happen; most recent ERP 3202 * request is at the front of queue 3203 */ 3204 continue; 3205 } 3206 break; 3207 } 3208 } 3209 spin_unlock(get_ccwdev_lock(device->cdev)); 3210 dasd_schedule_block_bh(block); 3211 spin_unlock(&block->queue_lock); 3212 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3213 3214 return rc ? 
BLK_EH_RESET_TIMER : BLK_EH_DONE; 3215 } 3216 3217 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3218 unsigned int idx) 3219 { 3220 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3221 3222 if (!dq) 3223 return -ENOMEM; 3224 3225 spin_lock_init(&dq->lock); 3226 hctx->driver_data = dq; 3227 3228 return 0; 3229 } 3230 3231 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3232 { 3233 kfree(hctx->driver_data); 3234 hctx->driver_data = NULL; 3235 } 3236 3237 static void dasd_request_done(struct request *req) 3238 { 3239 blk_mq_end_request(req, 0); 3240 blk_mq_run_hw_queues(req->q, true); 3241 } 3242 3243 struct blk_mq_ops dasd_mq_ops = { 3244 .queue_rq = do_dasd_request, 3245 .complete = dasd_request_done, 3246 .timeout = dasd_times_out, 3247 .init_hctx = dasd_init_hctx, 3248 .exit_hctx = dasd_exit_hctx, 3249 }; 3250 3251 static int dasd_open(struct gendisk *disk, blk_mode_t mode) 3252 { 3253 struct dasd_device *base; 3254 int rc; 3255 3256 base = dasd_device_from_gendisk(disk); 3257 if (!base) 3258 return -ENODEV; 3259 3260 atomic_inc(&base->block->open_count); 3261 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3262 rc = -ENODEV; 3263 goto unlock; 3264 } 3265 3266 if (!try_module_get(base->discipline->owner)) { 3267 rc = -EINVAL; 3268 goto unlock; 3269 } 3270 3271 if (dasd_probeonly) { 3272 dev_info(&base->cdev->dev, 3273 "Accessing the DASD failed because it is in " 3274 "probeonly mode\n"); 3275 rc = -EPERM; 3276 goto out; 3277 } 3278 3279 if (base->state <= DASD_STATE_BASIC) { 3280 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3281 " Cannot open unrecognized device"); 3282 rc = -ENODEV; 3283 goto out; 3284 } 3285 if ((mode & BLK_OPEN_WRITE) && 3286 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3287 (base->features & DASD_FEATURE_READONLY))) { 3288 rc = -EROFS; 3289 goto out; 3290 } 3291 dasd_put_device(base); 3292 return 0; 3293 3294 out: 3295 module_put(base->discipline->owner); 3296 unlock: 3297 atomic_dec(&base->block->open_count); 3298 dasd_put_device(base); 3299 return rc; 3300 } 3301 3302 static void dasd_release(struct gendisk *disk) 3303 { 3304 struct dasd_device *base = dasd_device_from_gendisk(disk); 3305 if (base) { 3306 atomic_dec(&base->block->open_count); 3307 module_put(base->discipline->owner); 3308 dasd_put_device(base); 3309 } 3310 } 3311 3312 /* 3313 * Return disk geometry. 
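 * Cylinder/head/sector values come from the discipline's
 * fill_geometry(); the partition start is converted from 512-byte
 * sectors to device blocks via the block's s2b_shift.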
3314 */ 3315 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3316 { 3317 struct dasd_device *base; 3318 3319 base = dasd_device_from_gendisk(bdev->bd_disk); 3320 if (!base) 3321 return -ENODEV; 3322 3323 if (!base->discipline || 3324 !base->discipline->fill_geometry) { 3325 dasd_put_device(base); 3326 return -EINVAL; 3327 } 3328 base->discipline->fill_geometry(base->block, geo); 3329 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3330 dasd_put_device(base); 3331 return 0; 3332 } 3333 3334 const struct block_device_operations 3335 dasd_device_operations = { 3336 .owner = THIS_MODULE, 3337 .open = dasd_open, 3338 .release = dasd_release, 3339 .ioctl = dasd_ioctl, 3340 .compat_ioctl = dasd_ioctl, 3341 .getgeo = dasd_getgeo, 3342 .set_read_only = dasd_set_read_only, 3343 }; 3344 3345 /******************************************************************************* 3346 * end of block device operations 3347 */ 3348 3349 static void 3350 dasd_exit(void) 3351 { 3352 #ifdef CONFIG_PROC_FS 3353 dasd_proc_exit(); 3354 #endif 3355 dasd_eer_exit(); 3356 kmem_cache_destroy(dasd_page_cache); 3357 dasd_page_cache = NULL; 3358 dasd_gendisk_exit(); 3359 dasd_devmap_exit(); 3360 if (dasd_debug_area != NULL) { 3361 debug_unregister(dasd_debug_area); 3362 dasd_debug_area = NULL; 3363 } 3364 dasd_statistics_removeroot(); 3365 } 3366 3367 /* 3368 * SECTION: common functions for ccw_driver use 3369 */ 3370 3371 /* 3372 * Is the device read-only? 3373 * Note that this function does not report the setting of the 3374 * readonly device attribute, but how it is configured in z/VM. 3375 */ 3376 int dasd_device_is_ro(struct dasd_device *device) 3377 { 3378 struct ccw_dev_id dev_id; 3379 struct diag210 diag_data; 3380 int rc; 3381 3382 if (!MACHINE_IS_VM) 3383 return 0; 3384 ccw_device_get_id(device->cdev, &dev_id); 3385 memset(&diag_data, 0, sizeof(diag_data)); 3386 diag_data.vrdcdvno = dev_id.devno; 3387 diag_data.vrdclen = sizeof(diag_data); 3388 rc = diag210(&diag_data); 3389 if (rc == 0 || rc == 2) { 3390 return diag_data.vrdcvfla & 0x80; 3391 } else { 3392 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3393 dev_id.devno, rc); 3394 return 0; 3395 } 3396 } 3397 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3398 3399 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3400 { 3401 struct ccw_device *cdev = data; 3402 int ret; 3403 3404 ret = ccw_device_set_online(cdev); 3405 if (ret) 3406 dev_warn(&cdev->dev, "Setting the DASD online failed with rc=%d\n", ret); 3407 } 3408 3409 /* 3410 * Initial attempt at a probe function. this can be simplified once 3411 * the other detection code is gone. 3412 */ 3413 int dasd_generic_probe(struct ccw_device *cdev) 3414 { 3415 cdev->handler = &dasd_int_handler; 3416 3417 /* 3418 * Automatically online either all dasd devices (dasd_autodetect) 3419 * or all devices specified with dasd= parameters during 3420 * initial probe. 3421 */ 3422 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3423 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3424 async_schedule(dasd_generic_auto_online, cdev); 3425 return 0; 3426 } 3427 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3428 3429 void dasd_generic_free_discipline(struct dasd_device *device) 3430 { 3431 /* Forget the discipline information. 
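 * Drop the module references on the discipline and the base discipline
 * that were taken when the device was set online.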
*/ 3432 if (device->discipline) { 3433 if (device->discipline->uncheck_device) 3434 device->discipline->uncheck_device(device); 3435 module_put(device->discipline->owner); 3436 device->discipline = NULL; 3437 } 3438 if (device->base_discipline) { 3439 module_put(device->base_discipline->owner); 3440 device->base_discipline = NULL; 3441 } 3442 } 3443 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3444 3445 /* 3446 * This will one day be called from a global not_oper handler. 3447 * It is also used by driver_unregister during module unload. 3448 */ 3449 void dasd_generic_remove(struct ccw_device *cdev) 3450 { 3451 struct dasd_device *device; 3452 struct dasd_block *block; 3453 3454 device = dasd_device_from_cdev(cdev); 3455 if (IS_ERR(device)) 3456 return; 3457 3458 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3459 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3460 /* Already doing offline processing */ 3461 dasd_put_device(device); 3462 return; 3463 } 3464 /* 3465 * This device is removed unconditionally. Set offline 3466 * flag to prevent dasd_open from opening it while it is 3467 * no quite down yet. 3468 */ 3469 dasd_set_target_state(device, DASD_STATE_NEW); 3470 cdev->handler = NULL; 3471 /* dasd_delete_device destroys the device reference. */ 3472 block = device->block; 3473 dasd_delete_device(device); 3474 /* 3475 * life cycle of block is bound to device, so delete it after 3476 * device was safely removed 3477 */ 3478 if (block) 3479 dasd_free_block(block); 3480 } 3481 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3482 3483 /* 3484 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3485 * the device is detected for the first time and is supposed to be used 3486 * or the user has started activation through sysfs. 3487 */ 3488 int dasd_generic_set_online(struct ccw_device *cdev, 3489 struct dasd_discipline *base_discipline) 3490 { 3491 struct dasd_discipline *discipline; 3492 struct dasd_device *device; 3493 struct device *dev; 3494 int rc; 3495 3496 dev = &cdev->dev; 3497 3498 /* first online clears initial online feature flag */ 3499 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3500 device = dasd_create_device(cdev); 3501 if (IS_ERR(device)) 3502 return PTR_ERR(device); 3503 3504 discipline = base_discipline; 3505 if (device->features & DASD_FEATURE_USEDIAG) { 3506 if (!dasd_diag_discipline_pointer) { 3507 /* Try to load the required module. */ 3508 rc = request_module(DASD_DIAG_MOD); 3509 if (rc) { 3510 dev_warn(dev, "Setting the DASD online failed " 3511 "because the required module %s " 3512 "could not be loaded (rc=%d)\n", 3513 DASD_DIAG_MOD, rc); 3514 dasd_delete_device(device); 3515 return -ENODEV; 3516 } 3517 } 3518 /* Module init could have failed, so check again here after 3519 * request_module(). 
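 * dasd_diag_discipline_pointer is only set by a successful
 * dasd_diag_mod initialization, so a NULL pointer here means the DIAG
 * discipline is unavailable and the online request must fail.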
*/ 3520 if (!dasd_diag_discipline_pointer) { 3521 dev_warn(dev, "Setting the DASD online failed because of missing DIAG discipline\n"); 3522 dasd_delete_device(device); 3523 return -ENODEV; 3524 } 3525 discipline = dasd_diag_discipline_pointer; 3526 } 3527 if (!try_module_get(base_discipline->owner)) { 3528 dasd_delete_device(device); 3529 return -EINVAL; 3530 } 3531 device->base_discipline = base_discipline; 3532 if (!try_module_get(discipline->owner)) { 3533 dasd_delete_device(device); 3534 return -EINVAL; 3535 } 3536 device->discipline = discipline; 3537 3538 /* check_device will allocate block device if necessary */ 3539 rc = discipline->check_device(device); 3540 if (rc) { 3541 dev_warn(dev, "Setting the DASD online with discipline %s failed with rc=%i\n", 3542 discipline->name, rc); 3543 dasd_delete_device(device); 3544 return rc; 3545 } 3546 3547 dasd_set_target_state(device, DASD_STATE_ONLINE); 3548 if (device->state <= DASD_STATE_KNOWN) { 3549 dev_warn(dev, "Setting the DASD online failed because of a missing discipline\n"); 3550 rc = -ENODEV; 3551 dasd_set_target_state(device, DASD_STATE_NEW); 3552 if (device->block) 3553 dasd_free_block(device->block); 3554 dasd_delete_device(device); 3555 } else { 3556 dev_dbg(dev, "dasd_generic device found\n"); 3557 } 3558 3559 wait_event(dasd_init_waitq, _wait_for_device(device)); 3560 3561 dasd_put_device(device); 3562 return rc; 3563 } 3564 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3565 3566 int dasd_generic_set_offline(struct ccw_device *cdev) 3567 { 3568 int max_count, open_count, rc; 3569 struct dasd_device *device; 3570 struct dasd_block *block; 3571 unsigned long flags; 3572 struct device *dev; 3573 3574 dev = &cdev->dev; 3575 3576 rc = 0; 3577 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3578 device = dasd_device_from_cdev_locked(cdev); 3579 if (IS_ERR(device)) { 3580 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3581 return PTR_ERR(device); 3582 } 3583 3584 /* 3585 * We must make sure that this device is currently not in use. 3586 * The open_count is increased for every opener, that includes 3587 * the blkdev_get in dasd_scan_partitions. We are only interested 3588 * in the other openers. 3589 */ 3590 if (device->block) { 3591 max_count = device->block->bdev ? 0 : -1; 3592 open_count = atomic_read(&device->block->open_count); 3593 if (open_count > max_count) { 3594 if (open_count > 0) 3595 dev_warn(dev, "The DASD cannot be set offline with open count %i\n", 3596 open_count); 3597 else 3598 dev_warn(dev, "The DASD cannot be set offline while it is in use\n"); 3599 rc = -EBUSY; 3600 goto out_err; 3601 } 3602 } 3603 3604 /* 3605 * Test if the offline processing is already running and exit if so. 
3606 * If a safe offline is being processed this could only be a normal 3607 * offline that should be able to overtake the safe offline and 3608 * cancel any I/O we do not want to wait for any longer 3609 */ 3610 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3611 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3612 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3613 &device->flags); 3614 } else { 3615 rc = -EBUSY; 3616 goto out_err; 3617 } 3618 } 3619 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3620 3621 /* 3622 * if safe_offline is called set safe_offline_running flag and 3623 * clear safe_offline so that a call to normal offline 3624 * can overrun safe_offline processing 3625 */ 3626 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3627 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3628 /* need to unlock here to wait for outstanding I/O */ 3629 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3630 /* 3631 * If we want to set the device safe offline all IO operations 3632 * should be finished before continuing the offline process 3633 * so sync bdev first and then wait for our queues to become 3634 * empty 3635 */ 3636 if (device->block) 3637 bdev_mark_dead(device->block->bdev, false); 3638 dasd_schedule_device_bh(device); 3639 rc = wait_event_interruptible(shutdown_waitq, 3640 _wait_for_empty_queues(device)); 3641 if (rc != 0) 3642 goto interrupted; 3643 3644 /* 3645 * check if a normal offline process overtook the offline 3646 * processing in this case simply do nothing beside returning 3647 * that we got interrupted 3648 * otherwise mark safe offline as not running any longer and 3649 * continue with normal offline 3650 */ 3651 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3652 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3653 rc = -ERESTARTSYS; 3654 goto out_err; 3655 } 3656 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3657 } 3658 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3659 3660 dasd_set_target_state(device, DASD_STATE_NEW); 3661 /* dasd_delete_device destroys the device reference. */ 3662 block = device->block; 3663 dasd_delete_device(device); 3664 /* 3665 * life cycle of block is bound to device, so delete it after 3666 * device was safely removed 3667 */ 3668 if (block) 3669 dasd_free_block(block); 3670 3671 return 0; 3672 3673 interrupted: 3674 /* interrupted by signal */ 3675 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3676 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3677 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3678 out_err: 3679 dasd_put_device(device); 3680 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3681 return rc; 3682 } 3683 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3684 3685 int dasd_generic_last_path_gone(struct dasd_device *device) 3686 { 3687 struct dasd_ccw_req *cqr; 3688 3689 dev_warn(&device->cdev->dev, "No operational channel path is left " 3690 "for the device\n"); 3691 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3692 /* First call extended error reporting and check for autoquiesce. */ 3693 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); 3694 3695 if (device->state < DASD_STATE_BASIC) 3696 return 0; 3697 /* Device is active. We want to keep it. 
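 * Requeue the requests that were in flight, stop the device with
 * DASD_STOPPED_DC_WAIT until a path comes back, and let the device
 * tasklet sort out the queue.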
*/ 3698 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3699 if ((cqr->status == DASD_CQR_IN_IO) || 3700 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3701 cqr->status = DASD_CQR_QUEUED; 3702 cqr->retries++; 3703 } 3704 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3705 dasd_device_clear_timer(device); 3706 dasd_schedule_device_bh(device); 3707 return 1; 3708 } 3709 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3710 3711 int dasd_generic_path_operational(struct dasd_device *device) 3712 { 3713 dev_info(&device->cdev->dev, "A channel path to the device has become " 3714 "operational\n"); 3715 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3716 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3717 dasd_schedule_device_bh(device); 3718 if (device->block) { 3719 dasd_schedule_block_bh(device->block); 3720 if (device->block->gdp) 3721 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3722 } 3723 3724 if (!device->stopped) 3725 wake_up(&generic_waitq); 3726 3727 return 1; 3728 } 3729 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3730 3731 int dasd_generic_notify(struct ccw_device *cdev, int event) 3732 { 3733 struct dasd_device *device; 3734 int ret; 3735 3736 device = dasd_device_from_cdev_locked(cdev); 3737 if (IS_ERR(device)) 3738 return 0; 3739 ret = 0; 3740 switch (event) { 3741 case CIO_GONE: 3742 case CIO_BOXED: 3743 case CIO_NO_PATH: 3744 dasd_path_no_path(device); 3745 ret = dasd_generic_last_path_gone(device); 3746 break; 3747 case CIO_OPER: 3748 ret = 1; 3749 if (dasd_path_get_opm(device)) 3750 ret = dasd_generic_path_operational(device); 3751 break; 3752 } 3753 dasd_put_device(device); 3754 return ret; 3755 } 3756 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3757 3758 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3759 { 3760 struct dasd_device *device; 3761 int chp, oldopm, hpfpm, ifccpm; 3762 3763 device = dasd_device_from_cdev_locked(cdev); 3764 if (IS_ERR(device)) 3765 return; 3766 3767 oldopm = dasd_path_get_opm(device); 3768 for (chp = 0; chp < 8; chp++) { 3769 if (path_event[chp] & PE_PATH_GONE) { 3770 dasd_path_notoper(device, chp); 3771 } 3772 if (path_event[chp] & PE_PATH_AVAILABLE) { 3773 dasd_path_available(device, chp); 3774 dasd_schedule_device_bh(device); 3775 } 3776 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3777 if (!dasd_path_is_operational(device, chp) && 3778 !dasd_path_need_verify(device, chp)) { 3779 /* 3780 * we can not establish a pathgroup on an 3781 * unavailable path, so trigger a path 3782 * verification first 3783 */ 3784 dasd_path_available(device, chp); 3785 dasd_schedule_device_bh(device); 3786 } 3787 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3788 "Pathgroup re-established\n"); 3789 if (device->discipline->kick_validate) 3790 device->discipline->kick_validate(device); 3791 } 3792 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3793 dasd_path_fcsec_update(device, chp); 3794 dasd_schedule_device_bh(device); 3795 } 3796 } 3797 hpfpm = dasd_path_get_hpfpm(device); 3798 ifccpm = dasd_path_get_ifccpm(device); 3799 if (!dasd_path_get_opm(device) && hpfpm) { 3800 /* 3801 * device has no operational paths but at least one path is 3802 * disabled due to HPF errors 3803 * disable HPF at all and use the path(s) again 3804 */ 3805 if (device->discipline->disable_hpf) 3806 device->discipline->disable_hpf(device); 3807 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3808 dasd_path_set_tbvpm(device, hpfpm); 3809 dasd_schedule_device_bh(device); 3810 dasd_schedule_requeue(device); 3811 } 
else if (!dasd_path_get_opm(device) && ifccpm) { 3812 /* 3813 * device has no operational paths but at least one path is 3814 * disabled due to IFCC errors 3815 * trigger path verification on paths with IFCC errors 3816 */ 3817 dasd_path_set_tbvpm(device, ifccpm); 3818 dasd_schedule_device_bh(device); 3819 } 3820 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { 3821 dev_warn(&device->cdev->dev, 3822 "No verified channel paths remain for the device\n"); 3823 DBF_DEV_EVENT(DBF_WARNING, device, 3824 "%s", "last verified path gone"); 3825 /* First call extended error reporting and check for autoquiesce. */ 3826 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOPATH); 3827 dasd_device_set_stop_bits(device, 3828 DASD_STOPPED_DC_WAIT); 3829 } 3830 dasd_put_device(device); 3831 } 3832 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3833 3834 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3835 { 3836 if (!dasd_path_get_opm(device) && lpm) { 3837 dasd_path_set_opm(device, lpm); 3838 dasd_generic_path_operational(device); 3839 } else 3840 dasd_path_add_opm(device, lpm); 3841 return 0; 3842 } 3843 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3844 3845 void dasd_generic_space_exhaust(struct dasd_device *device, 3846 struct dasd_ccw_req *cqr) 3847 { 3848 /* First call extended error reporting and check for autoquiesce. */ 3849 dasd_handle_autoquiesce(device, NULL, DASD_EER_NOSPC); 3850 3851 if (device->state < DASD_STATE_BASIC) 3852 return; 3853 3854 if (cqr->status == DASD_CQR_IN_IO || 3855 cqr->status == DASD_CQR_CLEAR_PENDING) { 3856 cqr->status = DASD_CQR_QUEUED; 3857 cqr->retries++; 3858 } 3859 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); 3860 dasd_device_clear_timer(device); 3861 dasd_schedule_device_bh(device); 3862 } 3863 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); 3864 3865 void dasd_generic_space_avail(struct dasd_device *device) 3866 { 3867 dev_info(&device->cdev->dev, "Extent pool space is available\n"); 3868 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); 3869 3870 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); 3871 dasd_schedule_device_bh(device); 3872 3873 if (device->block) { 3874 dasd_schedule_block_bh(device->block); 3875 if (device->block->gdp) 3876 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3877 } 3878 if (!device->stopped) 3879 wake_up(&generic_waitq); 3880 } 3881 EXPORT_SYMBOL_GPL(dasd_generic_space_avail); 3882 3883 /* 3884 * clear active requests and requeue them to block layer if possible 3885 */ 3886 int dasd_generic_requeue_all_requests(struct dasd_device *device) 3887 { 3888 struct dasd_block *block = device->block; 3889 struct list_head requeue_queue; 3890 struct dasd_ccw_req *cqr, *n; 3891 int rc; 3892 3893 if (!block) 3894 return 0; 3895 3896 INIT_LIST_HEAD(&requeue_queue); 3897 rc = _dasd_requests_to_flushqueue(block, &requeue_queue); 3898 3899 /* Now call the callback function of flushed requests */ 3900 restart_cb: 3901 list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) { 3902 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 3903 /* Process finished ERP request. 
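 * ERP requests are unwound first so that only the original request is
 * requeued to the block layer.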
static int dasd_handle_autoquiesce(struct dasd_device *device,
				   struct dasd_ccw_req *cqr,
				   unsigned int reason)
{
	/* in any case write eer message with reason */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, cqr, reason);

	if (!test_bit(reason, &device->aq_mask))
		return 0;

	/* notify eer about autoquiesce */
	if (dasd_eer_enabled(device))
		dasd_eer_write(device, NULL, DASD_EER_AUTOQUIESCE);

	dev_info(&device->cdev->dev,
		 "The DASD has been put in the quiesce state\n");
	dasd_device_set_stop_bits(device, DASD_STOPPED_QUIESCE);

	if (device->features & DASD_FEATURE_REQUEUEQUIESCE)
		dasd_schedule_requeue(device);

	return 1;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)virt_to_phys(cqr->data);
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

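/*
 * Issue a Read Device Characteristics (RDC) request for @device and wait
 * for it to complete. On success the returned data is copied to
 * @rdc_buffer.
 */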
int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);