// SPDX-License-Identifier: GPL-2.0
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/vmalloc.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_DIAG_MOD		"dasd_diag_mod"

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
EXPORT_SYMBOL(dasd_debug_area);
static struct dentry *dasd_debugfs_root_entry;
struct dasd_discipline *dasd_diag_discipline_pointer;
EXPORT_SYMBOL(dasd_diag_discipline_pointer);
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright IBM Corp. 2000");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(unsigned long);
static void dasd_block_tasklet(unsigned long);
static void do_kick_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(struct timer_list *);
static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
static void dasd_hosts_init(struct dentry *, struct dasd_device *);
static void dasd_hosts_exit(struct dasd_device *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;
static wait_queue_head_t shutdown_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get two pages for ese format. */
	device->ese_mem = (void *)__get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ese_mem) {
		free_page((unsigned long) device->erp_mem);
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	dasd_init_chunklist(&device->ese_chunks, device->ese_mem, PAGE_SIZE * 2);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet, dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	timer_setup(&device->timer, dasd_device_timeout, 0);
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	INIT_WORK(&device->requeue_requests, do_requeue_requests);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);
	spin_lock_init(&device->profile.lock);
	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_pages((unsigned long) device->ese_mem, 1);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet, dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	INIT_LIST_HEAD(&block->format_list);
	spin_lock_init(&block->format_lock);
	timer_setup(&block->timer, dasd_block_timeout, 0);
	spin_lock_init(&block->profile.lock);

	return block;
}
EXPORT_SYMBOL_GPL(dasd_alloc_block);

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}
EXPORT_SYMBOL_GPL(dasd_free_block);

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	device->state = DASD_STATE_NEW;

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

static struct dentry *dasd_debugfs_setup(const char *name,
					 struct dentry *base_dentry)
{
	struct dentry *pde;

	if (!base_dentry)
		return NULL;
	pde = debugfs_create_dir(name, base_dentry);
	if (!pde || IS_ERR(pde))
		return NULL;
	return pde;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	struct dasd_block *block = device->block;
	int rc = 0;

	/* Allocate and register gendisk structure. */
	if (block) {
		rc = dasd_gendisk_alloc(block);
		if (rc)
			return rc;
		block->debugfs_dentry =
			dasd_debugfs_setup(block->gdp->disk_name,
					   dasd_debugfs_root_entry);
		dasd_profile_init(&block->profile, block->debugfs_dentry);
		if (dasd_global_profile_level == DASD_PROFILE_ON)
			dasd_profile_on(&device->block->profile);
	}
	device->debugfs_dentry =
		dasd_debugfs_setup(dev_name(&device->cdev->dev),
				   dasd_debugfs_root_entry);
	dasd_profile_init(&device->profile, device->debugfs_dentry);
	dasd_hosts_init(device->debugfs_dentry, device);

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;

	return rc;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->discipline->basic_to_known) {
		rc = device->discipline->basic_to_known(device);
		if (rc)
			return rc;
	}

	if (device->block) {
		dasd_profile_exit(&device->block->profile);
		debugfs_remove(device->block->debugfs_dentry);
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);
	dasd_profile_exit(&device->profile);
	dasd_hosts_exit(device);
	debugfs_remove(device->debugfs_dentry);
	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is setup.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;
	struct gendisk *disk;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN) {
				device->state = DASD_STATE_UNFMT;
				disk = device->block->gdp;
				kobject_uevent(&disk_to_dev(disk)->kobj,
					       KOBJ_CHANGE);
				goto out;
			}
			return rc;
		}
		if (device->discipline->setup_blk_queue)
			device->discipline->setup_blk_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc) {
			device->state = DASD_STATE_BASIC;
			return rc;
		}
	} else {
		device->state = DASD_STATE_READY;
	}
out:
	if (device->discipline->basic_to_ready)
		rc = device->discipline->basic_to_ready(device);
	return rc;
}

static inline
int _wait_for_empty_queues(struct dasd_device *device)
{
	if (device->block)
		return list_empty(&device->ccw_queue) &&
			list_empty(&device->block->ccw_queue);
	else
		return list_empty(&device->ccw_queue);
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if ((device->features & DASD_FEATURE_USERAW)) {
			kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
				       KOBJ_CHANGE);
			return 0;
		}
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}

	device->state = DASD_STATE_READY;
	if (device->block && !(device->features & DASD_FEATURE_USERAW))
		disk_uevent(device->block->bdev->bd_disk, KOBJ_CHANGE);
	return 0;
}
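
/*
 * Note (summary of the transition helpers above): the device states form
 * the ladder NEW -> KNOWN -> BASIC -> READY -> ONLINE, with UNFMT as a
 * side state that is entered from basic_to_ready when the analysis fails
 * and can only be left towards BASIC. dasd_increase_state() and
 * dasd_decrease_state() below walk this ladder step by step until
 * device->state reaches device->target or a transition fails.
 */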

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);
	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	if (!schedule_work(&device->kick_work))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_kick_device);
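
/*
 * Note: dasd_kick_device() above and dasd_reload_device() below use the
 * same reference counting pattern: a reference is taken with
 * dasd_get_device() before scheduling the work item and dropped right away
 * if schedule_work() reports that the work was already pending; otherwise
 * the worker function drops it after it has run. This keeps the device
 * pinned while the deferred call is queued.
 */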

/*
 * dasd_reload_device will schedule a call to do_reload_device to the kernel
 * event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	if (!schedule_work(&device->reload_device))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until the target state is reached.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_reload_device(device);
	if (device->discipline->kick_validate)
		device->discipline->kick_validate(device);
}
EXPORT_SYMBOL(dasd_enable_device);

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */

unsigned int dasd_global_profile_level = DASD_PROFILE_OFF;

#ifdef CONFIG_DASD_PROFILE
struct dasd_profile dasd_global_profile = {
	.lock = __SPIN_LOCK_UNLOCKED(dasd_global_profile.lock),
};
static struct dentry *dasd_debugfs_global_entry;

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;
	struct dasd_device *device;

	/* count the length of the chanq for statistics */
	counter = 0;
	if (dasd_global_profile_level || block->profile.data)
		list_for_each(l, &block->ccw_queue)
			if (++counter >= 31)
				break;

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		dasd_global_profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			dasd_global_profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		block->profile.data->dasd_io_nr_req[counter]++;
		if (rq_data_dir(req) == READ)
			block->profile.data->dasd_read_nr_req[counter]++;
	}
	spin_unlock(&block->profile.lock);

	/*
	 * We count the request for the start device, even though it may run on
	 * some other device due to error recovery. This way we make sure that
	 * we count each request only once.
	 */
674 */ 675 device = cqr->startdev; 676 if (device->profile.data) { 677 counter = 1; /* request is not yet queued on the start device */ 678 list_for_each(l, &device->ccw_queue) 679 if (++counter >= 31) 680 break; 681 } 682 spin_lock(&device->profile.lock); 683 if (device->profile.data) { 684 device->profile.data->dasd_io_nr_req[counter]++; 685 if (rq_data_dir(req) == READ) 686 device->profile.data->dasd_read_nr_req[counter]++; 687 } 688 spin_unlock(&device->profile.lock); 689 } 690 691 /* 692 * Add profiling information for cqr after execution. 693 */ 694 695 #define dasd_profile_counter(value, index) \ 696 { \ 697 for (index = 0; index < 31 && value >> (2+index); index++) \ 698 ; \ 699 } 700 701 static void dasd_profile_end_add_data(struct dasd_profile_info *data, 702 int is_alias, 703 int is_tpm, 704 int is_read, 705 long sectors, 706 int sectors_ind, 707 int tottime_ind, 708 int tottimeps_ind, 709 int strtime_ind, 710 int irqtime_ind, 711 int irqtimeps_ind, 712 int endtime_ind) 713 { 714 /* in case of an overflow, reset the whole profile */ 715 if (data->dasd_io_reqs == UINT_MAX) { 716 memset(data, 0, sizeof(*data)); 717 ktime_get_real_ts64(&data->starttod); 718 } 719 data->dasd_io_reqs++; 720 data->dasd_io_sects += sectors; 721 if (is_alias) 722 data->dasd_io_alias++; 723 if (is_tpm) 724 data->dasd_io_tpm++; 725 726 data->dasd_io_secs[sectors_ind]++; 727 data->dasd_io_times[tottime_ind]++; 728 data->dasd_io_timps[tottimeps_ind]++; 729 data->dasd_io_time1[strtime_ind]++; 730 data->dasd_io_time2[irqtime_ind]++; 731 data->dasd_io_time2ps[irqtimeps_ind]++; 732 data->dasd_io_time3[endtime_ind]++; 733 734 if (is_read) { 735 data->dasd_read_reqs++; 736 data->dasd_read_sects += sectors; 737 if (is_alias) 738 data->dasd_read_alias++; 739 if (is_tpm) 740 data->dasd_read_tpm++; 741 data->dasd_read_secs[sectors_ind]++; 742 data->dasd_read_times[tottime_ind]++; 743 data->dasd_read_time1[strtime_ind]++; 744 data->dasd_read_time2[irqtime_ind]++; 745 data->dasd_read_time3[endtime_ind]++; 746 } 747 } 748 749 static void dasd_profile_end(struct dasd_block *block, 750 struct dasd_ccw_req *cqr, 751 struct request *req) 752 { 753 unsigned long strtime, irqtime, endtime, tottime; 754 unsigned long tottimeps, sectors; 755 struct dasd_device *device; 756 int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind; 757 int irqtime_ind, irqtimeps_ind, endtime_ind; 758 struct dasd_profile_info *data; 759 760 device = cqr->startdev; 761 if (!(dasd_global_profile_level || 762 block->profile.data || 763 device->profile.data)) 764 return; 765 766 sectors = blk_rq_sectors(req); 767 if (!cqr->buildclk || !cqr->startclk || 768 !cqr->stopclk || !cqr->endclk || 769 !sectors) 770 return; 771 772 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 773 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 774 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 775 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 776 tottimeps = tottime / sectors; 777 778 dasd_profile_counter(sectors, sectors_ind); 779 dasd_profile_counter(tottime, tottime_ind); 780 dasd_profile_counter(tottimeps, tottimeps_ind); 781 dasd_profile_counter(strtime, strtime_ind); 782 dasd_profile_counter(irqtime, irqtime_ind); 783 dasd_profile_counter(irqtime / sectors, irqtimeps_ind); 784 dasd_profile_counter(endtime, endtime_ind); 785 786 spin_lock(&dasd_global_profile.lock); 787 if (dasd_global_profile.data) { 788 data = dasd_global_profile.data; 789 data->dasd_sum_times += tottime; 790 data->dasd_sum_time_str += strtime; 791 data->dasd_sum_time_irq += irqtime; 792 

static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	unsigned long strtime, irqtime, endtime, tottime;
	unsigned long tottimeps, sectors;
	struct dasd_device *device;
	int sectors_ind, tottime_ind, tottimeps_ind, strtime_ind;
	int irqtime_ind, irqtimeps_ind, endtime_ind;
	struct dasd_profile_info *data;

	device = cqr->startdev;
	if (!(dasd_global_profile_level ||
	      block->profile.data ||
	      device->profile.data))
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	dasd_profile_counter(sectors, sectors_ind);
	dasd_profile_counter(tottime, tottime_ind);
	dasd_profile_counter(tottimeps, tottimeps_ind);
	dasd_profile_counter(strtime, strtime_ind);
	dasd_profile_counter(irqtime, irqtime_ind);
	dasd_profile_counter(irqtime / sectors, irqtimeps_ind);
	dasd_profile_counter(endtime, endtime_ind);

	spin_lock(&dasd_global_profile.lock);
	if (dasd_global_profile.data) {
		data = dasd_global_profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(dasd_global_profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&dasd_global_profile.lock);

	spin_lock(&block->profile.lock);
	if (block->profile.data) {
		data = block->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(block->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&block->profile.lock);

	spin_lock(&device->profile.lock);
	if (device->profile.data) {
		data = device->profile.data;
		data->dasd_sum_times += tottime;
		data->dasd_sum_time_str += strtime;
		data->dasd_sum_time_irq += irqtime;
		data->dasd_sum_time_end += endtime;
		dasd_profile_end_add_data(device->profile.data,
					  cqr->startdev != block->base,
					  cqr->cpmode == 1,
					  rq_data_dir(req) == READ,
					  sectors, sectors_ind, tottime_ind,
					  tottimeps_ind, strtime_ind,
					  irqtime_ind, irqtimeps_ind,
					  endtime_ind);
	}
	spin_unlock(&device->profile.lock);
}

void dasd_profile_reset(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		return;
	}
	memset(data, 0, sizeof(*data));
	ktime_get_real_ts64(&data->starttod);
	spin_unlock_bh(&profile->lock);
}

int dasd_profile_on(struct dasd_profile *profile)
{
	struct dasd_profile_info *data;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	spin_lock_bh(&profile->lock);
	if (profile->data) {
		spin_unlock_bh(&profile->lock);
		kfree(data);
		return 0;
	}
	ktime_get_real_ts64(&data->starttod);
	profile->data = data;
	spin_unlock_bh(&profile->lock);
	return 0;
}

void dasd_profile_off(struct dasd_profile *profile)
{
	spin_lock_bh(&profile->lock);
	kfree(profile->data);
	profile->data = NULL;
	spin_unlock_bh(&profile->lock);
}

char *dasd_get_user_string(const char __user *user_buf, size_t user_len)
{
	char *buffer;

	buffer = vmalloc(user_len + 1);
	if (buffer == NULL)
		return ERR_PTR(-ENOMEM);
	if (copy_from_user(buffer, user_buf, user_len) != 0) {
		vfree(buffer);
		return ERR_PTR(-EFAULT);
	}
	/* got the string, now strip linefeed. */
	if (buffer[user_len - 1] == '\n')
		buffer[user_len - 1] = 0;
	else
		buffer[user_len] = 0;
	return buffer;
}

static ssize_t dasd_stats_write(struct file *file,
				const char __user *user_buf,
				size_t user_len, loff_t *pos)
{
	char *buffer, *str;
	int rc;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct dasd_profile *prof = m->private;

	if (user_len > 65536)
		user_len = 65536;
	buffer = dasd_get_user_string(user_buf, user_len);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	str = skip_spaces(buffer);
	rc = user_len;
	if (strncmp(str, "reset", 5) == 0) {
		dasd_profile_reset(prof);
	} else if (strncmp(str, "on", 2) == 0) {
		rc = dasd_profile_on(prof);
		if (rc)
			goto out;
		rc = user_len;
		if (prof == &dasd_global_profile) {
			dasd_profile_reset(prof);
			dasd_global_profile_level = DASD_PROFILE_GLOBAL_ONLY;
		}
	} else if (strncmp(str, "off", 3) == 0) {
		if (prof == &dasd_global_profile)
			dasd_global_profile_level = DASD_PROFILE_OFF;
		dasd_profile_off(prof);
	} else
		rc = -EINVAL;
out:
	vfree(buffer);
	return rc;
}

static void dasd_stats_array(struct seq_file *m, unsigned int *array)
{
	int i;

	for (i = 0; i < 32; i++)
		seq_printf(m, "%u ", array[i]);
	seq_putc(m, '\n');
}

static void dasd_stats_seq_print(struct seq_file *m,
				 struct dasd_profile_info *data)
{
	seq_printf(m, "start_time %lld.%09ld\n",
		   (s64)data->starttod.tv_sec, data->starttod.tv_nsec);
	seq_printf(m, "total_requests %u\n", data->dasd_io_reqs);
	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
	seq_printf(m, "avg_total %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_times / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_build_to_ssch %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_str / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_ssch_to_irq %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_irq / data->dasd_io_reqs : 0UL);
	seq_printf(m, "avg_irq_to_end %lu\n", data->dasd_io_reqs ?
		   data->dasd_sum_time_end / data->dasd_io_reqs : 0UL);
	seq_puts(m, "histogram_sectors ");
	dasd_stats_array(m, data->dasd_io_secs);
	seq_puts(m, "histogram_io_times ");
	dasd_stats_array(m, data->dasd_io_times);
	seq_puts(m, "histogram_io_times_weighted ");
	dasd_stats_array(m, data->dasd_io_timps);
	seq_puts(m, "histogram_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_io_time1);
	seq_puts(m, "histogram_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_io_time2);
	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
	dasd_stats_array(m, data->dasd_io_time2ps);
	seq_puts(m, "histogram_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_io_time3);
	seq_puts(m, "histogram_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_io_nr_req);
	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
	seq_puts(m, "histogram_read_sectors ");
	dasd_stats_array(m, data->dasd_read_secs);
	seq_puts(m, "histogram_read_times ");
	dasd_stats_array(m, data->dasd_read_times);
	seq_puts(m, "histogram_read_time_build_to_ssch ");
	dasd_stats_array(m, data->dasd_read_time1);
	seq_puts(m, "histogram_read_time_ssch_to_irq ");
	dasd_stats_array(m, data->dasd_read_time2);
	seq_puts(m, "histogram_read_time_irq_to_end ");
	dasd_stats_array(m, data->dasd_read_time3);
	seq_puts(m, "histogram_read_ccw_queue_length ");
	dasd_stats_array(m, data->dasd_read_nr_req);
}

static int dasd_stats_show(struct seq_file *m, void *v)
{
	struct dasd_profile *profile;
	struct dasd_profile_info *data;

	profile = m->private;
	spin_lock_bh(&profile->lock);
	data = profile->data;
	if (!data) {
		spin_unlock_bh(&profile->lock);
		seq_puts(m, "disabled\n");
		return 0;
	}
	dasd_stats_seq_print(m, data);
	spin_unlock_bh(&profile->lock);
	return 0;
}

static int dasd_stats_open(struct inode *inode, struct file *file)
{
	struct dasd_profile *profile = inode->i_private;
	return single_open(file, dasd_stats_show, profile);
}

static const struct file_operations dasd_stats_raw_fops = {
	.owner		= THIS_MODULE,
	.open		= dasd_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dasd_stats_write,
};

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	umode_t mode;
	struct dentry *pde;

	if (!base_dentry)
		return;
	profile->dentry = NULL;
	profile->data = NULL;
	mode = (S_IRUSR | S_IWUSR | S_IFREG);
	pde = debugfs_create_file("statistics", mode, base_dentry,
				  profile, &dasd_stats_raw_fops);
	if (pde && !IS_ERR(pde))
		profile->dentry = pde;
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	dasd_profile_off(profile);
	debugfs_remove(profile->dentry);
	profile->dentry = NULL;
}

static void dasd_statistics_removeroot(void)
{
	dasd_global_profile_level = DASD_PROFILE_OFF;
	dasd_profile_exit(&dasd_global_profile);
	debugfs_remove(dasd_debugfs_global_entry);
	debugfs_remove(dasd_debugfs_root_entry);
}
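
/*
 * Note: the statistics are exposed through debugfs (typically mounted at
 * /sys/kernel/debug). dasd_statistics_createroot() below creates dasd/ and
 * dasd/global/, and dasd_state_known_to_basic() adds one directory per ccw
 * device and per gendisk; each of these directories holds a "statistics"
 * file, and the per-device directory additionally holds "host_access_list"
 * (see dasd_hosts_init()).
 */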

static void dasd_statistics_createroot(void)
{
	struct dentry *pde;

	dasd_debugfs_root_entry = NULL;
	pde = debugfs_create_dir("dasd", NULL);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_root_entry = pde;
	pde = debugfs_create_dir("global", dasd_debugfs_root_entry);
	if (!pde || IS_ERR(pde))
		goto error;
	dasd_debugfs_global_entry = pde;
	dasd_profile_init(&dasd_global_profile, dasd_debugfs_global_entry);
	return;

error:
	DBF_EVENT(DBF_ERR, "%s",
		  "Creation of the dasd debugfs interface failed");
	dasd_statistics_removeroot();
	return;
}

#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)

static void dasd_statistics_createroot(void)
{
	return;
}

static void dasd_statistics_removeroot(void)
{
	return;
}

int dasd_stats_generic_show(struct seq_file *m, void *v)
{
	seq_puts(m, "Statistics are not activated in this kernel\n");
	return 0;
}

static void dasd_profile_init(struct dasd_profile *profile,
			      struct dentry *base_dentry)
{
	return;
}

static void dasd_profile_exit(struct dasd_profile *profile)
{
	return;
}

int dasd_profile_on(struct dasd_profile *profile)
{
	return 0;
}

#endif /* CONFIG_DASD_PROFILE */

static int dasd_hosts_show(struct seq_file *m, void *v)
{
	struct dasd_device *device;
	int rc = -EOPNOTSUPP;

	device = m->private;
	dasd_get_device(device);

	if (device->discipline->hosts_print)
		rc = device->discipline->hosts_print(device, m);

	dasd_put_device(device);
	return rc;
}

DEFINE_SHOW_ATTRIBUTE(dasd_hosts);

static void dasd_hosts_exit(struct dasd_device *device)
{
	debugfs_remove(device->hosts_dentry);
	device->hosts_dentry = NULL;
}

static void dasd_hosts_init(struct dentry *base_dentry,
			    struct dasd_device *device)
{
	struct dentry *pde;
	umode_t mode;

	if (!base_dentry)
		return;

	mode = S_IRUSR | S_IFREG;
	pde = debugfs_create_file("host_access_list", mode, base_dentry,
				  device, &dasd_hosts_fops);
	if (pde && !IS_ERR(pde))
		device->hosts_dentry = pde;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
					  struct dasd_device *device,
					  struct dasd_ccw_req *cqr)
{
	unsigned long flags;
	char *data, *chunk;
	int size = 0;

	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	if (!cqr)
		size += (sizeof(*cqr) + 7L) & -8L;

	spin_lock_irqsave(&device->mem_lock, flags);
	data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	if (!cqr) {
		cqr = (void *) data;
		data += (sizeof(*cqr) + 7L) & -8L;
	}
	memset(cqr, 0, sizeof(*cqr));
	cqr->mem_chunk = chunk;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}
EXPORT_SYMBOL(dasd_smalloc_request);
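
/*
 * Note: dasd_smalloc_request() above and dasd_fmalloc_request() below do
 * not go to the page allocator. They carve the request, its channel
 * program and its data area out of the per-device chunk pools
 * (ccw_chunks/ese_chunks) that dasd_alloc_device() set up, so requests can
 * be built in atomic context and during error recovery when memory may be
 * tight. dasd_sfree_request()/dasd_ffree_request() return the chunk to the
 * corresponding pool.
 */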

struct dasd_ccw_req *dasd_fmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int size, cqr_size;
	char *data;

	cqr_size = (sizeof(*cqr) + 7L) & -8L;
	size = cqr_size;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;

	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = dasd_alloc_chunk(&device->ese_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (!cqr)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(*cqr));
	data = (char *)cqr + cqr_size;
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}

	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);

	return cqr;
}
EXPORT_SYMBOL(dasd_fmalloc_request);

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_sfree_request);

void dasd_ffree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ese_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_ffree_request);

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EINVAL:
			/*
			 * device not valid so no I/O could be running
			 * handle CQR as termination successful
			 */
			cqr->status = DASD_CQR_CLEARED;
			cqr->stopclk = get_tod_clock();
			cqr->starttime = 0;
			/* no retries for invalid devices */
			cqr->retries = -1;
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "EINVAL, handle as terminated");
			/* fake rc to success */
			rc = 0;
			break;
		default:
			/* internal error 10 - unknown rc*/
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL(dasd_term_IO);

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (((cqr->block &&
	      test_bit(DASD_FLAG_LOCK_STOLEN, &cqr->block->base->flags)) ||
	     test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags)) &&
	    !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "start_IO: return request %p "
			      "because of stolen lock", cqr);
		cqr->status = DASD_CQR_ERROR;
		cqr->intrc = -EPERM;
		return -EPERM;
	}
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO run out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_tod_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
		cqr->lpm &= dasd_path_get_opm(device);
		if (!cqr->lpm)
			cqr->lpm = dasd_path_get_opm(device);
	}
	/*
	 * remember the amount of formatted tracks to prevent double format on
	 * ESE devices
	 */
	if (cqr->block)
		cqr->trkcount = atomic_read(&cqr->block->trkcount);

	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a subset of the
		 * available paths and all these paths are gone. If the lpm of
		 * this request was only a subset of the opm (e.g. the ppm) then
		 * we just do a retry with all available paths.
		 * If we already use the full opm, something is amiss, and we
		 * need a full path verification.
		 */
		if (test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) {
			DBF_DEV_EVENT(DBF_WARNING, device,
				      "start_IO: selected paths gone (%x)",
				      cqr->lpm);
		} else if (cqr->lpm != dasd_path_get_opm(device)) {
			cqr->lpm = dasd_path_get_opm(device);
			DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
				      "start_IO: selected paths gone,"
				      " retry on all paths");
		} else {
			DBF_DEV_EVENT(DBF_WARNING, device, "%s",
				      "start_IO: all paths in opm gone,"
				      " do path verification");
			dasd_generic_last_path_gone(device);
			dasd_path_no_path(device);
			dasd_path_set_tbvpm(device,
					    ccw_device_get_path_mask(
						    device->cdev));
		}
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		DBF_DEV_EVENT(DBF_WARNING, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}
EXPORT_SYMBOL(dasd_start_IO);

/*
 * Timeout function for dasd devices. This is used for different purposes
 *  1) missing interrupt handler for normal operation
 *  2) delayed start of request where start_IO failed with -EBUSY
 *  3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(struct timer_list *t)
{
	unsigned long flags;
	struct dasd_device *device;

	device = from_timer(device, t, timer);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}
EXPORT_SYMBOL(dasd_device_set_timer);

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}
EXPORT_SYMBOL(dasd_device_clear_timer);

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		if (device->block->gdp)
			blk_mq_run_hw_queues(device->block->gdp->queue, true);
	}
}
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);

static int dasd_check_hpf_error(struct irb *irb)
{
	return (scsw_tm_is_valid_schxs(&irb->scsw) &&
	    (irb->scsw.tm.sesq == SCSW_SESQ_DEV_NOFCX ||
	     irb->scsw.tm.sesq == SCSW_SESQ_PATH_NOFCX));
}

static int dasd_ese_needs_format(struct dasd_block *block, struct irb *irb)
{
	struct dasd_device *device = NULL;
	u8 *sense = NULL;

	if (!block)
		return 0;
	device = block->base;
	if (!device || !device->discipline->is_ese)
		return 0;
	if (!device->discipline->is_ese(device))
		return 0;

	sense = dasd_get_sense(irb);
	if (!sense)
		return 0;

	return !!(sense[1] & SNS1_NO_REC_FOUND) ||
		!!(sense[1] & SNS1_FILE_PROTECTED) ||
		scsw_cstat(&irb->scsw) == SCHN_STAT_INCORR_LEN;
}

static int dasd_ese_oos_cond(u8 *sense)
{
	return sense[0] & SNS0_EQUIPMENT_CHECK &&
		sense[1] & SNS1_PERM_ERR &&
		sense[1] & SNS1_WRITE_INHIBITED &&
		sense[25] == 0x01;
}
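
/*
 * Note: for the interrupt handler below, intparm is the value that was
 * passed to ccw_device_start()/ccw_device_tm_start(), i.e. the address of
 * the dasd_ccw_req owning the channel program. The common I/O layer may
 * also pass an ERR_PTR() encoded irb (e.g. -EIO for a killed request or
 * -ETIMEDOUT), which is handled before any irb field is evaluated.
 */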

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next, *fcqr;
	struct dasd_device *device;
	unsigned long now;
	int nrf_suppressed = 0;
	int fp_suppressed = 0;
	struct request *req;
	u8 *sense = NULL;
	int expires;

	cqr = (struct dasd_ccw_req *) intparm;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			if (cqr && cqr->status == DASD_CQR_CLEAR_PENDING) {
				device = cqr->startdev;
				cqr->status = DASD_CQR_CLEARED;
				dasd_device_clear_timer(device);
				wake_up(&dasd_flush_wq);
				dasd_schedule_device_bh(device);
				return;
			}
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_tod_clock();
	/* check for conditions that should be handled immediately */
	if (!cqr ||
	    !(scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	      scsw_cstat(&irb->scsw) == 0)) {
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}

		/*
		 * In some cases 'File Protected' or 'No Record Found' errors
		 * might be expected and debug log messages for the
		 * corresponding interrupts shouldn't be written then.
		 * Check if either of the according suppress bits is set.
		 */
		sense = dasd_get_sense(irb);
		if (sense) {
			fp_suppressed = (sense[1] & SNS1_FILE_PROTECTED) &&
				test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags);
			nrf_suppressed = (sense[1] & SNS1_NO_REC_FOUND) &&
				test_bit(DASD_CQR_SUPPRESS_NRF, &cqr->flags);

			/*
			 * Extent pool probably out-of-space.
			 * Stop device and check exhaust level.
			 */
			if (dasd_ese_oos_cond(sense)) {
				dasd_generic_space_exhaust(device, cqr);
				device->discipline->ext_pool_exhaust(device, cqr);
				dasd_put_device(device);
				return;
			}
		}
		if (!(fp_suppressed || nrf_suppressed))
			device->discipline->dump_sense_dbf(device, irb, "int");

		if (device->features & DASD_FEATURE_ERPLOG)
			device->discipline->dump_sense(device, cqr, irb);
		device->discipline->check_for_device_change(device, cqr, irb);
		dasd_put_device(device);
	}

	/* check for attention message */
	if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			device->discipline->check_attention(device,
							    irb->esw.esw1.lpum);
			dasd_put_device(device);
		}
	}

	if (!cqr)
		return;

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	if (dasd_ese_needs_format(cqr->block, irb)) {
		req = dasd_get_callback_data(cqr);
		if (!req) {
			cqr->status = DASD_CQR_ERROR;
			return;
		}
		if (rq_data_dir(req) == READ) {
			device->discipline->ese_read(cqr, irb);
			cqr->status = DASD_CQR_SUCCESS;
			cqr->stopclk = now;
			dasd_device_clear_timer(device);
			dasd_schedule_device_bh(device);
			return;
		}
		fcqr = device->discipline->ese_format(device, cqr, irb);
		if (IS_ERR(fcqr)) {
			if (PTR_ERR(fcqr) == -EINVAL) {
				cqr->status = DASD_CQR_ERROR;
				return;
			}
			/*
			 * If we can't format now, let the request go
			 * one extra round. Maybe we can format later.
			 */
			cqr->status = DASD_CQR_QUEUED;
			dasd_schedule_device_bh(device);
			return;
		} else {
			fcqr->status = DASD_CQR_QUEUED;
			cqr->status = DASD_CQR_QUEUED;
			list_add(&fcqr->devlist, &device->ccw_queue);
			dasd_schedule_device_bh(device);
			return;
		}
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		/* check for HPF error
		 * call discipline function to requeue all requests
		 * and disable HPF accordingly
		 */
		if (cqr->cpmode && dasd_check_hpf_error(irb) &&
		    device->discipline->handle_hpf_error)
			device->discipline->handle_hpf_error(device, irb);
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == dasd_path_get_opm(device))
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags))
				cqr->lpm = dasd_path_get_opm(device);
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}
EXPORT_SYMBOL(dasd_int_handler);

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->check_for_device_change){
		dasd_put_device(device);
		goto out;
	}
	if (device->discipline->dump_sense_dbf)
		device->discipline->dump_sense_dbf(device, irb, "uc");
	device->discipline->check_for_device_change(device, NULL, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
};

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Skip any non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			continue;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

static void __dasd_process_cqr(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	char errorstring[ERRORLENGTH];

	switch (cqr->status) {
	case DASD_CQR_SUCCESS:
		cqr->status = DASD_CQR_DONE;
		break;
	case DASD_CQR_ERROR:
		cqr->status = DASD_CQR_NEED_ERP;
		break;
	case DASD_CQR_CLEARED:
		cqr->status = DASD_CQR_TERMINATED;
		break;
	default:
		/* internal error 12 - wrong cqr status*/
		snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
	}
	if (cqr->callback)
		cqr->callback(cqr, cqr->callback_data);
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (!block) {
			__dasd_process_cqr(device, cqr);
		} else {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_cqr(device, cqr);
			spin_unlock_bh(&block->queue_lock);
		}
	}
}
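
/*
 * Note: the helpers above and below are the building blocks of the device
 * bottom half (dasd_device_tasklet, defined further down in this file):
 * __dasd_device_process_ccw_queue() collects requests that reached a final
 * status, __dasd_device_process_final_queue() hands them back through
 * their callbacks, and __dasd_device_check_expire() and
 * __dasd_device_start_head() keep the head of the per-device ccw queue
 * moving.
 */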
1958 */ 1959 static void __dasd_device_check_expire(struct dasd_device *device) 1960 { 1961 struct dasd_ccw_req *cqr; 1962 1963 if (list_empty(&device->ccw_queue)) 1964 return; 1965 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1966 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1967 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1968 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 1969 /* 1970 * IO in safe offline processing should not 1971 * run out of retries 1972 */ 1973 cqr->retries++; 1974 } 1975 if (device->discipline->term_IO(cqr) != 0) { 1976 /* Hmpf, try again in 5 sec */ 1977 dev_err(&device->cdev->dev, 1978 "cqr %p timed out (%lus) but cannot be " 1979 "ended, retrying in 5 s\n", 1980 cqr, (cqr->expires/HZ)); 1981 cqr->expires += 5*HZ; 1982 dasd_device_set_timer(device, 5*HZ); 1983 } else { 1984 dev_err(&device->cdev->dev, 1985 "cqr %p timed out (%lus), %i retries " 1986 "remaining\n", cqr, (cqr->expires/HZ), 1987 cqr->retries); 1988 } 1989 } 1990 } 1991 1992 /* 1993 * return 1 when device is not eligible for IO 1994 */ 1995 static int __dasd_device_is_unusable(struct dasd_device *device, 1996 struct dasd_ccw_req *cqr) 1997 { 1998 int mask = ~(DASD_STOPPED_DC_WAIT | DASD_STOPPED_NOSPC); 1999 2000 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) && 2001 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 2002 /* 2003 * dasd is being set offline 2004 * but it is no safe offline where we have to allow I/O 2005 */ 2006 return 1; 2007 } 2008 if (device->stopped) { 2009 if (device->stopped & mask) { 2010 /* stopped and CQR will not change that. */ 2011 return 1; 2012 } 2013 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2014 /* CQR is not able to change device to 2015 * operational. */ 2016 return 1; 2017 } 2018 /* CQR required to get device operational. */ 2019 } 2020 return 0; 2021 } 2022 2023 /* 2024 * Take a look at the first request on the ccw queue and check 2025 * if it needs to be started. 
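 *
 * Descriptive note: this runs with the ccwdev lock held (see
 * dasd_device_tasklet below). If start_IO() fails with anything other than
 * -EACCES, the request is retried via the device timer after half a second.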
2026 */
2027 static void __dasd_device_start_head(struct dasd_device *device)
2028 {
2029 struct dasd_ccw_req *cqr;
2030 int rc;
2031
2032 if (list_empty(&device->ccw_queue))
2033 return;
2034 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
2035 if (cqr->status != DASD_CQR_QUEUED)
2036 return;
2037 /* if device is not usable return request to upper layer */
2038 if (__dasd_device_is_unusable(device, cqr)) {
2039 cqr->intrc = -EAGAIN;
2040 cqr->status = DASD_CQR_CLEARED;
2041 dasd_schedule_device_bh(device);
2042 return;
2043 }
2044
2045 rc = device->discipline->start_IO(cqr);
2046 if (rc == 0)
2047 dasd_device_set_timer(device, cqr->expires);
2048 else if (rc == -EACCES) {
2049 dasd_schedule_device_bh(device);
2050 } else
2051 /* Hmpf, try again in 1/2 sec */
2052 dasd_device_set_timer(device, 50);
2053 }
2054
2055 static void __dasd_device_check_path_events(struct dasd_device *device)
2056 {
2057 __u8 tbvpm, fcsecpm;
2058 int rc;
2059
2060 tbvpm = dasd_path_get_tbvpm(device);
2061 fcsecpm = dasd_path_get_fcsecpm(device);
2062
2063 if (!tbvpm && !fcsecpm)
2064 return;
2065
2066 if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
2067 return;
2068
2069 dasd_path_clear_all_verify(device);
2070 dasd_path_clear_all_fcsec(device);
2071
2072 rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
2073 if (rc) {
2074 dasd_path_add_tbvpm(device, tbvpm);
2075 dasd_path_add_fcsecpm(device, fcsecpm);
2076 dasd_device_set_timer(device, 50);
2077 }
2078 };
2079
2080 /*
2081 * Go through all requests on the dasd_device request queue,
2082 * terminate them on the cdev if necessary, and return them to the
2083 * submitting layer via callback.
2084 * Note:
2085 * Make sure that all 'submitting layers' still exist when
2086 * this function is called! In other words, when 'device' is a base
2087 * device then all block layer requests must have been removed before
2088 * via dasd_flush_block_queue.
2089 */
2090 int dasd_flush_device_queue(struct dasd_device *device)
2091 {
2092 struct dasd_ccw_req *cqr, *n;
2093 int rc;
2094 struct list_head flush_queue;
2095
2096 INIT_LIST_HEAD(&flush_queue);
2097 spin_lock_irq(get_ccwdev_lock(device->cdev));
2098 rc = 0;
2099 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
2100 /* Check status and move request to flush_queue */
2101 switch (cqr->status) {
2102 case DASD_CQR_IN_IO:
2103 rc = device->discipline->term_IO(cqr);
2104 if (rc) {
2105 /* unable to terminate request */
2106 dev_err(&device->cdev->dev,
2107 "Flushing the DASD request queue "
2108 "failed for request %p\n", cqr);
2109 /* stop flush processing */
2110 goto finished;
2111 }
2112 break;
2113 case DASD_CQR_QUEUED:
2114 cqr->stopclk = get_tod_clock();
2115 cqr->status = DASD_CQR_CLEARED;
2116 break;
2117 default: /* no need to modify the others */
2118 break;
2119 }
2120 list_move_tail(&cqr->devlist, &flush_queue);
2121 }
2122 finished:
2123 spin_unlock_irq(get_ccwdev_lock(device->cdev));
2124 /*
2125 * After this point all requests must be in state CLEAR_PENDING,
2126 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
2127 * one of the others.
2128 */ 2129 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 2130 wait_event(dasd_flush_wq, 2131 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2132 /* 2133 * Now set each request back to TERMINATED, DONE or NEED_ERP 2134 * and call the callback function of flushed requests 2135 */ 2136 __dasd_device_process_final_queue(device, &flush_queue); 2137 return rc; 2138 } 2139 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2140 2141 /* 2142 * Acquire the device lock and process queues for the device. 2143 */ 2144 static void dasd_device_tasklet(unsigned long data) 2145 { 2146 struct dasd_device *device = (struct dasd_device *) data; 2147 struct list_head final_queue; 2148 2149 atomic_set (&device->tasklet_scheduled, 0); 2150 INIT_LIST_HEAD(&final_queue); 2151 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2152 /* Check expire time of first request on the ccw queue. */ 2153 __dasd_device_check_expire(device); 2154 /* find final requests on ccw queue */ 2155 __dasd_device_process_ccw_queue(device, &final_queue); 2156 __dasd_device_check_path_events(device); 2157 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2158 /* Now call the callback function of requests with final status */ 2159 __dasd_device_process_final_queue(device, &final_queue); 2160 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2161 /* Now check if the head of the ccw queue needs to be started. */ 2162 __dasd_device_start_head(device); 2163 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2164 if (waitqueue_active(&shutdown_waitq)) 2165 wake_up(&shutdown_waitq); 2166 dasd_put_device(device); 2167 } 2168 2169 /* 2170 * Schedules a call to dasd_tasklet over the device tasklet. 2171 */ 2172 void dasd_schedule_device_bh(struct dasd_device *device) 2173 { 2174 /* Protect against rescheduling. */ 2175 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 2176 return; 2177 dasd_get_device(device); 2178 tasklet_hi_schedule(&device->tasklet); 2179 } 2180 EXPORT_SYMBOL(dasd_schedule_device_bh); 2181 2182 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2183 { 2184 device->stopped |= bits; 2185 } 2186 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 2187 2188 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 2189 { 2190 device->stopped &= ~bits; 2191 if (!device->stopped) 2192 wake_up(&generic_waitq); 2193 } 2194 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 2195 2196 /* 2197 * Queue a request to the head of the device ccw_queue. 2198 * Start the I/O if possible. 2199 */ 2200 void dasd_add_request_head(struct dasd_ccw_req *cqr) 2201 { 2202 struct dasd_device *device; 2203 unsigned long flags; 2204 2205 device = cqr->startdev; 2206 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2207 cqr->status = DASD_CQR_QUEUED; 2208 list_add(&cqr->devlist, &device->ccw_queue); 2209 /* let the bh start the request to keep them in order */ 2210 dasd_schedule_device_bh(device); 2211 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2212 } 2213 EXPORT_SYMBOL(dasd_add_request_head); 2214 2215 /* 2216 * Queue a request to the tail of the device ccw_queue. 2217 * Start the I/O if possible. 
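 *
 * Minimal usage sketch (illustrative only, mirroring _dasd_sleep_on() below):
 *
 *	cqr->callback = dasd_wakeup_cb;
 *	cqr->callback_data = DASD_SLEEPON_START_TAG;
 *	dasd_add_request_tail(cqr);
 *	wait_event(generic_waitq, _wait_for_wakeup(cqr));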
2218 */ 2219 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 2220 { 2221 struct dasd_device *device; 2222 unsigned long flags; 2223 2224 device = cqr->startdev; 2225 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2226 cqr->status = DASD_CQR_QUEUED; 2227 list_add_tail(&cqr->devlist, &device->ccw_queue); 2228 /* let the bh start the request to keep them in order */ 2229 dasd_schedule_device_bh(device); 2230 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2231 } 2232 EXPORT_SYMBOL(dasd_add_request_tail); 2233 2234 /* 2235 * Wakeup helper for the 'sleep_on' functions. 2236 */ 2237 void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 2238 { 2239 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2240 cqr->callback_data = DASD_SLEEPON_END_TAG; 2241 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 2242 wake_up(&generic_waitq); 2243 } 2244 EXPORT_SYMBOL_GPL(dasd_wakeup_cb); 2245 2246 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 2247 { 2248 struct dasd_device *device; 2249 int rc; 2250 2251 device = cqr->startdev; 2252 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2253 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 2254 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2255 return rc; 2256 } 2257 2258 /* 2259 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 2260 */ 2261 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 2262 { 2263 struct dasd_device *device; 2264 dasd_erp_fn_t erp_fn; 2265 2266 if (cqr->status == DASD_CQR_FILLED) 2267 return 0; 2268 device = cqr->startdev; 2269 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2270 if (cqr->status == DASD_CQR_TERMINATED) { 2271 device->discipline->handle_terminated_request(cqr); 2272 return 1; 2273 } 2274 if (cqr->status == DASD_CQR_NEED_ERP) { 2275 erp_fn = device->discipline->erp_action(cqr); 2276 erp_fn(cqr); 2277 return 1; 2278 } 2279 if (cqr->status == DASD_CQR_FAILED) 2280 dasd_log_sense(cqr, &cqr->irb); 2281 if (cqr->refers) { 2282 __dasd_process_erp(device, cqr); 2283 return 1; 2284 } 2285 } 2286 return 0; 2287 } 2288 2289 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 2290 { 2291 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 2292 if (cqr->refers) /* erp is not done yet */ 2293 return 1; 2294 return ((cqr->status != DASD_CQR_DONE) && 2295 (cqr->status != DASD_CQR_FAILED)); 2296 } else 2297 return (cqr->status == DASD_CQR_FILLED); 2298 } 2299 2300 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 2301 { 2302 struct dasd_device *device; 2303 int rc; 2304 struct list_head ccw_queue; 2305 struct dasd_ccw_req *cqr; 2306 2307 INIT_LIST_HEAD(&ccw_queue); 2308 maincqr->status = DASD_CQR_FILLED; 2309 device = maincqr->startdev; 2310 list_add(&maincqr->blocklist, &ccw_queue); 2311 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 2312 cqr = list_first_entry(&ccw_queue, 2313 struct dasd_ccw_req, blocklist)) { 2314 2315 if (__dasd_sleep_on_erp(cqr)) 2316 continue; 2317 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 2318 continue; 2319 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2320 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2321 cqr->status = DASD_CQR_FAILED; 2322 cqr->intrc = -EPERM; 2323 continue; 2324 } 2325 /* Non-temporary stop condition will trigger fail fast */ 2326 if (device->stopped & ~DASD_STOPPED_PENDING && 2327 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2328 (!dasd_eer_enabled(device))) { 2329 cqr->status = DASD_CQR_FAILED; 2330 cqr->intrc = -ENOLINK; 
2331 continue; 2332 } 2333 /* 2334 * Don't try to start requests if device is in 2335 * offline processing, it might wait forever 2336 */ 2337 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2338 cqr->status = DASD_CQR_FAILED; 2339 cqr->intrc = -ENODEV; 2340 continue; 2341 } 2342 /* 2343 * Don't try to start requests if device is stopped 2344 * except path verification requests 2345 */ 2346 if (!test_bit(DASD_CQR_VERIFY_PATH, &cqr->flags)) { 2347 if (interruptible) { 2348 rc = wait_event_interruptible( 2349 generic_waitq, !(device->stopped)); 2350 if (rc == -ERESTARTSYS) { 2351 cqr->status = DASD_CQR_FAILED; 2352 maincqr->intrc = rc; 2353 continue; 2354 } 2355 } else 2356 wait_event(generic_waitq, !(device->stopped)); 2357 } 2358 if (!cqr->callback) 2359 cqr->callback = dasd_wakeup_cb; 2360 2361 cqr->callback_data = DASD_SLEEPON_START_TAG; 2362 dasd_add_request_tail(cqr); 2363 if (interruptible) { 2364 rc = wait_event_interruptible( 2365 generic_waitq, _wait_for_wakeup(cqr)); 2366 if (rc == -ERESTARTSYS) { 2367 dasd_cancel_req(cqr); 2368 /* wait (non-interruptible) for final status */ 2369 wait_event(generic_waitq, 2370 _wait_for_wakeup(cqr)); 2371 cqr->status = DASD_CQR_FAILED; 2372 maincqr->intrc = rc; 2373 continue; 2374 } 2375 } else 2376 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2377 } 2378 2379 maincqr->endclk = get_tod_clock(); 2380 if ((maincqr->status != DASD_CQR_DONE) && 2381 (maincqr->intrc != -ERESTARTSYS)) 2382 dasd_log_sense(maincqr, &maincqr->irb); 2383 if (maincqr->status == DASD_CQR_DONE) 2384 rc = 0; 2385 else if (maincqr->intrc) 2386 rc = maincqr->intrc; 2387 else 2388 rc = -EIO; 2389 return rc; 2390 } 2391 2392 static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) 2393 { 2394 struct dasd_ccw_req *cqr; 2395 2396 list_for_each_entry(cqr, ccw_queue, blocklist) { 2397 if (cqr->callback_data != DASD_SLEEPON_END_TAG) 2398 return 0; 2399 } 2400 2401 return 1; 2402 } 2403 2404 static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) 2405 { 2406 struct dasd_device *device; 2407 struct dasd_ccw_req *cqr, *n; 2408 u8 *sense = NULL; 2409 int rc; 2410 2411 retry: 2412 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2413 device = cqr->startdev; 2414 if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ 2415 continue; 2416 2417 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2418 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2419 cqr->status = DASD_CQR_FAILED; 2420 cqr->intrc = -EPERM; 2421 continue; 2422 } 2423 /*Non-temporary stop condition will trigger fail fast*/ 2424 if (device->stopped & ~DASD_STOPPED_PENDING && 2425 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2426 !dasd_eer_enabled(device)) { 2427 cqr->status = DASD_CQR_FAILED; 2428 cqr->intrc = -EAGAIN; 2429 continue; 2430 } 2431 2432 /*Don't try to start requests if device is stopped*/ 2433 if (interruptible) { 2434 rc = wait_event_interruptible( 2435 generic_waitq, !device->stopped); 2436 if (rc == -ERESTARTSYS) { 2437 cqr->status = DASD_CQR_FAILED; 2438 cqr->intrc = rc; 2439 continue; 2440 } 2441 } else 2442 wait_event(generic_waitq, !(device->stopped)); 2443 2444 if (!cqr->callback) 2445 cqr->callback = dasd_wakeup_cb; 2446 cqr->callback_data = DASD_SLEEPON_START_TAG; 2447 dasd_add_request_tail(cqr); 2448 } 2449 2450 wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); 2451 2452 rc = 0; 2453 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2454 /* 2455 * In some cases the 'File Protected' or 'Incorrect Length' 2456 * error might be 
expected and error recovery would be 2457 * unnecessary in these cases. Check if the according suppress 2458 * bit is set. 2459 */ 2460 sense = dasd_get_sense(&cqr->irb); 2461 if (sense && sense[1] & SNS1_FILE_PROTECTED && 2462 test_bit(DASD_CQR_SUPPRESS_FP, &cqr->flags)) 2463 continue; 2464 if (scsw_cstat(&cqr->irb.scsw) == 0x40 && 2465 test_bit(DASD_CQR_SUPPRESS_IL, &cqr->flags)) 2466 continue; 2467 2468 /* 2469 * for alias devices simplify error recovery and 2470 * return to upper layer 2471 * do not skip ERP requests 2472 */ 2473 if (cqr->startdev != cqr->basedev && !cqr->refers && 2474 (cqr->status == DASD_CQR_TERMINATED || 2475 cqr->status == DASD_CQR_NEED_ERP)) 2476 return -EAGAIN; 2477 2478 /* normal recovery for basedev IO */ 2479 if (__dasd_sleep_on_erp(cqr)) 2480 /* handle erp first */ 2481 goto retry; 2482 } 2483 2484 return 0; 2485 } 2486 2487 /* 2488 * Queue a request to the tail of the device ccw_queue and wait for 2489 * it's completion. 2490 */ 2491 int dasd_sleep_on(struct dasd_ccw_req *cqr) 2492 { 2493 return _dasd_sleep_on(cqr, 0); 2494 } 2495 EXPORT_SYMBOL(dasd_sleep_on); 2496 2497 /* 2498 * Start requests from a ccw_queue and wait for their completion. 2499 */ 2500 int dasd_sleep_on_queue(struct list_head *ccw_queue) 2501 { 2502 return _dasd_sleep_on_queue(ccw_queue, 0); 2503 } 2504 EXPORT_SYMBOL(dasd_sleep_on_queue); 2505 2506 /* 2507 * Start requests from a ccw_queue and wait interruptible for their completion. 2508 */ 2509 int dasd_sleep_on_queue_interruptible(struct list_head *ccw_queue) 2510 { 2511 return _dasd_sleep_on_queue(ccw_queue, 1); 2512 } 2513 EXPORT_SYMBOL(dasd_sleep_on_queue_interruptible); 2514 2515 /* 2516 * Queue a request to the tail of the device ccw_queue and wait 2517 * interruptible for it's completion. 2518 */ 2519 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 2520 { 2521 return _dasd_sleep_on(cqr, 1); 2522 } 2523 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2524 2525 /* 2526 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2527 * for eckd devices) the currently running request has to be terminated 2528 * and be put back to status queued, before the special request is added 2529 * to the head of the queue. Then the special request is waited on normally. 2530 */ 2531 static inline int _dasd_term_running_cqr(struct dasd_device *device) 2532 { 2533 struct dasd_ccw_req *cqr; 2534 int rc; 2535 2536 if (list_empty(&device->ccw_queue)) 2537 return 0; 2538 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 2539 rc = device->discipline->term_IO(cqr); 2540 if (!rc) 2541 /* 2542 * CQR terminated because a more important request is pending. 2543 * Undo decreasing of retry counter because this is 2544 * not an error case. 
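 * (term_IO decrements cqr->retries while terminating the request, so the
 * increment below simply restores the previous value.)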
2545 */ 2546 cqr->retries++; 2547 return rc; 2548 } 2549 2550 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 2551 { 2552 struct dasd_device *device; 2553 int rc; 2554 2555 device = cqr->startdev; 2556 if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && 2557 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2558 cqr->status = DASD_CQR_FAILED; 2559 cqr->intrc = -EPERM; 2560 return -EIO; 2561 } 2562 spin_lock_irq(get_ccwdev_lock(device->cdev)); 2563 rc = _dasd_term_running_cqr(device); 2564 if (rc) { 2565 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2566 return rc; 2567 } 2568 cqr->callback = dasd_wakeup_cb; 2569 cqr->callback_data = DASD_SLEEPON_START_TAG; 2570 cqr->status = DASD_CQR_QUEUED; 2571 /* 2572 * add new request as second 2573 * first the terminated cqr needs to be finished 2574 */ 2575 list_add(&cqr->devlist, device->ccw_queue.next); 2576 2577 /* let the bh start the request to keep them in order */ 2578 dasd_schedule_device_bh(device); 2579 2580 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 2581 2582 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 2583 2584 if (cqr->status == DASD_CQR_DONE) 2585 rc = 0; 2586 else if (cqr->intrc) 2587 rc = cqr->intrc; 2588 else 2589 rc = -EIO; 2590 2591 /* kick tasklets */ 2592 dasd_schedule_device_bh(device); 2593 if (device->block) 2594 dasd_schedule_block_bh(device->block); 2595 2596 return rc; 2597 } 2598 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2599 2600 /* 2601 * Cancels a request that was started with dasd_sleep_on_req. 2602 * This is useful to timeout requests. The request will be 2603 * terminated if it is currently in i/o. 2604 * Returns 0 if request termination was successful 2605 * negative error code if termination failed 2606 * Cancellation of a request is an asynchronous operation! The calling 2607 * function has to wait until the request is properly returned via callback. 2608 */ 2609 static int __dasd_cancel_req(struct dasd_ccw_req *cqr) 2610 { 2611 struct dasd_device *device = cqr->startdev; 2612 int rc = 0; 2613 2614 switch (cqr->status) { 2615 case DASD_CQR_QUEUED: 2616 /* request was not started - just set to cleared */ 2617 cqr->status = DASD_CQR_CLEARED; 2618 break; 2619 case DASD_CQR_IN_IO: 2620 /* request in IO - terminate IO and release again */ 2621 rc = device->discipline->term_IO(cqr); 2622 if (rc) { 2623 dev_err(&device->cdev->dev, 2624 "Cancelling request %p failed with rc=%d\n", 2625 cqr, rc); 2626 } else { 2627 cqr->stopclk = get_tod_clock(); 2628 } 2629 break; 2630 default: /* already finished or clear pending - do nothing */ 2631 break; 2632 } 2633 dasd_schedule_device_bh(device); 2634 return rc; 2635 } 2636 2637 int dasd_cancel_req(struct dasd_ccw_req *cqr) 2638 { 2639 struct dasd_device *device = cqr->startdev; 2640 unsigned long flags; 2641 int rc; 2642 2643 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 2644 rc = __dasd_cancel_req(cqr); 2645 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2646 return rc; 2647 } 2648 2649 /* 2650 * SECTION: Operations of the dasd_block layer. 2651 */ 2652 2653 /* 2654 * Timeout function for dasd_block. This is used when the block layer 2655 * is waiting for something that may not come reliably, (e.g. 
a state 2656 * change interrupt) 2657 */ 2658 static void dasd_block_timeout(struct timer_list *t) 2659 { 2660 unsigned long flags; 2661 struct dasd_block *block; 2662 2663 block = from_timer(block, t, timer); 2664 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 2665 /* re-activate request queue */ 2666 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 2667 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 2668 dasd_schedule_block_bh(block); 2669 blk_mq_run_hw_queues(block->gdp->queue, true); 2670 } 2671 2672 /* 2673 * Setup timeout for a dasd_block in jiffies. 2674 */ 2675 void dasd_block_set_timer(struct dasd_block *block, int expires) 2676 { 2677 if (expires == 0) 2678 del_timer(&block->timer); 2679 else 2680 mod_timer(&block->timer, jiffies + expires); 2681 } 2682 EXPORT_SYMBOL(dasd_block_set_timer); 2683 2684 /* 2685 * Clear timeout for a dasd_block. 2686 */ 2687 void dasd_block_clear_timer(struct dasd_block *block) 2688 { 2689 del_timer(&block->timer); 2690 } 2691 EXPORT_SYMBOL(dasd_block_clear_timer); 2692 2693 /* 2694 * Process finished error recovery ccw. 2695 */ 2696 static void __dasd_process_erp(struct dasd_device *device, 2697 struct dasd_ccw_req *cqr) 2698 { 2699 dasd_erp_fn_t erp_fn; 2700 2701 if (cqr->status == DASD_CQR_DONE) 2702 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 2703 else 2704 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 2705 erp_fn = device->discipline->erp_postaction(cqr); 2706 erp_fn(cqr); 2707 } 2708 2709 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 2710 { 2711 struct request *req; 2712 blk_status_t error = BLK_STS_OK; 2713 unsigned int proc_bytes; 2714 int status; 2715 2716 req = (struct request *) cqr->callback_data; 2717 dasd_profile_end(cqr->block, cqr, req); 2718 2719 proc_bytes = cqr->proc_bytes; 2720 status = cqr->block->base->discipline->free_cp(cqr, req); 2721 if (status < 0) 2722 error = errno_to_blk_status(status); 2723 else if (status == 0) { 2724 switch (cqr->intrc) { 2725 case -EPERM: 2726 error = BLK_STS_NEXUS; 2727 break; 2728 case -ENOLINK: 2729 error = BLK_STS_TRANSPORT; 2730 break; 2731 case -ETIMEDOUT: 2732 error = BLK_STS_TIMEOUT; 2733 break; 2734 default: 2735 error = BLK_STS_IOERR; 2736 break; 2737 } 2738 } 2739 2740 /* 2741 * We need to take care for ETIMEDOUT errors here since the 2742 * complete callback does not get called in this case. 2743 * Take care of all errors here and avoid additional code to 2744 * transfer the error value to the complete callback. 2745 */ 2746 if (error) { 2747 blk_mq_end_request(req, error); 2748 blk_mq_run_hw_queues(req->q, true); 2749 } else { 2750 /* 2751 * Partial completed requests can happen with ESE devices. 2752 * During read we might have gotten a NRF error and have to 2753 * complete a request partially. 2754 */ 2755 if (proc_bytes) { 2756 blk_update_request(req, BLK_STS_OK, proc_bytes); 2757 blk_mq_requeue_request(req, true); 2758 } else if (likely(!blk_should_fake_timeout(req->q))) { 2759 blk_mq_complete_request(req); 2760 } 2761 } 2762 } 2763 2764 /* 2765 * Process ccw request queue. 2766 */ 2767 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 2768 struct list_head *final_queue) 2769 { 2770 struct list_head *l, *n; 2771 struct dasd_ccw_req *cqr; 2772 dasd_erp_fn_t erp_fn; 2773 unsigned long flags; 2774 struct dasd_device *base = block->base; 2775 2776 restart: 2777 /* Process request with final status. 
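 * (final here means DASD_CQR_DONE, _FAILED, _NEED_ERP or _TERMINATED;
 * everything else is skipped by the checks below)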
*/ 2778 list_for_each_safe(l, n, &block->ccw_queue) { 2779 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2780 if (cqr->status != DASD_CQR_DONE && 2781 cqr->status != DASD_CQR_FAILED && 2782 cqr->status != DASD_CQR_NEED_ERP && 2783 cqr->status != DASD_CQR_TERMINATED) 2784 continue; 2785 2786 if (cqr->status == DASD_CQR_TERMINATED) { 2787 base->discipline->handle_terminated_request(cqr); 2788 goto restart; 2789 } 2790 2791 /* Process requests that may be recovered */ 2792 if (cqr->status == DASD_CQR_NEED_ERP) { 2793 erp_fn = base->discipline->erp_action(cqr); 2794 if (IS_ERR(erp_fn(cqr))) 2795 continue; 2796 goto restart; 2797 } 2798 2799 /* log sense for fatal error */ 2800 if (cqr->status == DASD_CQR_FAILED) { 2801 dasd_log_sense(cqr, &cqr->irb); 2802 } 2803 2804 /* First of all call extended error reporting. */ 2805 if (dasd_eer_enabled(base) && 2806 cqr->status == DASD_CQR_FAILED) { 2807 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 2808 2809 /* restart request */ 2810 cqr->status = DASD_CQR_FILLED; 2811 cqr->retries = 255; 2812 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 2813 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 2814 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 2815 flags); 2816 goto restart; 2817 } 2818 2819 /* Process finished ERP request. */ 2820 if (cqr->refers) { 2821 __dasd_process_erp(base, cqr); 2822 goto restart; 2823 } 2824 2825 /* Rechain finished requests to final queue */ 2826 cqr->endclk = get_tod_clock(); 2827 list_move_tail(&cqr->blocklist, final_queue); 2828 } 2829 } 2830 2831 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2832 { 2833 dasd_schedule_block_bh(cqr->block); 2834 } 2835 2836 static void __dasd_block_start_head(struct dasd_block *block) 2837 { 2838 struct dasd_ccw_req *cqr; 2839 2840 if (list_empty(&block->ccw_queue)) 2841 return; 2842 /* We allways begin with the first requests on the queue, as some 2843 * of previously started requests have to be enqueued on a 2844 * dasd_device again for error recovery. 2845 */ 2846 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2847 if (cqr->status != DASD_CQR_FILLED) 2848 continue; 2849 if (test_bit(DASD_FLAG_LOCK_STOLEN, &block->base->flags) && 2850 !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { 2851 cqr->status = DASD_CQR_FAILED; 2852 cqr->intrc = -EPERM; 2853 dasd_schedule_block_bh(block); 2854 continue; 2855 } 2856 /* Non-temporary stop condition will trigger fail fast */ 2857 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2858 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2859 (!dasd_eer_enabled(block->base))) { 2860 cqr->status = DASD_CQR_FAILED; 2861 cqr->intrc = -ENOLINK; 2862 dasd_schedule_block_bh(block); 2863 continue; 2864 } 2865 /* Don't try to start requests if device is stopped */ 2866 if (block->base->stopped) 2867 return; 2868 2869 /* just a fail safe check, should not happen */ 2870 if (!cqr->startdev) 2871 cqr->startdev = block->base; 2872 2873 /* make sure that the requests we submit find their way back */ 2874 cqr->callback = dasd_return_cqr_cb; 2875 2876 dasd_add_request_tail(cqr); 2877 } 2878 } 2879 2880 /* 2881 * Central dasd_block layer routine. Takes requests from the generic 2882 * block layer request queue, creates ccw requests, enqueues them on 2883 * a dasd_device and processes ccw requests that have been returned. 
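 *
 * Rough flow (descriptive summary, not part of the original comment):
 *	1. __dasd_process_block_ccw_queue() collects requests with final status
 *	2. __dasd_cleanup_cqr() completes them towards the blk-mq layer
 *	3. __dasd_block_start_head() hands the next DASD_CQR_FILLED request
 *	   to the dasd_device layer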
2884 */ 2885 static void dasd_block_tasklet(unsigned long data) 2886 { 2887 struct dasd_block *block = (struct dasd_block *) data; 2888 struct list_head final_queue; 2889 struct list_head *l, *n; 2890 struct dasd_ccw_req *cqr; 2891 struct dasd_queue *dq; 2892 2893 atomic_set(&block->tasklet_scheduled, 0); 2894 INIT_LIST_HEAD(&final_queue); 2895 spin_lock_irq(&block->queue_lock); 2896 /* Finish off requests on ccw queue */ 2897 __dasd_process_block_ccw_queue(block, &final_queue); 2898 spin_unlock_irq(&block->queue_lock); 2899 2900 /* Now call the callback function of requests with final status */ 2901 list_for_each_safe(l, n, &final_queue) { 2902 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2903 dq = cqr->dq; 2904 spin_lock_irq(&dq->lock); 2905 list_del_init(&cqr->blocklist); 2906 __dasd_cleanup_cqr(cqr); 2907 spin_unlock_irq(&dq->lock); 2908 } 2909 2910 spin_lock_irq(&block->queue_lock); 2911 /* Now check if the head of the ccw queue needs to be started. */ 2912 __dasd_block_start_head(block); 2913 spin_unlock_irq(&block->queue_lock); 2914 2915 if (waitqueue_active(&shutdown_waitq)) 2916 wake_up(&shutdown_waitq); 2917 dasd_put_device(block->base); 2918 } 2919 2920 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2921 { 2922 wake_up(&dasd_flush_wq); 2923 } 2924 2925 /* 2926 * Requeue a request back to the block request queue 2927 * only works for block requests 2928 */ 2929 static int _dasd_requeue_request(struct dasd_ccw_req *cqr) 2930 { 2931 struct dasd_block *block = cqr->block; 2932 struct request *req; 2933 2934 if (!block) 2935 return -EINVAL; 2936 /* 2937 * If the request is an ERP request there is nothing to requeue. 2938 * This will be done with the remaining original request. 2939 */ 2940 if (cqr->refers) 2941 return 0; 2942 spin_lock_irq(&cqr->dq->lock); 2943 req = (struct request *) cqr->callback_data; 2944 blk_mq_requeue_request(req, false); 2945 spin_unlock_irq(&cqr->dq->lock); 2946 2947 return 0; 2948 } 2949 2950 /* 2951 * Go through all request on the dasd_block request queue, cancel them 2952 * on the respective dasd_device, and return them to the generic 2953 * block layer. 2954 */ 2955 static int dasd_flush_block_queue(struct dasd_block *block) 2956 { 2957 struct dasd_ccw_req *cqr, *n; 2958 int rc, i; 2959 struct list_head flush_queue; 2960 unsigned long flags; 2961 2962 INIT_LIST_HEAD(&flush_queue); 2963 spin_lock_bh(&block->queue_lock); 2964 rc = 0; 2965 restart: 2966 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2967 /* if this request currently owned by a dasd_device cancel it */ 2968 if (cqr->status >= DASD_CQR_QUEUED) 2969 rc = dasd_cancel_req(cqr); 2970 if (rc < 0) 2971 break; 2972 /* Rechain request (including erp chain) so it won't be 2973 * touched by the dasd_block_tasklet anymore. 2974 * Replace the callback so we notice when the request 2975 * is returned from the dasd_device layer. 2976 */ 2977 cqr->callback = _dasd_wake_block_flush_cb; 2978 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2979 list_move_tail(&cqr->blocklist, &flush_queue); 2980 if (i > 1) 2981 /* moved more than one request - need to restart */ 2982 goto restart; 2983 } 2984 spin_unlock_bh(&block->queue_lock); 2985 /* Now call the callback function of flushed requests */ 2986 restart_cb: 2987 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2988 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2989 /* Process finished ERP request. 
*/ 2990 if (cqr->refers) { 2991 spin_lock_bh(&block->queue_lock); 2992 __dasd_process_erp(block->base, cqr); 2993 spin_unlock_bh(&block->queue_lock); 2994 /* restart list_for_xx loop since dasd_process_erp 2995 * might remove multiple elements */ 2996 goto restart_cb; 2997 } 2998 /* call the callback function */ 2999 spin_lock_irqsave(&cqr->dq->lock, flags); 3000 cqr->endclk = get_tod_clock(); 3001 list_del_init(&cqr->blocklist); 3002 __dasd_cleanup_cqr(cqr); 3003 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3004 } 3005 return rc; 3006 } 3007 3008 /* 3009 * Schedules a call to dasd_tasklet over the device tasklet. 3010 */ 3011 void dasd_schedule_block_bh(struct dasd_block *block) 3012 { 3013 /* Protect against rescheduling. */ 3014 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 3015 return; 3016 /* life cycle of block is bound to it's base device */ 3017 dasd_get_device(block->base); 3018 tasklet_hi_schedule(&block->tasklet); 3019 } 3020 EXPORT_SYMBOL(dasd_schedule_block_bh); 3021 3022 3023 /* 3024 * SECTION: external block device operations 3025 * (request queue handling, open, release, etc.) 3026 */ 3027 3028 /* 3029 * Dasd request queue function. Called from ll_rw_blk.c 3030 */ 3031 static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx, 3032 const struct blk_mq_queue_data *qd) 3033 { 3034 struct dasd_block *block = hctx->queue->queuedata; 3035 struct dasd_queue *dq = hctx->driver_data; 3036 struct request *req = qd->rq; 3037 struct dasd_device *basedev; 3038 struct dasd_ccw_req *cqr; 3039 blk_status_t rc = BLK_STS_OK; 3040 3041 basedev = block->base; 3042 spin_lock_irq(&dq->lock); 3043 if (basedev->state < DASD_STATE_READY || 3044 test_bit(DASD_FLAG_OFFLINE, &basedev->flags)) { 3045 DBF_DEV_EVENT(DBF_ERR, basedev, 3046 "device not ready for request %p", req); 3047 rc = BLK_STS_IOERR; 3048 goto out; 3049 } 3050 3051 /* 3052 * if device is stopped do not fetch new requests 3053 * except failfast is active which will let requests fail 3054 * immediately in __dasd_block_start_head() 3055 */ 3056 if (basedev->stopped && !(basedev->features & DASD_FEATURE_FAILFAST)) { 3057 DBF_DEV_EVENT(DBF_ERR, basedev, 3058 "device stopped request %p", req); 3059 rc = BLK_STS_RESOURCE; 3060 goto out; 3061 } 3062 3063 if (basedev->features & DASD_FEATURE_READONLY && 3064 rq_data_dir(req) == WRITE) { 3065 DBF_DEV_EVENT(DBF_ERR, basedev, 3066 "Rejecting write request %p", req); 3067 rc = BLK_STS_IOERR; 3068 goto out; 3069 } 3070 3071 if (test_bit(DASD_FLAG_ABORTALL, &basedev->flags) && 3072 (basedev->features & DASD_FEATURE_FAILFAST || 3073 blk_noretry_request(req))) { 3074 DBF_DEV_EVENT(DBF_ERR, basedev, 3075 "Rejecting failfast request %p", req); 3076 rc = BLK_STS_IOERR; 3077 goto out; 3078 } 3079 3080 cqr = basedev->discipline->build_cp(basedev, block, req); 3081 if (IS_ERR(cqr)) { 3082 if (PTR_ERR(cqr) == -EBUSY || 3083 PTR_ERR(cqr) == -ENOMEM || 3084 PTR_ERR(cqr) == -EAGAIN) { 3085 rc = BLK_STS_RESOURCE; 3086 goto out; 3087 } 3088 DBF_DEV_EVENT(DBF_ERR, basedev, 3089 "CCW creation failed (rc=%ld) on request %p", 3090 PTR_ERR(cqr), req); 3091 rc = BLK_STS_IOERR; 3092 goto out; 3093 } 3094 /* 3095 * Note: callback is set to dasd_return_cqr_cb in 3096 * __dasd_block_start_head to cover erp requests as well 3097 */ 3098 cqr->callback_data = req; 3099 cqr->status = DASD_CQR_FILLED; 3100 cqr->dq = dq; 3101 3102 blk_mq_start_request(req); 3103 spin_lock(&block->queue_lock); 3104 list_add_tail(&cqr->blocklist, &block->ccw_queue); 3105 INIT_LIST_HEAD(&cqr->devlist); 3106 
dasd_profile_start(block, cqr, req); 3107 dasd_schedule_block_bh(block); 3108 spin_unlock(&block->queue_lock); 3109 3110 out: 3111 spin_unlock_irq(&dq->lock); 3112 return rc; 3113 } 3114 3115 /* 3116 * Block timeout callback, called from the block layer 3117 * 3118 * Return values: 3119 * BLK_EH_RESET_TIMER if the request should be left running 3120 * BLK_EH_DONE if the request is handled or terminated 3121 * by the driver. 3122 */ 3123 enum blk_eh_timer_return dasd_times_out(struct request *req) 3124 { 3125 struct dasd_block *block = req->q->queuedata; 3126 struct dasd_device *device; 3127 struct dasd_ccw_req *cqr; 3128 unsigned long flags; 3129 int rc = 0; 3130 3131 cqr = blk_mq_rq_to_pdu(req); 3132 if (!cqr) 3133 return BLK_EH_DONE; 3134 3135 spin_lock_irqsave(&cqr->dq->lock, flags); 3136 device = cqr->startdev ? cqr->startdev : block->base; 3137 if (!device->blk_timeout) { 3138 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3139 return BLK_EH_RESET_TIMER; 3140 } 3141 DBF_DEV_EVENT(DBF_WARNING, device, 3142 " dasd_times_out cqr %p status %x", 3143 cqr, cqr->status); 3144 3145 spin_lock(&block->queue_lock); 3146 spin_lock(get_ccwdev_lock(device->cdev)); 3147 cqr->retries = -1; 3148 cqr->intrc = -ETIMEDOUT; 3149 if (cqr->status >= DASD_CQR_QUEUED) { 3150 rc = __dasd_cancel_req(cqr); 3151 } else if (cqr->status == DASD_CQR_FILLED || 3152 cqr->status == DASD_CQR_NEED_ERP) { 3153 cqr->status = DASD_CQR_TERMINATED; 3154 } else if (cqr->status == DASD_CQR_IN_ERP) { 3155 struct dasd_ccw_req *searchcqr, *nextcqr, *tmpcqr; 3156 3157 list_for_each_entry_safe(searchcqr, nextcqr, 3158 &block->ccw_queue, blocklist) { 3159 tmpcqr = searchcqr; 3160 while (tmpcqr->refers) 3161 tmpcqr = tmpcqr->refers; 3162 if (tmpcqr != cqr) 3163 continue; 3164 /* searchcqr is an ERP request for cqr */ 3165 searchcqr->retries = -1; 3166 searchcqr->intrc = -ETIMEDOUT; 3167 if (searchcqr->status >= DASD_CQR_QUEUED) { 3168 rc = __dasd_cancel_req(searchcqr); 3169 } else if ((searchcqr->status == DASD_CQR_FILLED) || 3170 (searchcqr->status == DASD_CQR_NEED_ERP)) { 3171 searchcqr->status = DASD_CQR_TERMINATED; 3172 rc = 0; 3173 } else if (searchcqr->status == DASD_CQR_IN_ERP) { 3174 /* 3175 * Shouldn't happen; most recent ERP 3176 * request is at the front of queue 3177 */ 3178 continue; 3179 } 3180 break; 3181 } 3182 } 3183 spin_unlock(get_ccwdev_lock(device->cdev)); 3184 dasd_schedule_block_bh(block); 3185 spin_unlock(&block->queue_lock); 3186 spin_unlock_irqrestore(&cqr->dq->lock, flags); 3187 3188 return rc ? 
BLK_EH_RESET_TIMER : BLK_EH_DONE; 3189 } 3190 3191 static int dasd_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, 3192 unsigned int idx) 3193 { 3194 struct dasd_queue *dq = kzalloc(sizeof(*dq), GFP_KERNEL); 3195 3196 if (!dq) 3197 return -ENOMEM; 3198 3199 spin_lock_init(&dq->lock); 3200 hctx->driver_data = dq; 3201 3202 return 0; 3203 } 3204 3205 static void dasd_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx) 3206 { 3207 kfree(hctx->driver_data); 3208 hctx->driver_data = NULL; 3209 } 3210 3211 static void dasd_request_done(struct request *req) 3212 { 3213 blk_mq_end_request(req, 0); 3214 blk_mq_run_hw_queues(req->q, true); 3215 } 3216 3217 struct blk_mq_ops dasd_mq_ops = { 3218 .queue_rq = do_dasd_request, 3219 .complete = dasd_request_done, 3220 .timeout = dasd_times_out, 3221 .init_hctx = dasd_init_hctx, 3222 .exit_hctx = dasd_exit_hctx, 3223 }; 3224 3225 static int dasd_open(struct block_device *bdev, fmode_t mode) 3226 { 3227 struct dasd_device *base; 3228 int rc; 3229 3230 base = dasd_device_from_gendisk(bdev->bd_disk); 3231 if (!base) 3232 return -ENODEV; 3233 3234 atomic_inc(&base->block->open_count); 3235 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 3236 rc = -ENODEV; 3237 goto unlock; 3238 } 3239 3240 if (!try_module_get(base->discipline->owner)) { 3241 rc = -EINVAL; 3242 goto unlock; 3243 } 3244 3245 if (dasd_probeonly) { 3246 dev_info(&base->cdev->dev, 3247 "Accessing the DASD failed because it is in " 3248 "probeonly mode\n"); 3249 rc = -EPERM; 3250 goto out; 3251 } 3252 3253 if (base->state <= DASD_STATE_BASIC) { 3254 DBF_DEV_EVENT(DBF_ERR, base, " %s", 3255 " Cannot open unrecognized device"); 3256 rc = -ENODEV; 3257 goto out; 3258 } 3259 3260 if ((mode & FMODE_WRITE) && 3261 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 3262 (base->features & DASD_FEATURE_READONLY))) { 3263 rc = -EROFS; 3264 goto out; 3265 } 3266 3267 dasd_put_device(base); 3268 return 0; 3269 3270 out: 3271 module_put(base->discipline->owner); 3272 unlock: 3273 atomic_dec(&base->block->open_count); 3274 dasd_put_device(base); 3275 return rc; 3276 } 3277 3278 static void dasd_release(struct gendisk *disk, fmode_t mode) 3279 { 3280 struct dasd_device *base = dasd_device_from_gendisk(disk); 3281 if (base) { 3282 atomic_dec(&base->block->open_count); 3283 module_put(base->discipline->owner); 3284 dasd_put_device(base); 3285 } 3286 } 3287 3288 /* 3289 * Return disk geometry. 
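 * geo->start is converted from 512-byte sectors to device blocks below via
 * s2b_shift (descriptive note; s2b_shift is the dasd_block's sector-to-block
 * shift).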
3290 */ 3291 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3292 { 3293 struct dasd_device *base; 3294 3295 base = dasd_device_from_gendisk(bdev->bd_disk); 3296 if (!base) 3297 return -ENODEV; 3298 3299 if (!base->discipline || 3300 !base->discipline->fill_geometry) { 3301 dasd_put_device(base); 3302 return -EINVAL; 3303 } 3304 base->discipline->fill_geometry(base->block, geo); 3305 geo->start = get_start_sect(bdev) >> base->block->s2b_shift; 3306 dasd_put_device(base); 3307 return 0; 3308 } 3309 3310 const struct block_device_operations 3311 dasd_device_operations = { 3312 .owner = THIS_MODULE, 3313 .open = dasd_open, 3314 .release = dasd_release, 3315 .ioctl = dasd_ioctl, 3316 .compat_ioctl = dasd_ioctl, 3317 .getgeo = dasd_getgeo, 3318 .set_read_only = dasd_set_read_only, 3319 }; 3320 3321 /******************************************************************************* 3322 * end of block device operations 3323 */ 3324 3325 static void 3326 dasd_exit(void) 3327 { 3328 #ifdef CONFIG_PROC_FS 3329 dasd_proc_exit(); 3330 #endif 3331 dasd_eer_exit(); 3332 kmem_cache_destroy(dasd_page_cache); 3333 dasd_page_cache = NULL; 3334 dasd_gendisk_exit(); 3335 dasd_devmap_exit(); 3336 if (dasd_debug_area != NULL) { 3337 debug_unregister(dasd_debug_area); 3338 dasd_debug_area = NULL; 3339 } 3340 dasd_statistics_removeroot(); 3341 } 3342 3343 /* 3344 * SECTION: common functions for ccw_driver use 3345 */ 3346 3347 /* 3348 * Is the device read-only? 3349 * Note that this function does not report the setting of the 3350 * readonly device attribute, but how it is configured in z/VM. 3351 */ 3352 int dasd_device_is_ro(struct dasd_device *device) 3353 { 3354 struct ccw_dev_id dev_id; 3355 struct diag210 diag_data; 3356 int rc; 3357 3358 if (!MACHINE_IS_VM) 3359 return 0; 3360 ccw_device_get_id(device->cdev, &dev_id); 3361 memset(&diag_data, 0, sizeof(diag_data)); 3362 diag_data.vrdcdvno = dev_id.devno; 3363 diag_data.vrdclen = sizeof(diag_data); 3364 rc = diag210(&diag_data); 3365 if (rc == 0 || rc == 2) { 3366 return diag_data.vrdcvfla & 0x80; 3367 } else { 3368 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 3369 dev_id.devno, rc); 3370 return 0; 3371 } 3372 } 3373 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 3374 3375 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 3376 { 3377 struct ccw_device *cdev = data; 3378 int ret; 3379 3380 ret = ccw_device_set_online(cdev); 3381 if (ret) 3382 pr_warn("%s: Setting the DASD online failed with rc=%d\n", 3383 dev_name(&cdev->dev), ret); 3384 } 3385 3386 /* 3387 * Initial attempt at a probe function. this can be simplified once 3388 * the other detection code is gone. 3389 */ 3390 int dasd_generic_probe(struct ccw_device *cdev) 3391 { 3392 cdev->handler = &dasd_int_handler; 3393 3394 /* 3395 * Automatically online either all dasd devices (dasd_autodetect) 3396 * or all devices specified with dasd= parameters during 3397 * initial probe. 3398 */ 3399 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 3400 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 3401 async_schedule(dasd_generic_auto_online, cdev); 3402 return 0; 3403 } 3404 EXPORT_SYMBOL_GPL(dasd_generic_probe); 3405 3406 void dasd_generic_free_discipline(struct dasd_device *device) 3407 { 3408 /* Forget the discipline information. 
*/ 3409 if (device->discipline) { 3410 if (device->discipline->uncheck_device) 3411 device->discipline->uncheck_device(device); 3412 module_put(device->discipline->owner); 3413 device->discipline = NULL; 3414 } 3415 if (device->base_discipline) { 3416 module_put(device->base_discipline->owner); 3417 device->base_discipline = NULL; 3418 } 3419 } 3420 EXPORT_SYMBOL_GPL(dasd_generic_free_discipline); 3421 3422 /* 3423 * This will one day be called from a global not_oper handler. 3424 * It is also used by driver_unregister during module unload. 3425 */ 3426 void dasd_generic_remove(struct ccw_device *cdev) 3427 { 3428 struct dasd_device *device; 3429 struct dasd_block *block; 3430 3431 device = dasd_device_from_cdev(cdev); 3432 if (IS_ERR(device)) 3433 return; 3434 3435 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags) && 3436 !test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3437 /* Already doing offline processing */ 3438 dasd_put_device(device); 3439 return; 3440 } 3441 /* 3442 * This device is removed unconditionally. Set offline 3443 * flag to prevent dasd_open from opening it while it is 3444 * no quite down yet. 3445 */ 3446 dasd_set_target_state(device, DASD_STATE_NEW); 3447 cdev->handler = NULL; 3448 /* dasd_delete_device destroys the device reference. */ 3449 block = device->block; 3450 dasd_delete_device(device); 3451 /* 3452 * life cycle of block is bound to device, so delete it after 3453 * device was safely removed 3454 */ 3455 if (block) 3456 dasd_free_block(block); 3457 } 3458 EXPORT_SYMBOL_GPL(dasd_generic_remove); 3459 3460 /* 3461 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3462 * the device is detected for the first time and is supposed to be used 3463 * or the user has started activation through sysfs. 3464 */ 3465 int dasd_generic_set_online(struct ccw_device *cdev, 3466 struct dasd_discipline *base_discipline) 3467 { 3468 struct dasd_discipline *discipline; 3469 struct dasd_device *device; 3470 int rc; 3471 3472 /* first online clears initial online feature flag */ 3473 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 3474 device = dasd_create_device(cdev); 3475 if (IS_ERR(device)) 3476 return PTR_ERR(device); 3477 3478 discipline = base_discipline; 3479 if (device->features & DASD_FEATURE_USEDIAG) { 3480 if (!dasd_diag_discipline_pointer) { 3481 /* Try to load the required module. */ 3482 rc = request_module(DASD_DIAG_MOD); 3483 if (rc) { 3484 pr_warn("%s Setting the DASD online failed " 3485 "because the required module %s " 3486 "could not be loaded (rc=%d)\n", 3487 dev_name(&cdev->dev), DASD_DIAG_MOD, 3488 rc); 3489 dasd_delete_device(device); 3490 return -ENODEV; 3491 } 3492 } 3493 /* Module init could have failed, so check again here after 3494 * request_module(). 
*/ 3495 if (!dasd_diag_discipline_pointer) { 3496 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n", 3497 dev_name(&cdev->dev)); 3498 dasd_delete_device(device); 3499 return -ENODEV; 3500 } 3501 discipline = dasd_diag_discipline_pointer; 3502 } 3503 if (!try_module_get(base_discipline->owner)) { 3504 dasd_delete_device(device); 3505 return -EINVAL; 3506 } 3507 if (!try_module_get(discipline->owner)) { 3508 module_put(base_discipline->owner); 3509 dasd_delete_device(device); 3510 return -EINVAL; 3511 } 3512 device->base_discipline = base_discipline; 3513 device->discipline = discipline; 3514 3515 /* check_device will allocate block device if necessary */ 3516 rc = discipline->check_device(device); 3517 if (rc) { 3518 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n", 3519 dev_name(&cdev->dev), discipline->name, rc); 3520 module_put(discipline->owner); 3521 module_put(base_discipline->owner); 3522 dasd_delete_device(device); 3523 return rc; 3524 } 3525 3526 dasd_set_target_state(device, DASD_STATE_ONLINE); 3527 if (device->state <= DASD_STATE_KNOWN) { 3528 pr_warn("%s Setting the DASD online failed because of a missing discipline\n", 3529 dev_name(&cdev->dev)); 3530 rc = -ENODEV; 3531 dasd_set_target_state(device, DASD_STATE_NEW); 3532 if (device->block) 3533 dasd_free_block(device->block); 3534 dasd_delete_device(device); 3535 } else 3536 pr_debug("dasd_generic device %s found\n", 3537 dev_name(&cdev->dev)); 3538 3539 wait_event(dasd_init_waitq, _wait_for_device(device)); 3540 3541 dasd_put_device(device); 3542 return rc; 3543 } 3544 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 3545 3546 int dasd_generic_set_offline(struct ccw_device *cdev) 3547 { 3548 struct dasd_device *device; 3549 struct dasd_block *block; 3550 int max_count, open_count, rc; 3551 unsigned long flags; 3552 3553 rc = 0; 3554 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3555 device = dasd_device_from_cdev_locked(cdev); 3556 if (IS_ERR(device)) { 3557 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3558 return PTR_ERR(device); 3559 } 3560 3561 /* 3562 * We must make sure that this device is currently not in use. 3563 * The open_count is increased for every opener, that includes 3564 * the blkdev_get in dasd_scan_partitions. We are only interested 3565 * in the other openers. 3566 */ 3567 if (device->block) { 3568 max_count = device->block->bdev ? 0 : -1; 3569 open_count = atomic_read(&device->block->open_count); 3570 if (open_count > max_count) { 3571 if (open_count > 0) 3572 pr_warn("%s: The DASD cannot be set offline with open count %i\n", 3573 dev_name(&cdev->dev), open_count); 3574 else 3575 pr_warn("%s: The DASD cannot be set offline while it is in use\n", 3576 dev_name(&cdev->dev)); 3577 rc = -EBUSY; 3578 goto out_err; 3579 } 3580 } 3581 3582 /* 3583 * Test if the offline processing is already running and exit if so. 
3584 * If a safe offline is being processed this could only be a normal 3585 * offline that should be able to overtake the safe offline and 3586 * cancel any I/O we do not want to wait for any longer 3587 */ 3588 if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) { 3589 if (test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3590 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, 3591 &device->flags); 3592 } else { 3593 rc = -EBUSY; 3594 goto out_err; 3595 } 3596 } 3597 set_bit(DASD_FLAG_OFFLINE, &device->flags); 3598 3599 /* 3600 * if safe_offline is called set safe_offline_running flag and 3601 * clear safe_offline so that a call to normal offline 3602 * can overrun safe_offline processing 3603 */ 3604 if (test_and_clear_bit(DASD_FLAG_SAFE_OFFLINE, &device->flags) && 3605 !test_and_set_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3606 /* need to unlock here to wait for outstanding I/O */ 3607 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3608 /* 3609 * If we want to set the device safe offline all IO operations 3610 * should be finished before continuing the offline process 3611 * so sync bdev first and then wait for our queues to become 3612 * empty 3613 */ 3614 if (device->block) { 3615 rc = fsync_bdev(device->block->bdev); 3616 if (rc != 0) 3617 goto interrupted; 3618 } 3619 dasd_schedule_device_bh(device); 3620 rc = wait_event_interruptible(shutdown_waitq, 3621 _wait_for_empty_queues(device)); 3622 if (rc != 0) 3623 goto interrupted; 3624 3625 /* 3626 * check if a normal offline process overtook the offline 3627 * processing in this case simply do nothing beside returning 3628 * that we got interrupted 3629 * otherwise mark safe offline as not running any longer and 3630 * continue with normal offline 3631 */ 3632 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3633 if (!test_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags)) { 3634 rc = -ERESTARTSYS; 3635 goto out_err; 3636 } 3637 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3638 } 3639 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3640 3641 dasd_set_target_state(device, DASD_STATE_NEW); 3642 /* dasd_delete_device destroys the device reference. */ 3643 block = device->block; 3644 dasd_delete_device(device); 3645 /* 3646 * life cycle of block is bound to device, so delete it after 3647 * device was safely removed 3648 */ 3649 if (block) 3650 dasd_free_block(block); 3651 3652 return 0; 3653 3654 interrupted: 3655 /* interrupted by signal */ 3656 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 3657 clear_bit(DASD_FLAG_SAFE_OFFLINE_RUNNING, &device->flags); 3658 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3659 out_err: 3660 dasd_put_device(device); 3661 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 3662 return rc; 3663 } 3664 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 3665 3666 int dasd_generic_last_path_gone(struct dasd_device *device) 3667 { 3668 struct dasd_ccw_req *cqr; 3669 3670 dev_warn(&device->cdev->dev, "No operational channel path is left " 3671 "for the device\n"); 3672 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "last path gone"); 3673 /* First of all call extended error reporting. */ 3674 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3675 3676 if (device->state < DASD_STATE_BASIC) 3677 return 0; 3678 /* Device is active. We want to keep it. 
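 * Requests that were already started are put back to DASD_CQR_QUEUED below so
 * that they can be retried once a path becomes operational again.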
*/ 3679 list_for_each_entry(cqr, &device->ccw_queue, devlist) 3680 if ((cqr->status == DASD_CQR_IN_IO) || 3681 (cqr->status == DASD_CQR_CLEAR_PENDING)) { 3682 cqr->status = DASD_CQR_QUEUED; 3683 cqr->retries++; 3684 } 3685 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 3686 dasd_device_clear_timer(device); 3687 dasd_schedule_device_bh(device); 3688 return 1; 3689 } 3690 EXPORT_SYMBOL_GPL(dasd_generic_last_path_gone); 3691 3692 int dasd_generic_path_operational(struct dasd_device *device) 3693 { 3694 dev_info(&device->cdev->dev, "A channel path to the device has become " 3695 "operational\n"); 3696 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "path operational"); 3697 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 3698 dasd_schedule_device_bh(device); 3699 if (device->block) { 3700 dasd_schedule_block_bh(device->block); 3701 if (device->block->gdp) 3702 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3703 } 3704 3705 if (!device->stopped) 3706 wake_up(&generic_waitq); 3707 3708 return 1; 3709 } 3710 EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3711 3712 int dasd_generic_notify(struct ccw_device *cdev, int event) 3713 { 3714 struct dasd_device *device; 3715 int ret; 3716 3717 device = dasd_device_from_cdev_locked(cdev); 3718 if (IS_ERR(device)) 3719 return 0; 3720 ret = 0; 3721 switch (event) { 3722 case CIO_GONE: 3723 case CIO_BOXED: 3724 case CIO_NO_PATH: 3725 dasd_path_no_path(device); 3726 ret = dasd_generic_last_path_gone(device); 3727 break; 3728 case CIO_OPER: 3729 ret = 1; 3730 if (dasd_path_get_opm(device)) 3731 ret = dasd_generic_path_operational(device); 3732 break; 3733 } 3734 dasd_put_device(device); 3735 return ret; 3736 } 3737 EXPORT_SYMBOL_GPL(dasd_generic_notify); 3738 3739 void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3740 { 3741 struct dasd_device *device; 3742 int chp, oldopm, hpfpm, ifccpm; 3743 3744 device = dasd_device_from_cdev_locked(cdev); 3745 if (IS_ERR(device)) 3746 return; 3747 3748 oldopm = dasd_path_get_opm(device); 3749 for (chp = 0; chp < 8; chp++) { 3750 if (path_event[chp] & PE_PATH_GONE) { 3751 dasd_path_notoper(device, chp); 3752 } 3753 if (path_event[chp] & PE_PATH_AVAILABLE) { 3754 dasd_path_available(device, chp); 3755 dasd_schedule_device_bh(device); 3756 } 3757 if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { 3758 if (!dasd_path_is_operational(device, chp) && 3759 !dasd_path_need_verify(device, chp)) { 3760 /* 3761 * we can not establish a pathgroup on an 3762 * unavailable path, so trigger a path 3763 * verification first 3764 */ 3765 dasd_path_available(device, chp); 3766 dasd_schedule_device_bh(device); 3767 } 3768 DBF_DEV_EVENT(DBF_WARNING, device, "%s", 3769 "Pathgroup re-established\n"); 3770 if (device->discipline->kick_validate) 3771 device->discipline->kick_validate(device); 3772 } 3773 if (path_event[chp] & PE_PATH_FCES_EVENT) { 3774 dasd_path_fcsec_update(device, chp); 3775 dasd_schedule_device_bh(device); 3776 } 3777 } 3778 hpfpm = dasd_path_get_hpfpm(device); 3779 ifccpm = dasd_path_get_ifccpm(device); 3780 if (!dasd_path_get_opm(device) && hpfpm) { 3781 /* 3782 * device has no operational paths but at least one path is 3783 * disabled due to HPF errors 3784 * disable HPF at all and use the path(s) again 3785 */ 3786 if (device->discipline->disable_hpf) 3787 device->discipline->disable_hpf(device); 3788 dasd_device_set_stop_bits(device, DASD_STOPPED_NOT_ACC); 3789 dasd_path_set_tbvpm(device, hpfpm); 3790 dasd_schedule_device_bh(device); 3791 dasd_schedule_requeue(device); 3792 } 
else if (!dasd_path_get_opm(device) && ifccpm) { 3793 /* 3794 * device has no operational paths but at least one path is 3795 * disabled due to IFCC errors 3796 * trigger path verification on paths with IFCC errors 3797 */ 3798 dasd_path_set_tbvpm(device, ifccpm); 3799 dasd_schedule_device_bh(device); 3800 } 3801 if (oldopm && !dasd_path_get_opm(device) && !hpfpm && !ifccpm) { 3802 dev_warn(&device->cdev->dev, 3803 "No verified channel paths remain for the device\n"); 3804 DBF_DEV_EVENT(DBF_WARNING, device, 3805 "%s", "last verified path gone"); 3806 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 3807 dasd_device_set_stop_bits(device, 3808 DASD_STOPPED_DC_WAIT); 3809 } 3810 dasd_put_device(device); 3811 } 3812 EXPORT_SYMBOL_GPL(dasd_generic_path_event); 3813 3814 int dasd_generic_verify_path(struct dasd_device *device, __u8 lpm) 3815 { 3816 if (!dasd_path_get_opm(device) && lpm) { 3817 dasd_path_set_opm(device, lpm); 3818 dasd_generic_path_operational(device); 3819 } else 3820 dasd_path_add_opm(device, lpm); 3821 return 0; 3822 } 3823 EXPORT_SYMBOL_GPL(dasd_generic_verify_path); 3824 3825 void dasd_generic_space_exhaust(struct dasd_device *device, 3826 struct dasd_ccw_req *cqr) 3827 { 3828 dasd_eer_write(device, NULL, DASD_EER_NOSPC); 3829 3830 if (device->state < DASD_STATE_BASIC) 3831 return; 3832 3833 if (cqr->status == DASD_CQR_IN_IO || 3834 cqr->status == DASD_CQR_CLEAR_PENDING) { 3835 cqr->status = DASD_CQR_QUEUED; 3836 cqr->retries++; 3837 } 3838 dasd_device_set_stop_bits(device, DASD_STOPPED_NOSPC); 3839 dasd_device_clear_timer(device); 3840 dasd_schedule_device_bh(device); 3841 } 3842 EXPORT_SYMBOL_GPL(dasd_generic_space_exhaust); 3843 3844 void dasd_generic_space_avail(struct dasd_device *device) 3845 { 3846 dev_info(&device->cdev->dev, "Extent pool space is available\n"); 3847 DBF_DEV_EVENT(DBF_WARNING, device, "%s", "space available"); 3848 3849 dasd_device_remove_stop_bits(device, DASD_STOPPED_NOSPC); 3850 dasd_schedule_device_bh(device); 3851 3852 if (device->block) { 3853 dasd_schedule_block_bh(device->block); 3854 if (device->block->gdp) 3855 blk_mq_run_hw_queues(device->block->gdp->queue, true); 3856 } 3857 if (!device->stopped) 3858 wake_up(&generic_waitq); 3859 } 3860 EXPORT_SYMBOL_GPL(dasd_generic_space_avail); 3861 3862 /* 3863 * clear active requests and requeue them to block layer if possible 3864 */ 3865 int dasd_generic_requeue_all_requests(struct dasd_device *device) 3866 { 3867 struct list_head requeue_queue; 3868 struct dasd_ccw_req *cqr, *n; 3869 struct dasd_ccw_req *refers; 3870 int rc; 3871 3872 INIT_LIST_HEAD(&requeue_queue); 3873 spin_lock_irq(get_ccwdev_lock(device->cdev)); 3874 rc = 0; 3875 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 3876 /* Check status and move request to flush_queue */ 3877 if (cqr->status == DASD_CQR_IN_IO) { 3878 rc = device->discipline->term_IO(cqr); 3879 if (rc) { 3880 /* unable to terminate requeust */ 3881 dev_err(&device->cdev->dev, 3882 "Unable to terminate request %p " 3883 "on suspend\n", cqr); 3884 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3885 dasd_put_device(device); 3886 return rc; 3887 } 3888 } 3889 list_move_tail(&cqr->devlist, &requeue_queue); 3890 } 3891 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 3892 3893 list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) { 3894 wait_event(dasd_flush_wq, 3895 (cqr->status != DASD_CQR_CLEAR_PENDING)); 3896 3897 /* 3898 * requeue requests to blocklayer will only work 3899 * for block device requests 3900 */ 3901 if (_dasd_requeue_request(cqr)) 3902 
/*
 * clear active requests and requeue them to the block layer if possible
 */
int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
	struct list_head requeue_queue;
	struct dasd_ccw_req *cqr, *n;
	struct dasd_ccw_req *refers;
	int rc;

	INIT_LIST_HEAD(&requeue_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to requeue_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &requeue_queue);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));

		/*
		 * requeueing requests to the block layer will only work
		 * for block device requests
		 */
		if (_dasd_requeue_request(cqr))
			continue;

		/* remove requests from device and block queue */
		list_del_init(&cqr->devlist);
		while (cqr->refers != NULL) {
			refers = cqr->refers;
			/* remove the request from the block queue */
			list_del(&cqr->blocklist);
			/* free the finished erp request */
			dasd_free_erp_request(cqr, cqr->memdev);
			cqr = refers;
		}

		/*
		 * _dasd_requeue_request already checked for a valid
		 * blockdevice, no need to check again;
		 * all erp requests (cqr->refers) have a cqr->block
		 * pointer copy from the original cqr
		 */
		list_del_init(&cqr->blocklist);
		cqr->block->base->discipline->free_cp(
			cqr, (struct request *) cqr->callback_data);
	}

	/*
	 * if requests remain then they are internal requests
	 * and go back to the device queue
	 */
	if (!list_empty(&requeue_queue)) {
		/* splice the remaining requests back onto the ccw_queue */
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		list_splice_tail(&requeue_queue, &device->ccw_queue);
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	dasd_schedule_device_bh(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_requeue_all_requests);

static void do_requeue_requests(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  requeue_requests);
	dasd_generic_requeue_all_requests(device);
	dasd_device_remove_stop_bits(device, DASD_STOPPED_NOT_ACC);
	if (device->block)
		dasd_schedule_block_bh(device->block);
	dasd_put_device(device);
}

void dasd_schedule_requeue(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue a call to do_requeue_requests to the kernel event daemon. */
	if (!schedule_work(&device->requeue_requests))
		dasd_put_device(device);
}
EXPORT_SYMBOL(dasd_schedule_requeue);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
				   NULL);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t) cqr->data;
	ccw->flags = 0;
	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer_size, magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	if (ret == 0)
		memcpy(rdc_buffer, cqr->data, rdc_buffer_size);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
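
/*
 * Illustrative sketch (not part of the driver): a discipline's device setup
 * code could use dasd_generic_read_dev_chars() to read the Read Device
 * Characteristics data into a private buffer.  MY_MAGIC, the private
 * structure and the 64-byte buffer size are placeholders for whatever the
 * discipline actually defines.
 *
 *	struct my_private {
 *		char rdc_data[64];
 *	};
 *
 *	static int my_read_rdc(struct dasd_device *device)
 *	{
 *		struct my_private *private = device->private;
 *		int rc;
 *
 *		rc = dasd_generic_read_dev_chars(device, MY_MAGIC,
 *						 private->rdc_data, 64);
 *		if (rc)
 *			dev_warn(&device->cdev->dev,
 *				 "Reading device characteristics failed, rc=%d\n",
 *				 rc);
 *		return rc;
 *	}
 */
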
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2: /* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

void dasd_generic_shutdown(struct ccw_device *cdev)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_schedule_device_bh(device);

	wait_event(shutdown_waitq, _wait_for_empty_queues(device));
}
EXPORT_SYMBOL_GPL(dasd_generic_shutdown);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);
	init_waitqueue_head(&shutdown_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	dasd_statistics_createroot();

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);