/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 1999, 2009
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel_stat.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/itcw.h>
#include <asm/diag.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

#define DASD_SLEEPON_START_TAG	(void *) 1
#define DASD_SLEEPON_END_TAG	(void *) 2

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void do_restore_device(struct work_struct *);
static void do_reload_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	INIT_WORK(&device->restore_device, do_restore_device);
	INIT_WORK(&device->reload_device, do_reload_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;
	mutex_init(&device->state_mutex);

	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block device structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}

/*
 * Free memory of a block device structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}
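
/*
 * Illustration (not part of the driver): the reference-counting rule the
 * state machine above relies on. The reference taken in
 * dasd_state_new_to_known() is dropped again only in
 * dasd_state_known_to_new(), so any device that has left DASD_STATE_NEW
 * holds at least one reference:
 *
 *	dasd_get_device(device);	NEW -> KNOWN: pin the device
 *	...				refcount > 0 while state > NEW
 *	dasd_put_device(device);	KNOWN -> NEW: last reference may drop
 *
 * Deferred work (dasd_kick_device() and friends below) takes an extra
 * temporary reference around each scheduled work item for the same reason.
 */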

/*
 * Allocate and register the gendisk and the device debug area.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the gendisk and the device debug area again. Terminate any
 * running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is, create a fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_flush_request_queue(block);
		dasd_destroy_partitions(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block) {
		dasd_schedule_block_bh(device->block);
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;
	struct gendisk *disk;
	struct disk_part_iter piter;
	struct hd_struct *part;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	if (device->block) {
		disk = device->block->bdev->bd_disk;
		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
		while ((part = disk_part_iter_next(&piter)))
			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
		disk_part_iter_exit(&piter);
	}
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
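
/*
 * Illustration (not part of the driver): the state ladder walked by
 * dasd_increase_state() above and dasd_decrease_state() below. Each
 * dasd_state_*_to_*() helper moves the device exactly one rung towards
 * device->target; dasd_change_state() keeps stepping until state and
 * target meet or a transition fails:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *			    ^
 *			  UNFMT	(unformatted device, entered from BASIC
 *				 when do_analysis fails)
 */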

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc == -EAGAIN)
		return;
	if (rc)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device via the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device, kick_work);

	mutex_lock(&device->state_mutex);
	dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * dasd_reload_device will schedule a call to do_reload_device via the
 * kernel event daemon.
 */
static void do_reload_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  reload_device);
	device->discipline->reload(device);
	dasd_put_device(device);
}

void dasd_reload_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_reload_device to the kernel event daemon. */
	schedule_work(&device->reload_device);
}
EXPORT_SYMBOL(dasd_reload_device);

/*
 * dasd_restore_device will schedule a call to do_restore_device via the
 * kernel event daemon.
 */
static void do_restore_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  restore_device);
	device->cdev->drv->restore(device->cdev);
	dasd_put_device(device);
}

void dasd_restore_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_restore_device to the kernel event daemon. */
	schedule_work(&device->restore_device);
}

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	dasd_get_device(device);
	mutex_lock(&device->state_mutex);
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
	mutex_unlock(&device->state_mutex);
	dasd_put_device(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}
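
/*
 * Illustration (not part of the driver): how dasd_profile_counter() above
 * buckets a value into one of 32 histogram slots. The loop finds the first
 * index for which (value >> (2 + index)) becomes zero, i.e. a logarithmic
 * bucket scheme starting below 4:
 *
 *	value 0..3	-> index 0
 *	value 4..7	-> index 1
 *	value 8..15	-> index 2
 *	...		-> and so on, capped at index 31
 *
 * dasd_profile_end() below feeds request times (in microseconds) and
 * sector counts through this macro.
 */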

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = blk_rq_sectors(req);
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	cqr->magic = magic;
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
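
/*
 * Illustration (not part of the driver): how a discipline typically
 * obtains a one-CCW request from the per-device chunk pool, using only
 * the helpers defined above. The CCW field values are made up for the
 * example:
 *
 *	cqr = dasd_smalloc_request(magic, 1, 32, device);
 *	if (IS_ERR(cqr))
 *		return cqr;
 *	cqr->startdev = device;
 *	cqr->cpaddr->cmd_code = <command>;
 *	cqr->cpaddr->count = 32;
 *	cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
 *	cqr->status = DASD_CQR_FILLED;
 *
 * and releases it again with dasd_sfree_request(cqr, device), which also
 * drops the device reference taken by dasd_smalloc_request().
 */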

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DBF_DEV_EVENT(DBF_WARNING, device,
			      " dasd_ccw_req 0x%08x magic doesn't match"
			      " discipline 0x%08x",
			      cqr->magic,
			      *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			/* internal error 10 - unknown rc */
			snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
			dev_err(&device->cdev->dev, "An error occurred in the "
				"DASD device driver, reason=%s\n", errorstring);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;
	char errorstring[ERRORLENGTH];

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc) {
		cqr->intrc = rc;
		return rc;
	}
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		/* internal error 14 - start_IO ran out of retries */
		sprintf(errorstring, "14 %p", cqr);
		dev_err(&device->cdev->dev, "An error occurred in the DASD "
			"device driver, reason=%s\n", errorstring);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	if (cqr->cpmode == 1) {
		rc = ccw_device_tm_start(device->cdev, cqr->cpaddr,
					 (long) cqr, cqr->lpm);
	} else {
		rc = ccw_device_start(device->cdev, cqr->cpaddr,
				      (long) cqr, cqr->lpm, 0);
	}
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -ENODEV device gone, retry");
		break;
	case -EIO:
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EIO device gone, retry");
		break;
	case -EINVAL:
		/* most likely caused in power management context */
		DBF_DEV_EVENT(DBF_DEBUG, device, "%s",
			      "start_IO: -EINVAL device currently "
			      "not accessible");
		break;
	default:
		/* internal error 11 - unknown rc */
		snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", errorstring);
		BUG();
		break;
	}
	cqr->intrc = rc;
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Set up timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0)
		del_timer(&device->timer);
	else
		mod_timer(&device->timer, jiffies + expires);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev,
				"invalid status in handle_killed_request: "
				"%02x", cqr->status);
		return;
	}

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"unable to get device from cdev");
		return;
	}

	if (!cqr->startdev ||
	    device != cqr->startdev ||
	    strncmp(cqr->startdev->discipline->ebcname,
		    (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		dasd_put_device(device);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}
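
/*
 * Illustration (not part of the driver): how the timer helpers above are
 * meant to be used. The argument is a delta in jiffies, 0 cancels a
 * pending timer, and dasd_device_timeout() simply clears the PENDING stop
 * bit and reschedules the bottom half:
 *
 *	dasd_device_set_timer(device, 50);	fire in 50 jiffies
 *	dasd_device_set_timer(device, 0);	same as clear_timer
 *	dasd_device_clear_timer(device);
 *
 * Both helpers are safe against a timer that is not currently pending,
 * since del_timer()/mod_timer() handle that case themselves.
 */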

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++;
	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"request timed out\n", __func__);
			break;
		default:
			DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: "
					"unknown error %ld\n", __func__,
					PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((scsw_cc(&irb->scsw) == 1) &&
		     (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) &&
		     ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) ||
		      (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND |
						  SCSW_STCTL_ALERT_STATUS))))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		if (cqr)
			memcpy(&cqr->irb, irb, sizeof(*irb));
		device = dasd_device_from_cdev_locked(cdev);
		if (IS_ERR(device))
			return;
		/* ignore unsolicited interrupts for DIAG discipline */
		if (device->discipline == dasd_diag_discipline_pointer) {
			dasd_put_device(device);
			return;
		}
		device->discipline->dump_sense_dbf(device, irb,
						   "unsolicited");
		if ((device->features & DASD_FEATURE_ERPLOG))
			device->discipline->dump_sense(device, cqr,
						       irb);
		dasd_device_clear_timer(device);
		device->discipline->handle_unsolicited_interrupt(device,
								 irb);
		dasd_put_device(device);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s",
				"invalid device in request");
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, "
			      "status %02x", dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {  /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
{
	struct dasd_device *device;

	device = dasd_device_from_cdev_locked(cdev);

	if (IS_ERR(device))
		goto out;
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags) ||
	    device->state != device->target ||
	    !device->discipline->handle_unsolicited_interrupt) {
		dasd_put_device(device);
		goto out;
	}

	dasd_device_clear_timer(device);
	device->discipline->handle_unsolicited_interrupt(device, irb);
	dasd_put_device(device);
out:
	return UC_TODO_RETRY;
}
EXPORT_SYMBOL_GPL(dasd_generic_uc_handler);

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status */
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%lus), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Flushing the DASD request queue "
					"failed for request %p\n", cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}
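
/*
 * Illustration (not part of the driver): the flush protocol implemented
 * by dasd_flush_device_queue() above, in order:
 *
 *	1. under the ccwdev lock, terminate requests that are IN_IO
 *	   (-> CLEAR_PENDING) and mark QUEUED requests CLEARED;
 *	2. drop the lock and wait on dasd_flush_wq until the interrupt
 *	   handler has turned every CLEAR_PENDING request into CLEARED
 *	   (see the SCSW_FCTL_CLEAR_FUNC branch in dasd_int_handler);
 *	3. run __dasd_device_process_final_queue() to map
 *	   SUCCESS/ERROR/CLEARED to DONE/NEED_ERP/TERMINATED and invoke
 *	   the callbacks.
 */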

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped |= bits;
}
EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits);

void dasd_device_remove_stop_bits(struct dasd_device *device, int bits)
{
	device->stopped &= ~bits;
	if (!device->stopped)
		wake_up(&generic_waitq);
}
EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits);

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev));
	wake_up(&generic_waitq);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = (cqr->callback_data == DASD_SLEEPON_END_TAG);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}
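
/*
 * Illustration (not part of the driver): the schedule-once pattern used
 * by dasd_schedule_device_bh() above (and dasd_schedule_block_bh() below).
 * The atomic_cmpxchg() makes "schedule the tasklet" idempotent, and the
 * extra device reference keeps the device alive until the tasklet runs:
 *
 *	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
 *		return;			already scheduled, nothing to do
 *	dasd_get_device(device);	dropped again in the tasklet
 *	tasklet_hi_schedule(&device->tasklet);
 *
 * The tasklet itself resets tasklet_scheduled to 0 before processing, so
 * a wakeup that arrives while it runs schedules it again rather than
 * being lost.
 */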

/*
 * Checks if error recovery is necessary, returns 1 if yes, 0 otherwise.
 */
static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_FILLED)
		return 0;
	device = cqr->startdev;
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->status == DASD_CQR_TERMINATED) {
			device->discipline->handle_terminated_request(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = device->discipline->erp_action(cqr);
			erp_fn(cqr);
			return 1;
		}
		if (cqr->status == DASD_CQR_FAILED)
			dasd_log_sense(cqr, &cqr->irb);
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			return 1;
		}
	}
	return 0;
}

static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr)
{
	if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) {
		if (cqr->refers) /* erp is not done yet */
			return 1;
		return ((cqr->status != DASD_CQR_DONE) &&
			(cqr->status != DASD_CQR_FAILED));
	} else
		return (cqr->status == DASD_CQR_FILLED);
}

static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible)
{
	struct dasd_device *device;
	int rc;
	struct list_head ccw_queue;
	struct dasd_ccw_req *cqr;

	INIT_LIST_HEAD(&ccw_queue);
	maincqr->status = DASD_CQR_FILLED;
	device = maincqr->startdev;
	list_add(&maincqr->blocklist, &ccw_queue);
	for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr);
	     cqr = list_first_entry(&ccw_queue,
				    struct dasd_ccw_req, blocklist)) {

		if (__dasd_sleep_on_erp(cqr))
			continue;
		if (cqr->status != DASD_CQR_FILLED) /* could be failed */
			continue;

		/* Non-temporary stop condition will trigger fail fast */
		if (device->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(device))) {
			cqr->status = DASD_CQR_FAILED;
			continue;
		}

		/* Don't try to start requests if device is stopped */
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, !(device->stopped));
			if (rc == -ERESTARTSYS) {
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, !(device->stopped));

		cqr->callback = dasd_wakeup_cb;
		cqr->callback_data = DASD_SLEEPON_START_TAG;
		dasd_add_request_tail(cqr);
		if (interruptible) {
			rc = wait_event_interruptible(
				generic_waitq, _wait_for_wakeup(cqr));
			if (rc == -ERESTARTSYS) {
				dasd_cancel_req(cqr);
				/* wait (non-interruptible) for final status */
				wait_event(generic_waitq,
					   _wait_for_wakeup(cqr));
				cqr->status = DASD_CQR_FAILED;
				maincqr->intrc = rc;
				continue;
			}
		} else
			wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}

	maincqr->endclk = get_clock();
	if ((maincqr->status != DASD_CQR_DONE) &&
	    (maincqr->intrc != -ERESTARTSYS))
		dasd_log_sense(maincqr, &maincqr->irb);
	if (maincqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (maincqr->intrc)
		rc = maincqr->intrc;
	else
		rc = -EIO;
	return rc;
}
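
/*
 * Illustration (not part of the driver): the life cycle _dasd_sleep_on()
 * above drives for a request. Each pass of the loop either runs one ERP
 * step or (re)submits the head of the local ccw_queue and sleeps on
 * generic_waitq until dasd_wakeup_cb() tags the request:
 *
 *	FILLED -> QUEUED -> IN_IO -> SUCCESS -> DONE
 *			      \-> ERROR   -> NEED_ERP (erp_action; retried
 *				  through a new request via cqr->refers)
 *			      \-> CLEARED -> TERMINATED
 *
 * The loop terminates once the main cqr is DONE or FAILED and no ERP
 * request refers to it any more.
 */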

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 0);
}

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	return _dasd_sleep_on(cqr, 1);
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 0 if the request was cancelled or successfully terminated,
 *	   negative error code if termination failed.
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			dev_err(&device->cdev->dev,
				"Cancelling request %p failed with rc=%d\n",
				cqr, rc);
		} else {
			cqr->stopclk = get_clock();
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}
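
/*
 * Illustration (not part of the driver): the timeout pattern that
 * dasd_cancel_req() is meant for, as used by the interruptible sleep-on
 * path above. The caller submits a request, gives up waiting, cancels,
 * and then still has to wait for the callback before touching the cqr:
 *
 *	dasd_add_request_tail(cqr);
 *	if (wait_event_interruptible(generic_waitq,
 *				     _wait_for_wakeup(cqr)) == -ERESTARTSYS) {
 *		dasd_cancel_req(cqr);
 *		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 *	}
 *
 * because cancellation is asynchronous: the request is only finally
 * returned through the device tasklet and its callback.
 */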


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Set up timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0)
		del_timer(&block->timer);
	else
		mod_timer(&block->timer, jiffies + expires);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	del_timer(&block->timer);
}

/*
 * Process finished error recovery ccw.
 */
static void __dasd_process_erp(struct dasd_device *device,
			       struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		dev_err(&device->cdev->dev, "ERP failed for the DASD\n");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY) {
		while ((req = blk_fetch_request(block->request_queue)))
			__blk_end_request_all(req, -EIO);
		return;
	}
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) {
		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_device_set_stop_bits(basedev,
							  DASD_STOPPED_PENDING);
				spin_unlock_irqrestore(
					get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blk_start_request(req);
			__blk_end_request_all(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blk_start_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	__blk_end_request_all(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			if (IS_ERR(erp_fn(cqr)))
				continue;
			goto restart;
		}

		/* log sense for fatal error */
		if (cqr->status == DASD_CQR_FAILED) {
			dasd_log_sense(cqr, &cqr->irb);
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE);
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(base, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
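/*
 * Illustrative note (hypothetical, not from this file): the failfast check
 * above only fires for requests whose DASD_CQR_FLAGS_FAILFAST bit was set
 * when the ccw request was built, e.g. by a discipline mapping the block
 * layer's no-retry semantics onto it:
 *
 *	if (blk_noretry_request(req))
 *		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
 */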
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device, cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_process_erp(block->base, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedules a run of the block tasklet, which processes the block's
 * ccw queue.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}
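/*
 * Usage sketch (illustrative only): the canonical pattern after changing a
 * stop condition is to clear the bit and then poke the block tasklet, just
 * as dasd_block_timeout() above does:
 *
 *	dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
 *	dasd_schedule_block_bh(block);
 */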
/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Configure the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_hw_sectors(block->request_queue, max);
	blk_queue_max_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
}
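/*
 * Worked example (illustrative, numbers assumed): s2b_shift converts
 * device blocks into 512-byte sectors. For a 4 KiB block size s2b_shift
 * is 3, so a discipline advertising max_blocks = 240 yields
 *
 *	max = 240 << 3 = 1920 sectors	(960 KiB per request)
 *
 * which is the cap handed to blk_queue_max_hw_sectors() above.
 */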
/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base;
	int rc;

	if (!block)
		return -ENODEV;

	base = block->base;
	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	if ((mode & FMODE_WRITE) &&
	    (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) ||
	     (base->features & DASD_FEATURE_READONLY))) {
		rc = -EROFS;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_block *block;
	struct dasd_device *base;

	block = bdev->bd_disk->private_data;
	if (!block)
		return -ENODEV;
	base = block->base;

	if (!base->discipline ||
	    !base->discipline->fill_geometry)
		return -EINVAL;

	base->discipline->fill_geometry(block, geo);
	geo->start = get_start_sect(bdev) >> block->s2b_shift;
	return 0;
}

const struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */
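/*
 * Usage sketch (illustrative only): the gendisk code is expected to wire
 * these operations up when the disk is created, along the lines of
 *
 *	gdp->fops = &dasd_device_operations;
 *	gdp->private_data = block;
 *
 * so that dasd_open() and friends can recover the dasd_block from
 * private_data.
 */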
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Is the device read-only?
 * Note that this function does not report the setting of the
 * readonly device attribute, but how it is configured in z/VM.
 */
int dasd_device_is_ro(struct dasd_device *device)
{
	struct ccw_dev_id dev_id;
	struct diag210 diag_data;
	int rc;

	if (!MACHINE_IS_VM)
		return 0;
	ccw_device_get_id(device->cdev, &dev_id);
	memset(&diag_data, 0, sizeof(diag_data));
	diag_data.vrdcdvno = dev_id.devno;
	diag_data.vrdclen = sizeof(diag_data);
	rc = diag210(&diag_data);
	if (rc == 0 || rc == 2) {
		return diag_data.vrdcvfla & 0x80;
	} else {
		DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d",
			  dev_id.devno, rc);
		return 0;
	}
}
EXPORT_SYMBOL_GPL(dasd_device_is_ro);

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s",
				"dasd_generic_probe: could not add "
				"sysfs entries");
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
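/*
 * Usage sketch (illustrative only): a discipline driver typically wraps
 * this helper in its own ccw probe callback; "my_discipline" is a made-up
 * name standing in for e.g. the ECKD or FBA discipline:
 *
 *	static int my_probe(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_probe(cdev, &my_discipline);
 *	}
 */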
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));

	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);
	return rc;
}
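/*
 * Usage sketch (illustrative only): a discipline driver exposes these
 * generic helpers through its struct ccw_driver; the "my_" names are made
 * up, and the pm hooks defined further below slot in the same way:
 *
 *	static struct ccw_driver my_dasd_driver = {
 *		.name	     = "dasd-mine",
 *		.owner	     = THIS_MODULE,
 *		.ids	     = my_ids,
 *		.probe	     = my_probe,
 *		.remove      = dasd_generic_remove,
 *		.set_online  = my_set_online,
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *		.freeze      = dasd_generic_pm_freeze,
 *		.restore     = dasd_generic_restore_device,
 *	};
 */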
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT);
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT);
		if (device->stopped & DASD_UNRESUMED_PM) {
			dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM);
			dasd_restore_device(device);
			ret = 1;
			break;
		}
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}
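/*
 * Note on the return value (illustrative, based on the common-I/O-layer
 * contract as the author understands it): returning 1 tells the ccw layer
 * that the driver handled the path event and wants to keep the device;
 * returning 0, as for an unknown device above, leaves the event to the
 * ccw layer's default handling, which may tear the device down.
 */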
int dasd_generic_pm_freeze(struct ccw_device *cdev)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head freeze_queue;
	struct dasd_device *device = dasd_device_from_cdev(cdev);

	if (IS_ERR(device))
		return PTR_ERR(device);
	/* disallow new I/O */
	dasd_device_set_stop_bits(device, DASD_STOPPED_PM);
	/* clear active requests */
	INIT_LIST_HEAD(&freeze_queue);
	spin_lock_irq(get_ccwdev_lock(cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		if (cqr->status == DASD_CQR_IN_IO) {
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				dev_err(&device->cdev->dev,
					"Unable to terminate request %p "
					"on suspend\n", cqr);
				spin_unlock_irq(get_ccwdev_lock(cdev));
				dasd_put_device(device);
				return rc;
			}
		}
		list_move_tail(&cqr->devlist, &freeze_queue);
	}

	spin_unlock_irq(get_ccwdev_lock(cdev));

	list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) {
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
		if (cqr->status == DASD_CQR_CLEARED)
			cqr->status = DASD_CQR_QUEUED;
	}
	/* move freeze_queue to start of the ccw_queue */
	spin_lock_irq(get_ccwdev_lock(cdev));
	list_splice_tail(&freeze_queue, &device->ccw_queue);
	spin_unlock_irq(get_ccwdev_lock(cdev));

	if (device->discipline->freeze)
		rc = device->discipline->freeze(device);

	dasd_put_device(device);
	return rc;
}
EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze);

int dasd_generic_restore_device(struct ccw_device *cdev)
{
	struct dasd_device *device = dasd_device_from_cdev(cdev);
	int rc = 0;

	if (IS_ERR(device))
		return PTR_ERR(device);

	/* allow new IO again */
	dasd_device_remove_stop_bits(device,
				     (DASD_STOPPED_PM | DASD_UNRESUMED_PM));

	dasd_schedule_device_bh(device);

	/*
	 * call discipline restore function
	 * if device is stopped do nothing, e.g. for disconnected devices
	 */
	if (device->discipline->restore && !(device->stopped))
		rc = device->discipline->restore(device);
	if (rc || device->stopped)
		/*
		 * if the resume failed for the DASD we put it in
		 * an UNRESUMED stop state
		 */
		device->stopped |= DASD_UNRESUMED_PM;

	if (device->block)
		dasd_schedule_block_bh(device->block);

	dasd_put_device(device);
	return 0;
}
EXPORT_SYMBOL_GPL(dasd_generic_restore_device);

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   int magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;
	unsigned long *idaw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	if (idal_is_needed(rdc_buffer, rdc_buffer_size)) {
		idaw = (unsigned long *) (cqr->data);
		ccw->cda = (__u32)(addr_t) idaw;
		ccw->flags = CCW_FLAG_IDA;
		idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size);
	} else {
		ccw->cda = (__u32)(addr_t) rdc_buffer;
		ccw->flags = 0;
	}

	ccw->count = rdc_buffer_size;
	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	cqr->retries = 256;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, int magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
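/*
 * Usage sketch (illustrative only): a discipline reads the device
 * characteristics into its private structure while checking the device;
 * the buffer type and magic value below are assumptions, not part of
 * this file:
 *
 *	struct my_characteristics rdc_data;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, MY_MAGIC,
 *					 &rdc_data, sizeof(rdc_data));
 *	if (rc)
 *		dev_warn(&device->cdev->dev,
 *			 "Read device characteristics failed, rc=%d\n", rc);
 */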
2790 */ 2791 char *dasd_get_sense(struct irb *irb) 2792 { 2793 struct tsb *tsb = NULL; 2794 char *sense = NULL; 2795 2796 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 2797 if (irb->scsw.tm.tcw) 2798 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 2799 irb->scsw.tm.tcw); 2800 if (tsb && tsb->length == 64 && tsb->flags) 2801 switch (tsb->flags & 0x07) { 2802 case 1: /* tsa_iostat */ 2803 sense = tsb->tsa.iostat.sense; 2804 break; 2805 case 2: /* tsa_ddpc */ 2806 sense = tsb->tsa.ddpc.sense; 2807 break; 2808 default: 2809 /* currently we don't use interrogate data */ 2810 break; 2811 } 2812 } else if (irb->esw.esw0.erw.cons) { 2813 sense = irb->ecw; 2814 } 2815 return sense; 2816 } 2817 EXPORT_SYMBOL_GPL(dasd_get_sense); 2818 2819 static int __init dasd_init(void) 2820 { 2821 int rc; 2822 2823 init_waitqueue_head(&dasd_init_waitq); 2824 init_waitqueue_head(&dasd_flush_wq); 2825 init_waitqueue_head(&generic_waitq); 2826 2827 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2828 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 2829 if (dasd_debug_area == NULL) { 2830 rc = -ENOMEM; 2831 goto failed; 2832 } 2833 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2834 debug_set_level(dasd_debug_area, DBF_WARNING); 2835 2836 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 2837 2838 dasd_diag_discipline_pointer = NULL; 2839 2840 rc = dasd_devmap_init(); 2841 if (rc) 2842 goto failed; 2843 rc = dasd_gendisk_init(); 2844 if (rc) 2845 goto failed; 2846 rc = dasd_parse(); 2847 if (rc) 2848 goto failed; 2849 rc = dasd_eer_init(); 2850 if (rc) 2851 goto failed; 2852 #ifdef CONFIG_PROC_FS 2853 rc = dasd_proc_init(); 2854 if (rc) 2855 goto failed; 2856 #endif 2857 2858 return 0; 2859 failed: 2860 pr_info("The DASD device driver could not be initialized\n"); 2861 dasd_exit(); 2862 return rc; 2863 } 2864 2865 module_init(dasd_init); 2866 module_exit(dasd_exit); 2867 2868 EXPORT_SYMBOL(dasd_debug_area); 2869 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2870 2871 EXPORT_SYMBOL(dasd_add_request_head); 2872 EXPORT_SYMBOL(dasd_add_request_tail); 2873 EXPORT_SYMBOL(dasd_cancel_req); 2874 EXPORT_SYMBOL(dasd_device_clear_timer); 2875 EXPORT_SYMBOL(dasd_block_clear_timer); 2876 EXPORT_SYMBOL(dasd_enable_device); 2877 EXPORT_SYMBOL(dasd_int_handler); 2878 EXPORT_SYMBOL(dasd_kfree_request); 2879 EXPORT_SYMBOL(dasd_kick_device); 2880 EXPORT_SYMBOL(dasd_kmalloc_request); 2881 EXPORT_SYMBOL(dasd_schedule_device_bh); 2882 EXPORT_SYMBOL(dasd_schedule_block_bh); 2883 EXPORT_SYMBOL(dasd_set_target_state); 2884 EXPORT_SYMBOL(dasd_device_set_timer); 2885 EXPORT_SYMBOL(dasd_block_set_timer); 2886 EXPORT_SYMBOL(dasd_sfree_request); 2887 EXPORT_SYMBOL(dasd_sleep_on); 2888 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2889 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2890 EXPORT_SYMBOL(dasd_smalloc_request); 2891 EXPORT_SYMBOL(dasd_start_IO); 2892 EXPORT_SYMBOL(dasd_term_IO); 2893 2894 EXPORT_SYMBOL_GPL(dasd_generic_probe); 2895 EXPORT_SYMBOL_GPL(dasd_generic_remove); 2896 EXPORT_SYMBOL_GPL(dasd_generic_notify); 2897 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2898 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2899 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 2900 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2901 EXPORT_SYMBOL_GPL(dasd_alloc_block); 2902 EXPORT_SYMBOL_GPL(dasd_free_block); 2903