1 /* 2 * File...........: linux/drivers/s390/block/dasd.c 3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> 4 * Horst Hummel <Horst.Hummel@de.ibm.com> 5 * Carsten Otte <Cotte@de.ibm.com> 6 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Bugreports.to..: <Linux390@de.ibm.com> 8 * Copyright IBM Corp. 1999, 2009 9 */ 10 11 #define KMSG_COMPONENT "dasd" 12 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt 13 14 #include <linux/kernel_stat.h> 15 #include <linux/kmod.h> 16 #include <linux/init.h> 17 #include <linux/interrupt.h> 18 #include <linux/ctype.h> 19 #include <linux/major.h> 20 #include <linux/slab.h> 21 #include <linux/buffer_head.h> 22 #include <linux/hdreg.h> 23 #include <linux/async.h> 24 #include <linux/mutex.h> 25 26 #include <asm/ccwdev.h> 27 #include <asm/ebcdic.h> 28 #include <asm/idals.h> 29 #include <asm/itcw.h> 30 #include <asm/diag.h> 31 32 /* This is ugly... */ 33 #define PRINTK_HEADER "dasd:" 34 35 #include "dasd_int.h" 36 /* 37 * SECTION: Constant definitions to be used within this file 38 */ 39 #define DASD_CHANQ_MAX_SIZE 4 40 41 #define DASD_SLEEPON_START_TAG (void *) 1 42 #define DASD_SLEEPON_END_TAG (void *) 2 43 44 /* 45 * SECTION: exported variables of dasd.c 46 */ 47 debug_info_t *dasd_debug_area; 48 struct dasd_discipline *dasd_diag_discipline_pointer; 49 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); 50 51 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>"); 52 MODULE_DESCRIPTION("Linux on S/390 DASD device driver," 53 " Copyright 2000 IBM Corporation"); 54 MODULE_SUPPORTED_DEVICE("dasd"); 55 MODULE_LICENSE("GPL"); 56 57 /* 58 * SECTION: prototypes for static functions of dasd.c 59 */ 60 static int dasd_alloc_queue(struct dasd_block *); 61 static void dasd_setup_queue(struct dasd_block *); 62 static void dasd_free_queue(struct dasd_block *); 63 static void dasd_flush_request_queue(struct dasd_block *); 64 static int dasd_flush_block_queue(struct dasd_block *); 65 static void dasd_device_tasklet(struct dasd_device *); 66 static void dasd_block_tasklet(struct dasd_block *); 67 static void do_kick_device(struct work_struct *); 68 static void do_restore_device(struct work_struct *); 69 static void do_reload_device(struct work_struct *); 70 static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); 71 static void dasd_device_timeout(unsigned long); 72 static void dasd_block_timeout(unsigned long); 73 static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *); 74 75 /* 76 * SECTION: Operations on the device structure. 77 */ 78 static wait_queue_head_t dasd_init_waitq; 79 static wait_queue_head_t dasd_flush_wq; 80 static wait_queue_head_t generic_waitq; 81 82 /* 83 * Allocate memory for a new device structure. 84 */ 85 struct dasd_device *dasd_alloc_device(void) 86 { 87 struct dasd_device *device; 88 89 device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC); 90 if (!device) 91 return ERR_PTR(-ENOMEM); 92 93 /* Get two pages for normal block device operations. */ 94 device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1); 95 if (!device->ccw_mem) { 96 kfree(device); 97 return ERR_PTR(-ENOMEM); 98 } 99 /* Get one page for error recovery. */ 100 device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA); 101 if (!device->erp_mem) { 102 free_pages((unsigned long) device->ccw_mem, 1); 103 kfree(device); 104 return ERR_PTR(-ENOMEM); 105 } 106 107 dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2); 108 dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE); 109 spin_lock_init(&device->mem_lock); 110 atomic_set(&device->tasklet_scheduled, 0); 111 tasklet_init(&device->tasklet, 112 (void (*)(unsigned long)) dasd_device_tasklet, 113 (unsigned long) device); 114 INIT_LIST_HEAD(&device->ccw_queue); 115 init_timer(&device->timer); 116 device->timer.function = dasd_device_timeout; 117 device->timer.data = (unsigned long) device; 118 INIT_WORK(&device->kick_work, do_kick_device); 119 INIT_WORK(&device->restore_device, do_restore_device); 120 INIT_WORK(&device->reload_device, do_reload_device); 121 device->state = DASD_STATE_NEW; 122 device->target = DASD_STATE_NEW; 123 mutex_init(&device->state_mutex); 124 125 return device; 126 } 127 128 /* 129 * Free memory of a device structure. 130 */ 131 void dasd_free_device(struct dasd_device *device) 132 { 133 kfree(device->private); 134 free_page((unsigned long) device->erp_mem); 135 free_pages((unsigned long) device->ccw_mem, 1); 136 kfree(device); 137 } 138 139 /* 140 * Allocate memory for a new device structure. 141 */ 142 struct dasd_block *dasd_alloc_block(void) 143 { 144 struct dasd_block *block; 145 146 block = kzalloc(sizeof(*block), GFP_ATOMIC); 147 if (!block) 148 return ERR_PTR(-ENOMEM); 149 /* open_count = 0 means device online but not in use */ 150 atomic_set(&block->open_count, -1); 151 152 spin_lock_init(&block->request_queue_lock); 153 atomic_set(&block->tasklet_scheduled, 0); 154 tasklet_init(&block->tasklet, 155 (void (*)(unsigned long)) dasd_block_tasklet, 156 (unsigned long) block); 157 INIT_LIST_HEAD(&block->ccw_queue); 158 spin_lock_init(&block->queue_lock); 159 init_timer(&block->timer); 160 block->timer.function = dasd_block_timeout; 161 block->timer.data = (unsigned long) block; 162 163 return block; 164 } 165 166 /* 167 * Free memory of a device structure. 168 */ 169 void dasd_free_block(struct dasd_block *block) 170 { 171 kfree(block); 172 } 173 174 /* 175 * Make a new device known to the system. 176 */ 177 static int dasd_state_new_to_known(struct dasd_device *device) 178 { 179 int rc; 180 181 /* 182 * As long as the device is not in state DASD_STATE_NEW we want to 183 * keep the reference count > 0. 184 */ 185 dasd_get_device(device); 186 187 if (device->block) { 188 rc = dasd_alloc_queue(device->block); 189 if (rc) { 190 dasd_put_device(device); 191 return rc; 192 } 193 } 194 device->state = DASD_STATE_KNOWN; 195 return 0; 196 } 197 198 /* 199 * Let the system forget about a device. 200 */ 201 static int dasd_state_known_to_new(struct dasd_device *device) 202 { 203 /* Disable extended error reporting for this device. */ 204 dasd_eer_disable(device); 205 /* Forget the discipline information. */ 206 if (device->discipline) { 207 if (device->discipline->uncheck_device) 208 device->discipline->uncheck_device(device); 209 module_put(device->discipline->owner); 210 } 211 device->discipline = NULL; 212 if (device->base_discipline) 213 module_put(device->base_discipline->owner); 214 device->base_discipline = NULL; 215 device->state = DASD_STATE_NEW; 216 217 if (device->block) 218 dasd_free_queue(device->block); 219 220 /* Give up reference we took in dasd_state_new_to_known. */ 221 dasd_put_device(device); 222 return 0; 223 } 224 225 /* 226 * Request the irq line for the device. 227 */ 228 static int dasd_state_known_to_basic(struct dasd_device *device) 229 { 230 int rc; 231 232 /* Allocate and register gendisk structure. */ 233 if (device->block) { 234 rc = dasd_gendisk_alloc(device->block); 235 if (rc) 236 return rc; 237 } 238 /* register 'device' debug area, used for all DBF_DEV_XXX calls */ 239 device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1, 240 8 * sizeof(long)); 241 debug_register_view(device->debug_area, &debug_sprintf_view); 242 debug_set_level(device->debug_area, DBF_WARNING); 243 DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); 244 245 device->state = DASD_STATE_BASIC; 246 return 0; 247 } 248 249 /* 250 * Release the irq line for the device. Terminate any running i/o. 251 */ 252 static int dasd_state_basic_to_known(struct dasd_device *device) 253 { 254 int rc; 255 if (device->block) { 256 dasd_gendisk_free(device->block); 257 dasd_block_clear_timer(device->block); 258 } 259 rc = dasd_flush_device_queue(device); 260 if (rc) 261 return rc; 262 dasd_device_clear_timer(device); 263 264 DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); 265 if (device->debug_area != NULL) { 266 debug_unregister(device->debug_area); 267 device->debug_area = NULL; 268 } 269 device->state = DASD_STATE_KNOWN; 270 return 0; 271 } 272 273 /* 274 * Do the initial analysis. The do_analysis function may return 275 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC 276 * until the discipline decides to continue the startup sequence 277 * by calling the function dasd_change_state. The eckd disciplines 278 * uses this to start a ccw that detects the format. The completion 279 * interrupt for this detection ccw uses the kernel event daemon to 280 * trigger the call to dasd_change_state. All this is done in the 281 * discipline code, see dasd_eckd.c. 282 * After the analysis ccw is done (do_analysis returned 0) the block 283 * device is setup. 284 * In case the analysis returns an error, the device setup is stopped 285 * (a fake disk was already added to allow formatting). 286 */ 287 static int dasd_state_basic_to_ready(struct dasd_device *device) 288 { 289 int rc; 290 struct dasd_block *block; 291 292 rc = 0; 293 block = device->block; 294 /* make disk known with correct capacity */ 295 if (block) { 296 if (block->base->discipline->do_analysis != NULL) 297 rc = block->base->discipline->do_analysis(block); 298 if (rc) { 299 if (rc != -EAGAIN) 300 device->state = DASD_STATE_UNFMT; 301 return rc; 302 } 303 dasd_setup_queue(block); 304 set_capacity(block->gdp, 305 block->blocks << block->s2b_shift); 306 device->state = DASD_STATE_READY; 307 rc = dasd_scan_partitions(block); 308 if (rc) 309 device->state = DASD_STATE_BASIC; 310 } else { 311 device->state = DASD_STATE_READY; 312 } 313 return rc; 314 } 315 316 /* 317 * Remove device from block device layer. Destroy dirty buffers. 318 * Forget format information. Check if the target level is basic 319 * and if it is create fake disk for formatting. 320 */ 321 static int dasd_state_ready_to_basic(struct dasd_device *device) 322 { 323 int rc; 324 325 device->state = DASD_STATE_BASIC; 326 if (device->block) { 327 struct dasd_block *block = device->block; 328 rc = dasd_flush_block_queue(block); 329 if (rc) { 330 device->state = DASD_STATE_READY; 331 return rc; 332 } 333 dasd_flush_request_queue(block); 334 dasd_destroy_partitions(block); 335 block->blocks = 0; 336 block->bp_block = 0; 337 block->s2b_shift = 0; 338 } 339 return 0; 340 } 341 342 /* 343 * Back to basic. 344 */ 345 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 346 { 347 device->state = DASD_STATE_BASIC; 348 return 0; 349 } 350 351 /* 352 * Make the device online and schedule the bottom half to start 353 * the requeueing of requests from the linux request queue to the 354 * ccw queue. 355 */ 356 static int 357 dasd_state_ready_to_online(struct dasd_device * device) 358 { 359 int rc; 360 struct gendisk *disk; 361 struct disk_part_iter piter; 362 struct hd_struct *part; 363 364 if (device->discipline->ready_to_online) { 365 rc = device->discipline->ready_to_online(device); 366 if (rc) 367 return rc; 368 } 369 device->state = DASD_STATE_ONLINE; 370 if (device->block) { 371 dasd_schedule_block_bh(device->block); 372 disk = device->block->bdev->bd_disk; 373 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 374 while ((part = disk_part_iter_next(&piter))) 375 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 376 disk_part_iter_exit(&piter); 377 } 378 return 0; 379 } 380 381 /* 382 * Stop the requeueing of requests again. 383 */ 384 static int dasd_state_online_to_ready(struct dasd_device *device) 385 { 386 int rc; 387 struct gendisk *disk; 388 struct disk_part_iter piter; 389 struct hd_struct *part; 390 391 if (device->discipline->online_to_ready) { 392 rc = device->discipline->online_to_ready(device); 393 if (rc) 394 return rc; 395 } 396 device->state = DASD_STATE_READY; 397 if (device->block) { 398 disk = device->block->bdev->bd_disk; 399 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 400 while ((part = disk_part_iter_next(&piter))) 401 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 402 disk_part_iter_exit(&piter); 403 } 404 return 0; 405 } 406 407 /* 408 * Device startup state changes. 409 */ 410 static int dasd_increase_state(struct dasd_device *device) 411 { 412 int rc; 413 414 rc = 0; 415 if (device->state == DASD_STATE_NEW && 416 device->target >= DASD_STATE_KNOWN) 417 rc = dasd_state_new_to_known(device); 418 419 if (!rc && 420 device->state == DASD_STATE_KNOWN && 421 device->target >= DASD_STATE_BASIC) 422 rc = dasd_state_known_to_basic(device); 423 424 if (!rc && 425 device->state == DASD_STATE_BASIC && 426 device->target >= DASD_STATE_READY) 427 rc = dasd_state_basic_to_ready(device); 428 429 if (!rc && 430 device->state == DASD_STATE_UNFMT && 431 device->target > DASD_STATE_UNFMT) 432 rc = -EPERM; 433 434 if (!rc && 435 device->state == DASD_STATE_READY && 436 device->target >= DASD_STATE_ONLINE) 437 rc = dasd_state_ready_to_online(device); 438 439 return rc; 440 } 441 442 /* 443 * Device shutdown state changes. 444 */ 445 static int dasd_decrease_state(struct dasd_device *device) 446 { 447 int rc; 448 449 rc = 0; 450 if (device->state == DASD_STATE_ONLINE && 451 device->target <= DASD_STATE_READY) 452 rc = dasd_state_online_to_ready(device); 453 454 if (!rc && 455 device->state == DASD_STATE_READY && 456 device->target <= DASD_STATE_BASIC) 457 rc = dasd_state_ready_to_basic(device); 458 459 if (!rc && 460 device->state == DASD_STATE_UNFMT && 461 device->target <= DASD_STATE_BASIC) 462 rc = dasd_state_unfmt_to_basic(device); 463 464 if (!rc && 465 device->state == DASD_STATE_BASIC && 466 device->target <= DASD_STATE_KNOWN) 467 rc = dasd_state_basic_to_known(device); 468 469 if (!rc && 470 device->state == DASD_STATE_KNOWN && 471 device->target <= DASD_STATE_NEW) 472 rc = dasd_state_known_to_new(device); 473 474 return rc; 475 } 476 477 /* 478 * This is the main startup/shutdown routine. 479 */ 480 static void dasd_change_state(struct dasd_device *device) 481 { 482 int rc; 483 484 if (device->state == device->target) 485 /* Already where we want to go today... */ 486 return; 487 if (device->state < device->target) 488 rc = dasd_increase_state(device); 489 else 490 rc = dasd_decrease_state(device); 491 if (rc == -EAGAIN) 492 return; 493 if (rc) 494 device->target = device->state; 495 496 if (device->state == device->target) 497 wake_up(&dasd_init_waitq); 498 499 /* let user-space know that the device status changed */ 500 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 501 } 502 503 /* 504 * Kick starter for devices that did not complete the startup/shutdown 505 * procedure or were sleeping because of a pending state. 506 * dasd_kick_device will schedule a call do do_kick_device to the kernel 507 * event daemon. 508 */ 509 static void do_kick_device(struct work_struct *work) 510 { 511 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 512 mutex_lock(&device->state_mutex); 513 dasd_change_state(device); 514 mutex_unlock(&device->state_mutex); 515 dasd_schedule_device_bh(device); 516 dasd_put_device(device); 517 } 518 519 void dasd_kick_device(struct dasd_device *device) 520 { 521 dasd_get_device(device); 522 /* queue call to dasd_kick_device to the kernel event daemon. */ 523 schedule_work(&device->kick_work); 524 } 525 526 /* 527 * dasd_reload_device will schedule a call do do_reload_device to the kernel 528 * event daemon. 529 */ 530 static void do_reload_device(struct work_struct *work) 531 { 532 struct dasd_device *device = container_of(work, struct dasd_device, 533 reload_device); 534 device->discipline->reload(device); 535 dasd_put_device(device); 536 } 537 538 void dasd_reload_device(struct dasd_device *device) 539 { 540 dasd_get_device(device); 541 /* queue call to dasd_reload_device to the kernel event daemon. */ 542 schedule_work(&device->reload_device); 543 } 544 EXPORT_SYMBOL(dasd_reload_device); 545 546 /* 547 * dasd_restore_device will schedule a call do do_restore_device to the kernel 548 * event daemon. 549 */ 550 static void do_restore_device(struct work_struct *work) 551 { 552 struct dasd_device *device = container_of(work, struct dasd_device, 553 restore_device); 554 device->cdev->drv->restore(device->cdev); 555 dasd_put_device(device); 556 } 557 558 void dasd_restore_device(struct dasd_device *device) 559 { 560 dasd_get_device(device); 561 /* queue call to dasd_restore_device to the kernel event daemon. */ 562 schedule_work(&device->restore_device); 563 } 564 565 /* 566 * Set the target state for a device and starts the state change. 567 */ 568 void dasd_set_target_state(struct dasd_device *device, int target) 569 { 570 dasd_get_device(device); 571 mutex_lock(&device->state_mutex); 572 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 573 if (dasd_probeonly && target > DASD_STATE_READY) 574 target = DASD_STATE_READY; 575 if (device->target != target) { 576 if (device->state == target) 577 wake_up(&dasd_init_waitq); 578 device->target = target; 579 } 580 if (device->state != device->target) 581 dasd_change_state(device); 582 mutex_unlock(&device->state_mutex); 583 dasd_put_device(device); 584 } 585 586 /* 587 * Enable devices with device numbers in [from..to]. 588 */ 589 static inline int _wait_for_device(struct dasd_device *device) 590 { 591 return (device->state == device->target); 592 } 593 594 void dasd_enable_device(struct dasd_device *device) 595 { 596 dasd_set_target_state(device, DASD_STATE_ONLINE); 597 if (device->state <= DASD_STATE_KNOWN) 598 /* No discipline for device found. */ 599 dasd_set_target_state(device, DASD_STATE_NEW); 600 /* Now wait for the devices to come up. */ 601 wait_event(dasd_init_waitq, _wait_for_device(device)); 602 } 603 604 /* 605 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 606 */ 607 #ifdef CONFIG_DASD_PROFILE 608 609 struct dasd_profile_info_t dasd_global_profile; 610 unsigned int dasd_profile_level = DASD_PROFILE_OFF; 611 612 /* 613 * Increments counter in global and local profiling structures. 614 */ 615 #define dasd_profile_counter(value, counter, block) \ 616 { \ 617 int index; \ 618 for (index = 0; index < 31 && value >> (2+index); index++); \ 619 dasd_global_profile.counter[index]++; \ 620 block->profile.counter[index]++; \ 621 } 622 623 /* 624 * Add profiling information for cqr before execution. 625 */ 626 static void dasd_profile_start(struct dasd_block *block, 627 struct dasd_ccw_req *cqr, 628 struct request *req) 629 { 630 struct list_head *l; 631 unsigned int counter; 632 633 if (dasd_profile_level != DASD_PROFILE_ON) 634 return; 635 636 /* count the length of the chanq for statistics */ 637 counter = 0; 638 list_for_each(l, &block->ccw_queue) 639 if (++counter >= 31) 640 break; 641 dasd_global_profile.dasd_io_nr_req[counter]++; 642 block->profile.dasd_io_nr_req[counter]++; 643 } 644 645 /* 646 * Add profiling information for cqr after execution. 647 */ 648 static void dasd_profile_end(struct dasd_block *block, 649 struct dasd_ccw_req *cqr, 650 struct request *req) 651 { 652 long strtime, irqtime, endtime, tottime; /* in microseconds */ 653 long tottimeps, sectors; 654 655 if (dasd_profile_level != DASD_PROFILE_ON) 656 return; 657 658 sectors = blk_rq_sectors(req); 659 if (!cqr->buildclk || !cqr->startclk || 660 !cqr->stopclk || !cqr->endclk || 661 !sectors) 662 return; 663 664 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 665 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 666 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 667 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 668 tottimeps = tottime / sectors; 669 670 if (!dasd_global_profile.dasd_io_reqs) 671 memset(&dasd_global_profile, 0, 672 sizeof(struct dasd_profile_info_t)); 673 dasd_global_profile.dasd_io_reqs++; 674 dasd_global_profile.dasd_io_sects += sectors; 675 676 if (!block->profile.dasd_io_reqs) 677 memset(&block->profile, 0, 678 sizeof(struct dasd_profile_info_t)); 679 block->profile.dasd_io_reqs++; 680 block->profile.dasd_io_sects += sectors; 681 682 dasd_profile_counter(sectors, dasd_io_secs, block); 683 dasd_profile_counter(tottime, dasd_io_times, block); 684 dasd_profile_counter(tottimeps, dasd_io_timps, block); 685 dasd_profile_counter(strtime, dasd_io_time1, block); 686 dasd_profile_counter(irqtime, dasd_io_time2, block); 687 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); 688 dasd_profile_counter(endtime, dasd_io_time3, block); 689 } 690 #else 691 #define dasd_profile_start(block, cqr, req) do {} while (0) 692 #define dasd_profile_end(block, cqr, req) do {} while (0) 693 #endif /* CONFIG_DASD_PROFILE */ 694 695 /* 696 * Allocate memory for a channel program with 'cplength' channel 697 * command words and 'datasize' additional space. There are two 698 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed 699 * memory and 2) dasd_smalloc_request uses the static ccw memory 700 * that gets allocated for each device. 701 */ 702 struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength, 703 int datasize, 704 struct dasd_device *device) 705 { 706 struct dasd_ccw_req *cqr; 707 708 /* Sanity checks */ 709 BUG_ON(datasize > PAGE_SIZE || 710 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 711 712 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 713 if (cqr == NULL) 714 return ERR_PTR(-ENOMEM); 715 cqr->cpaddr = NULL; 716 if (cplength > 0) { 717 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 718 GFP_ATOMIC | GFP_DMA); 719 if (cqr->cpaddr == NULL) { 720 kfree(cqr); 721 return ERR_PTR(-ENOMEM); 722 } 723 } 724 cqr->data = NULL; 725 if (datasize > 0) { 726 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 727 if (cqr->data == NULL) { 728 kfree(cqr->cpaddr); 729 kfree(cqr); 730 return ERR_PTR(-ENOMEM); 731 } 732 } 733 cqr->magic = magic; 734 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 735 dasd_get_device(device); 736 return cqr; 737 } 738 739 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, 740 int datasize, 741 struct dasd_device *device) 742 { 743 unsigned long flags; 744 struct dasd_ccw_req *cqr; 745 char *data; 746 int size; 747 748 /* Sanity checks */ 749 BUG_ON(datasize > PAGE_SIZE || 750 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 751 752 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 753 if (cplength > 0) 754 size += cplength * sizeof(struct ccw1); 755 if (datasize > 0) 756 size += datasize; 757 spin_lock_irqsave(&device->mem_lock, flags); 758 cqr = (struct dasd_ccw_req *) 759 dasd_alloc_chunk(&device->ccw_chunks, size); 760 spin_unlock_irqrestore(&device->mem_lock, flags); 761 if (cqr == NULL) 762 return ERR_PTR(-ENOMEM); 763 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 764 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 765 cqr->cpaddr = NULL; 766 if (cplength > 0) { 767 cqr->cpaddr = (struct ccw1 *) data; 768 data += cplength*sizeof(struct ccw1); 769 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 770 } 771 cqr->data = NULL; 772 if (datasize > 0) { 773 cqr->data = data; 774 memset(cqr->data, 0, datasize); 775 } 776 cqr->magic = magic; 777 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 778 dasd_get_device(device); 779 return cqr; 780 } 781 782 /* 783 * Free memory of a channel program. This function needs to free all the 784 * idal lists that might have been created by dasd_set_cda and the 785 * struct dasd_ccw_req itself. 786 */ 787 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 788 { 789 #ifdef CONFIG_64BIT 790 struct ccw1 *ccw; 791 792 /* Clear any idals used for the request. */ 793 ccw = cqr->cpaddr; 794 do { 795 clear_normalized_cda(ccw); 796 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 797 #endif 798 kfree(cqr->cpaddr); 799 kfree(cqr->data); 800 kfree(cqr); 801 dasd_put_device(device); 802 } 803 804 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 805 { 806 unsigned long flags; 807 808 spin_lock_irqsave(&device->mem_lock, flags); 809 dasd_free_chunk(&device->ccw_chunks, cqr); 810 spin_unlock_irqrestore(&device->mem_lock, flags); 811 dasd_put_device(device); 812 } 813 814 /* 815 * Check discipline magic in cqr. 816 */ 817 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) 818 { 819 struct dasd_device *device; 820 821 if (cqr == NULL) 822 return -EINVAL; 823 device = cqr->startdev; 824 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 825 DBF_DEV_EVENT(DBF_WARNING, device, 826 " dasd_ccw_req 0x%08x magic doesn't match" 827 " discipline 0x%08x", 828 cqr->magic, 829 *(unsigned int *) device->discipline->name); 830 return -EINVAL; 831 } 832 return 0; 833 } 834 835 /* 836 * Terminate the current i/o and set the request to clear_pending. 837 * Timer keeps device runnig. 838 * ccw_device_clear can fail if the i/o subsystem 839 * is in a bad mood. 840 */ 841 int dasd_term_IO(struct dasd_ccw_req *cqr) 842 { 843 struct dasd_device *device; 844 int retries, rc; 845 char errorstring[ERRORLENGTH]; 846 847 /* Check the cqr */ 848 rc = dasd_check_cqr(cqr); 849 if (rc) 850 return rc; 851 retries = 0; 852 device = (struct dasd_device *) cqr->startdev; 853 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 854 rc = ccw_device_clear(device->cdev, (long) cqr); 855 switch (rc) { 856 case 0: /* termination successful */ 857 cqr->retries--; 858 cqr->status = DASD_CQR_CLEAR_PENDING; 859 cqr->stopclk = get_clock(); 860 cqr->starttime = 0; 861 DBF_DEV_EVENT(DBF_DEBUG, device, 862 "terminate cqr %p successful", 863 cqr); 864 break; 865 case -ENODEV: 866 DBF_DEV_EVENT(DBF_ERR, device, "%s", 867 "device gone, retry"); 868 break; 869 case -EIO: 870 DBF_DEV_EVENT(DBF_ERR, device, "%s", 871 "I/O error, retry"); 872 break; 873 case -EINVAL: 874 case -EBUSY: 875 DBF_DEV_EVENT(DBF_ERR, device, "%s", 876 "device busy, retry later"); 877 break; 878 default: 879 /* internal error 10 - unknown rc*/ 880 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 881 dev_err(&device->cdev->dev, "An error occurred in the " 882 "DASD device driver, reason=%s\n", errorstring); 883 BUG(); 884 break; 885 } 886 retries++; 887 } 888 dasd_schedule_device_bh(device); 889 return rc; 890 } 891 892 /* 893 * Start the i/o. This start_IO can fail if the channel is really busy. 894 * In that case set up a timer to start the request later. 895 */ 896 int dasd_start_IO(struct dasd_ccw_req *cqr) 897 { 898 struct dasd_device *device; 899 int rc; 900 char errorstring[ERRORLENGTH]; 901 902 /* Check the cqr */ 903 rc = dasd_check_cqr(cqr); 904 if (rc) { 905 cqr->intrc = rc; 906 return rc; 907 } 908 device = (struct dasd_device *) cqr->startdev; 909 if (cqr->retries < 0) { 910 /* internal error 14 - start_IO run out of retries */ 911 sprintf(errorstring, "14 %p", cqr); 912 dev_err(&device->cdev->dev, "An error occurred in the DASD " 913 "device driver, reason=%s\n", errorstring); 914 cqr->status = DASD_CQR_ERROR; 915 return -EIO; 916 } 917 cqr->startclk = get_clock(); 918 cqr->starttime = jiffies; 919 cqr->retries--; 920 if (cqr->cpmode == 1) { 921 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 922 (long) cqr, cqr->lpm); 923 } else { 924 rc = ccw_device_start(device->cdev, cqr->cpaddr, 925 (long) cqr, cqr->lpm, 0); 926 } 927 switch (rc) { 928 case 0: 929 cqr->status = DASD_CQR_IN_IO; 930 break; 931 case -EBUSY: 932 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 933 "start_IO: device busy, retry later"); 934 break; 935 case -ETIMEDOUT: 936 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 937 "start_IO: request timeout, retry later"); 938 break; 939 case -EACCES: 940 /* -EACCES indicates that the request used only a 941 * subset of the available pathes and all these 942 * pathes are gone. 943 * Do a retry with all available pathes. 944 */ 945 cqr->lpm = LPM_ANYPATH; 946 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 947 "start_IO: selected pathes gone," 948 " retry on all pathes"); 949 break; 950 case -ENODEV: 951 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 952 "start_IO: -ENODEV device gone, retry"); 953 break; 954 case -EIO: 955 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 956 "start_IO: -EIO device gone, retry"); 957 break; 958 case -EINVAL: 959 /* most likely caused in power management context */ 960 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 961 "start_IO: -EINVAL device currently " 962 "not accessible"); 963 break; 964 default: 965 /* internal error 11 - unknown rc */ 966 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 967 dev_err(&device->cdev->dev, 968 "An error occurred in the DASD device driver, " 969 "reason=%s\n", errorstring); 970 BUG(); 971 break; 972 } 973 cqr->intrc = rc; 974 return rc; 975 } 976 977 /* 978 * Timeout function for dasd devices. This is used for different purposes 979 * 1) missing interrupt handler for normal operation 980 * 2) delayed start of request where start_IO failed with -EBUSY 981 * 3) timeout for missing state change interrupts 982 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 983 * DASD_CQR_QUEUED for 2) and 3). 984 */ 985 static void dasd_device_timeout(unsigned long ptr) 986 { 987 unsigned long flags; 988 struct dasd_device *device; 989 990 device = (struct dasd_device *) ptr; 991 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 992 /* re-activate request queue */ 993 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 994 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 995 dasd_schedule_device_bh(device); 996 } 997 998 /* 999 * Setup timeout for a device in jiffies. 1000 */ 1001 void dasd_device_set_timer(struct dasd_device *device, int expires) 1002 { 1003 if (expires == 0) 1004 del_timer(&device->timer); 1005 else 1006 mod_timer(&device->timer, jiffies + expires); 1007 } 1008 1009 /* 1010 * Clear timeout for a device. 1011 */ 1012 void dasd_device_clear_timer(struct dasd_device *device) 1013 { 1014 del_timer(&device->timer); 1015 } 1016 1017 static void dasd_handle_killed_request(struct ccw_device *cdev, 1018 unsigned long intparm) 1019 { 1020 struct dasd_ccw_req *cqr; 1021 struct dasd_device *device; 1022 1023 if (!intparm) 1024 return; 1025 cqr = (struct dasd_ccw_req *) intparm; 1026 if (cqr->status != DASD_CQR_IN_IO) { 1027 DBF_EVENT_DEVID(DBF_DEBUG, cdev, 1028 "invalid status in handle_killed_request: " 1029 "%02x", cqr->status); 1030 return; 1031 } 1032 1033 device = dasd_device_from_cdev_locked(cdev); 1034 if (IS_ERR(device)) { 1035 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1036 "unable to get device from cdev"); 1037 return; 1038 } 1039 1040 if (!cqr->startdev || 1041 device != cqr->startdev || 1042 strncmp(cqr->startdev->discipline->ebcname, 1043 (char *) &cqr->magic, 4)) { 1044 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1045 "invalid device in request"); 1046 dasd_put_device(device); 1047 return; 1048 } 1049 1050 /* Schedule request to be retried. */ 1051 cqr->status = DASD_CQR_QUEUED; 1052 1053 dasd_device_clear_timer(device); 1054 dasd_schedule_device_bh(device); 1055 dasd_put_device(device); 1056 } 1057 1058 void dasd_generic_handle_state_change(struct dasd_device *device) 1059 { 1060 /* First of all start sense subsystem status request. */ 1061 dasd_eer_snss(device); 1062 1063 dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING); 1064 dasd_schedule_device_bh(device); 1065 if (device->block) 1066 dasd_schedule_block_bh(device->block); 1067 } 1068 1069 /* 1070 * Interrupt handler for "normal" ssch-io based dasd devices. 1071 */ 1072 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1073 struct irb *irb) 1074 { 1075 struct dasd_ccw_req *cqr, *next; 1076 struct dasd_device *device; 1077 unsigned long long now; 1078 int expires; 1079 1080 kstat_cpu(smp_processor_id()).irqs[IOINT_DAS]++; 1081 if (IS_ERR(irb)) { 1082 switch (PTR_ERR(irb)) { 1083 case -EIO: 1084 break; 1085 case -ETIMEDOUT: 1086 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1087 "request timed out\n", __func__); 1088 break; 1089 default: 1090 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s: " 1091 "unknown error %ld\n", __func__, 1092 PTR_ERR(irb)); 1093 } 1094 dasd_handle_killed_request(cdev, intparm); 1095 return; 1096 } 1097 1098 now = get_clock(); 1099 1100 /* check for unsolicited interrupts */ 1101 cqr = (struct dasd_ccw_req *) intparm; 1102 if (!cqr || ((scsw_cc(&irb->scsw) == 1) && 1103 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && 1104 ((scsw_stctl(&irb->scsw) == SCSW_STCTL_STATUS_PEND) || 1105 (scsw_stctl(&irb->scsw) == (SCSW_STCTL_STATUS_PEND | 1106 SCSW_STCTL_ALERT_STATUS))))) { 1107 if (cqr && cqr->status == DASD_CQR_IN_IO) 1108 cqr->status = DASD_CQR_QUEUED; 1109 if (cqr) 1110 memcpy(&cqr->irb, irb, sizeof(*irb)); 1111 device = dasd_device_from_cdev_locked(cdev); 1112 if (IS_ERR(device)) 1113 return; 1114 /* ignore unsolicited interrupts for DIAG discipline */ 1115 if (device->discipline == dasd_diag_discipline_pointer) { 1116 dasd_put_device(device); 1117 return; 1118 } 1119 device->discipline->dump_sense_dbf(device, irb, 1120 "unsolicited"); 1121 if ((device->features & DASD_FEATURE_ERPLOG)) 1122 device->discipline->dump_sense(device, cqr, 1123 irb); 1124 dasd_device_clear_timer(device); 1125 device->discipline->handle_unsolicited_interrupt(device, 1126 irb); 1127 dasd_put_device(device); 1128 return; 1129 } 1130 1131 device = (struct dasd_device *) cqr->startdev; 1132 if (!device || 1133 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1134 DBF_EVENT_DEVID(DBF_DEBUG, cdev, "%s", 1135 "invalid device in request"); 1136 return; 1137 } 1138 1139 /* Check for clear pending */ 1140 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1141 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1142 cqr->status = DASD_CQR_CLEARED; 1143 dasd_device_clear_timer(device); 1144 wake_up(&dasd_flush_wq); 1145 dasd_schedule_device_bh(device); 1146 return; 1147 } 1148 1149 /* check status - the request might have been killed by dyn detach */ 1150 if (cqr->status != DASD_CQR_IN_IO) { 1151 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1152 "status %02x", dev_name(&cdev->dev), cqr->status); 1153 return; 1154 } 1155 1156 next = NULL; 1157 expires = 0; 1158 if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) && 1159 scsw_cstat(&irb->scsw) == 0) { 1160 /* request was completed successfully */ 1161 cqr->status = DASD_CQR_SUCCESS; 1162 cqr->stopclk = now; 1163 /* Start first request on queue if possible -> fast_io. */ 1164 if (cqr->devlist.next != &device->ccw_queue) { 1165 next = list_entry(cqr->devlist.next, 1166 struct dasd_ccw_req, devlist); 1167 } 1168 } else { /* error */ 1169 memcpy(&cqr->irb, irb, sizeof(struct irb)); 1170 /* log sense for every failed I/O to s390 debugfeature */ 1171 dasd_log_sense_dbf(cqr, irb); 1172 if (device->features & DASD_FEATURE_ERPLOG) { 1173 dasd_log_sense(cqr, irb); 1174 } 1175 1176 /* 1177 * If we don't want complex ERP for this request, then just 1178 * reset this and retry it in the fastpath 1179 */ 1180 if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) && 1181 cqr->retries > 0) { 1182 if (cqr->lpm == LPM_ANYPATH) 1183 DBF_DEV_EVENT(DBF_DEBUG, device, 1184 "default ERP in fastpath " 1185 "(%i retries left)", 1186 cqr->retries); 1187 cqr->lpm = LPM_ANYPATH; 1188 cqr->status = DASD_CQR_QUEUED; 1189 next = cqr; 1190 } else 1191 cqr->status = DASD_CQR_ERROR; 1192 } 1193 if (next && (next->status == DASD_CQR_QUEUED) && 1194 (!device->stopped)) { 1195 if (device->discipline->start_IO(next) == 0) 1196 expires = next->expires; 1197 } 1198 if (expires != 0) 1199 dasd_device_set_timer(device, expires); 1200 else 1201 dasd_device_clear_timer(device); 1202 dasd_schedule_device_bh(device); 1203 } 1204 1205 enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1206 { 1207 struct dasd_device *device; 1208 1209 device = dasd_device_from_cdev_locked(cdev); 1210 1211 if (IS_ERR(device)) 1212 goto out; 1213 if (test_bit(DASD_FLAG_OFFLINE, &device->flags) || 1214 device->state != device->target || 1215 !device->discipline->handle_unsolicited_interrupt){ 1216 dasd_put_device(device); 1217 goto out; 1218 } 1219 1220 dasd_device_clear_timer(device); 1221 device->discipline->handle_unsolicited_interrupt(device, irb); 1222 dasd_put_device(device); 1223 out: 1224 return UC_TODO_RETRY; 1225 } 1226 EXPORT_SYMBOL_GPL(dasd_generic_uc_handler); 1227 1228 /* 1229 * If we have an error on a dasd_block layer request then we cancel 1230 * and return all further requests from the same dasd_block as well. 1231 */ 1232 static void __dasd_device_recovery(struct dasd_device *device, 1233 struct dasd_ccw_req *ref_cqr) 1234 { 1235 struct list_head *l, *n; 1236 struct dasd_ccw_req *cqr; 1237 1238 /* 1239 * only requeue request that came from the dasd_block layer 1240 */ 1241 if (!ref_cqr->block) 1242 return; 1243 1244 list_for_each_safe(l, n, &device->ccw_queue) { 1245 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1246 if (cqr->status == DASD_CQR_QUEUED && 1247 ref_cqr->block == cqr->block) { 1248 cqr->status = DASD_CQR_CLEARED; 1249 } 1250 } 1251 }; 1252 1253 /* 1254 * Remove those ccw requests from the queue that need to be returned 1255 * to the upper layer. 1256 */ 1257 static void __dasd_device_process_ccw_queue(struct dasd_device *device, 1258 struct list_head *final_queue) 1259 { 1260 struct list_head *l, *n; 1261 struct dasd_ccw_req *cqr; 1262 1263 /* Process request with final status. */ 1264 list_for_each_safe(l, n, &device->ccw_queue) { 1265 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1266 1267 /* Stop list processing at the first non-final request. */ 1268 if (cqr->status == DASD_CQR_QUEUED || 1269 cqr->status == DASD_CQR_IN_IO || 1270 cqr->status == DASD_CQR_CLEAR_PENDING) 1271 break; 1272 if (cqr->status == DASD_CQR_ERROR) { 1273 __dasd_device_recovery(device, cqr); 1274 } 1275 /* Rechain finished requests to final queue */ 1276 list_move_tail(&cqr->devlist, final_queue); 1277 } 1278 } 1279 1280 /* 1281 * the cqrs from the final queue are returned to the upper layer 1282 * by setting a dasd_block state and calling the callback function 1283 */ 1284 static void __dasd_device_process_final_queue(struct dasd_device *device, 1285 struct list_head *final_queue) 1286 { 1287 struct list_head *l, *n; 1288 struct dasd_ccw_req *cqr; 1289 struct dasd_block *block; 1290 void (*callback)(struct dasd_ccw_req *, void *data); 1291 void *callback_data; 1292 char errorstring[ERRORLENGTH]; 1293 1294 list_for_each_safe(l, n, final_queue) { 1295 cqr = list_entry(l, struct dasd_ccw_req, devlist); 1296 list_del_init(&cqr->devlist); 1297 block = cqr->block; 1298 callback = cqr->callback; 1299 callback_data = cqr->callback_data; 1300 if (block) 1301 spin_lock_bh(&block->queue_lock); 1302 switch (cqr->status) { 1303 case DASD_CQR_SUCCESS: 1304 cqr->status = DASD_CQR_DONE; 1305 break; 1306 case DASD_CQR_ERROR: 1307 cqr->status = DASD_CQR_NEED_ERP; 1308 break; 1309 case DASD_CQR_CLEARED: 1310 cqr->status = DASD_CQR_TERMINATED; 1311 break; 1312 default: 1313 /* internal error 12 - wrong cqr status*/ 1314 snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status); 1315 dev_err(&device->cdev->dev, 1316 "An error occurred in the DASD device driver, " 1317 "reason=%s\n", errorstring); 1318 BUG(); 1319 } 1320 if (cqr->callback != NULL) 1321 (callback)(cqr, callback_data); 1322 if (block) 1323 spin_unlock_bh(&block->queue_lock); 1324 } 1325 } 1326 1327 /* 1328 * Take a look at the first request on the ccw queue and check 1329 * if it reached its expire time. If so, terminate the IO. 1330 */ 1331 static void __dasd_device_check_expire(struct dasd_device *device) 1332 { 1333 struct dasd_ccw_req *cqr; 1334 1335 if (list_empty(&device->ccw_queue)) 1336 return; 1337 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1338 if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) && 1339 (time_after_eq(jiffies, cqr->expires + cqr->starttime))) { 1340 if (device->discipline->term_IO(cqr) != 0) { 1341 /* Hmpf, try again in 5 sec */ 1342 dev_err(&device->cdev->dev, 1343 "cqr %p timed out (%lus) but cannot be " 1344 "ended, retrying in 5 s\n", 1345 cqr, (cqr->expires/HZ)); 1346 cqr->expires += 5*HZ; 1347 dasd_device_set_timer(device, 5*HZ); 1348 } else { 1349 dev_err(&device->cdev->dev, 1350 "cqr %p timed out (%lus), %i retries " 1351 "remaining\n", cqr, (cqr->expires/HZ), 1352 cqr->retries); 1353 } 1354 } 1355 } 1356 1357 /* 1358 * Take a look at the first request on the ccw queue and check 1359 * if it needs to be started. 1360 */ 1361 static void __dasd_device_start_head(struct dasd_device *device) 1362 { 1363 struct dasd_ccw_req *cqr; 1364 int rc; 1365 1366 if (list_empty(&device->ccw_queue)) 1367 return; 1368 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1369 if (cqr->status != DASD_CQR_QUEUED) 1370 return; 1371 /* when device is stopped, return request to previous layer */ 1372 if (device->stopped) { 1373 cqr->status = DASD_CQR_CLEARED; 1374 dasd_schedule_device_bh(device); 1375 return; 1376 } 1377 1378 rc = device->discipline->start_IO(cqr); 1379 if (rc == 0) 1380 dasd_device_set_timer(device, cqr->expires); 1381 else if (rc == -EACCES) { 1382 dasd_schedule_device_bh(device); 1383 } else 1384 /* Hmpf, try again in 1/2 sec */ 1385 dasd_device_set_timer(device, 50); 1386 } 1387 1388 /* 1389 * Go through all request on the dasd_device request queue, 1390 * terminate them on the cdev if necessary, and return them to the 1391 * submitting layer via callback. 1392 * Note: 1393 * Make sure that all 'submitting layers' still exist when 1394 * this function is called!. In other words, when 'device' is a base 1395 * device then all block layer requests must have been removed before 1396 * via dasd_flush_block_queue. 1397 */ 1398 int dasd_flush_device_queue(struct dasd_device *device) 1399 { 1400 struct dasd_ccw_req *cqr, *n; 1401 int rc; 1402 struct list_head flush_queue; 1403 1404 INIT_LIST_HEAD(&flush_queue); 1405 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1406 rc = 0; 1407 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1408 /* Check status and move request to flush_queue */ 1409 switch (cqr->status) { 1410 case DASD_CQR_IN_IO: 1411 rc = device->discipline->term_IO(cqr); 1412 if (rc) { 1413 /* unable to terminate requeust */ 1414 dev_err(&device->cdev->dev, 1415 "Flushing the DASD request queue " 1416 "failed for request %p\n", cqr); 1417 /* stop flush processing */ 1418 goto finished; 1419 } 1420 break; 1421 case DASD_CQR_QUEUED: 1422 cqr->stopclk = get_clock(); 1423 cqr->status = DASD_CQR_CLEARED; 1424 break; 1425 default: /* no need to modify the others */ 1426 break; 1427 } 1428 list_move_tail(&cqr->devlist, &flush_queue); 1429 } 1430 finished: 1431 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1432 /* 1433 * After this point all requests must be in state CLEAR_PENDING, 1434 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1435 * one of the others. 1436 */ 1437 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1438 wait_event(dasd_flush_wq, 1439 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1440 /* 1441 * Now set each request back to TERMINATED, DONE or NEED_ERP 1442 * and call the callback function of flushed requests 1443 */ 1444 __dasd_device_process_final_queue(device, &flush_queue); 1445 return rc; 1446 } 1447 1448 /* 1449 * Acquire the device lock and process queues for the device. 1450 */ 1451 static void dasd_device_tasklet(struct dasd_device *device) 1452 { 1453 struct list_head final_queue; 1454 1455 atomic_set (&device->tasklet_scheduled, 0); 1456 INIT_LIST_HEAD(&final_queue); 1457 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1458 /* Check expire time of first request on the ccw queue. */ 1459 __dasd_device_check_expire(device); 1460 /* find final requests on ccw queue */ 1461 __dasd_device_process_ccw_queue(device, &final_queue); 1462 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1463 /* Now call the callback function of requests with final status */ 1464 __dasd_device_process_final_queue(device, &final_queue); 1465 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1466 /* Now check if the head of the ccw queue needs to be started. */ 1467 __dasd_device_start_head(device); 1468 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1469 dasd_put_device(device); 1470 } 1471 1472 /* 1473 * Schedules a call to dasd_tasklet over the device tasklet. 1474 */ 1475 void dasd_schedule_device_bh(struct dasd_device *device) 1476 { 1477 /* Protect against rescheduling. */ 1478 if (atomic_cmpxchg (&device->tasklet_scheduled, 0, 1) != 0) 1479 return; 1480 dasd_get_device(device); 1481 tasklet_hi_schedule(&device->tasklet); 1482 } 1483 1484 void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 1485 { 1486 device->stopped |= bits; 1487 } 1488 EXPORT_SYMBOL_GPL(dasd_device_set_stop_bits); 1489 1490 void dasd_device_remove_stop_bits(struct dasd_device *device, int bits) 1491 { 1492 device->stopped &= ~bits; 1493 if (!device->stopped) 1494 wake_up(&generic_waitq); 1495 } 1496 EXPORT_SYMBOL_GPL(dasd_device_remove_stop_bits); 1497 1498 /* 1499 * Queue a request to the head of the device ccw_queue. 1500 * Start the I/O if possible. 1501 */ 1502 void dasd_add_request_head(struct dasd_ccw_req *cqr) 1503 { 1504 struct dasd_device *device; 1505 unsigned long flags; 1506 1507 device = cqr->startdev; 1508 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1509 cqr->status = DASD_CQR_QUEUED; 1510 list_add(&cqr->devlist, &device->ccw_queue); 1511 /* let the bh start the request to keep them in order */ 1512 dasd_schedule_device_bh(device); 1513 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1514 } 1515 1516 /* 1517 * Queue a request to the tail of the device ccw_queue. 1518 * Start the I/O if possible. 1519 */ 1520 void dasd_add_request_tail(struct dasd_ccw_req *cqr) 1521 { 1522 struct dasd_device *device; 1523 unsigned long flags; 1524 1525 device = cqr->startdev; 1526 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1527 cqr->status = DASD_CQR_QUEUED; 1528 list_add_tail(&cqr->devlist, &device->ccw_queue); 1529 /* let the bh start the request to keep them in order */ 1530 dasd_schedule_device_bh(device); 1531 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1532 } 1533 1534 /* 1535 * Wakeup helper for the 'sleep_on' functions. 1536 */ 1537 static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data) 1538 { 1539 spin_lock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 1540 cqr->callback_data = DASD_SLEEPON_END_TAG; 1541 spin_unlock_irq(get_ccwdev_lock(cqr->startdev->cdev)); 1542 wake_up(&generic_waitq); 1543 } 1544 1545 static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) 1546 { 1547 struct dasd_device *device; 1548 int rc; 1549 1550 device = cqr->startdev; 1551 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1552 rc = (cqr->callback_data == DASD_SLEEPON_END_TAG); 1553 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1554 return rc; 1555 } 1556 1557 /* 1558 * checks if error recovery is necessary, returns 1 if yes, 0 otherwise. 1559 */ 1560 static int __dasd_sleep_on_erp(struct dasd_ccw_req *cqr) 1561 { 1562 struct dasd_device *device; 1563 dasd_erp_fn_t erp_fn; 1564 1565 if (cqr->status == DASD_CQR_FILLED) 1566 return 0; 1567 device = cqr->startdev; 1568 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 1569 if (cqr->status == DASD_CQR_TERMINATED) { 1570 device->discipline->handle_terminated_request(cqr); 1571 return 1; 1572 } 1573 if (cqr->status == DASD_CQR_NEED_ERP) { 1574 erp_fn = device->discipline->erp_action(cqr); 1575 erp_fn(cqr); 1576 return 1; 1577 } 1578 if (cqr->status == DASD_CQR_FAILED) 1579 dasd_log_sense(cqr, &cqr->irb); 1580 if (cqr->refers) { 1581 __dasd_process_erp(device, cqr); 1582 return 1; 1583 } 1584 } 1585 return 0; 1586 } 1587 1588 static int __dasd_sleep_on_loop_condition(struct dasd_ccw_req *cqr) 1589 { 1590 if (test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) { 1591 if (cqr->refers) /* erp is not done yet */ 1592 return 1; 1593 return ((cqr->status != DASD_CQR_DONE) && 1594 (cqr->status != DASD_CQR_FAILED)); 1595 } else 1596 return (cqr->status == DASD_CQR_FILLED); 1597 } 1598 1599 static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) 1600 { 1601 struct dasd_device *device; 1602 int rc; 1603 struct list_head ccw_queue; 1604 struct dasd_ccw_req *cqr; 1605 1606 INIT_LIST_HEAD(&ccw_queue); 1607 maincqr->status = DASD_CQR_FILLED; 1608 device = maincqr->startdev; 1609 list_add(&maincqr->blocklist, &ccw_queue); 1610 for (cqr = maincqr; __dasd_sleep_on_loop_condition(cqr); 1611 cqr = list_first_entry(&ccw_queue, 1612 struct dasd_ccw_req, blocklist)) { 1613 1614 if (__dasd_sleep_on_erp(cqr)) 1615 continue; 1616 if (cqr->status != DASD_CQR_FILLED) /* could be failed */ 1617 continue; 1618 1619 /* Non-temporary stop condition will trigger fail fast */ 1620 if (device->stopped & ~DASD_STOPPED_PENDING && 1621 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 1622 (!dasd_eer_enabled(device))) { 1623 cqr->status = DASD_CQR_FAILED; 1624 continue; 1625 } 1626 1627 /* Don't try to start requests if device is stopped */ 1628 if (interruptible) { 1629 rc = wait_event_interruptible( 1630 generic_waitq, !(device->stopped)); 1631 if (rc == -ERESTARTSYS) { 1632 cqr->status = DASD_CQR_FAILED; 1633 maincqr->intrc = rc; 1634 continue; 1635 } 1636 } else 1637 wait_event(generic_waitq, !(device->stopped)); 1638 1639 cqr->callback = dasd_wakeup_cb; 1640 cqr->callback_data = DASD_SLEEPON_START_TAG; 1641 dasd_add_request_tail(cqr); 1642 if (interruptible) { 1643 rc = wait_event_interruptible( 1644 generic_waitq, _wait_for_wakeup(cqr)); 1645 if (rc == -ERESTARTSYS) { 1646 dasd_cancel_req(cqr); 1647 /* wait (non-interruptible) for final status */ 1648 wait_event(generic_waitq, 1649 _wait_for_wakeup(cqr)); 1650 cqr->status = DASD_CQR_FAILED; 1651 maincqr->intrc = rc; 1652 continue; 1653 } 1654 } else 1655 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1656 } 1657 1658 maincqr->endclk = get_clock(); 1659 if ((maincqr->status != DASD_CQR_DONE) && 1660 (maincqr->intrc != -ERESTARTSYS)) 1661 dasd_log_sense(maincqr, &maincqr->irb); 1662 if (maincqr->status == DASD_CQR_DONE) 1663 rc = 0; 1664 else if (maincqr->intrc) 1665 rc = maincqr->intrc; 1666 else 1667 rc = -EIO; 1668 return rc; 1669 } 1670 1671 /* 1672 * Queue a request to the tail of the device ccw_queue and wait for 1673 * it's completion. 1674 */ 1675 int dasd_sleep_on(struct dasd_ccw_req *cqr) 1676 { 1677 return _dasd_sleep_on(cqr, 0); 1678 } 1679 1680 /* 1681 * Queue a request to the tail of the device ccw_queue and wait 1682 * interruptible for it's completion. 1683 */ 1684 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 1685 { 1686 return _dasd_sleep_on(cqr, 1); 1687 } 1688 1689 /* 1690 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 1691 * for eckd devices) the currently running request has to be terminated 1692 * and be put back to status queued, before the special request is added 1693 * to the head of the queue. Then the special request is waited on normally. 1694 */ 1695 static inline int _dasd_term_running_cqr(struct dasd_device *device) 1696 { 1697 struct dasd_ccw_req *cqr; 1698 1699 if (list_empty(&device->ccw_queue)) 1700 return 0; 1701 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1702 return device->discipline->term_IO(cqr); 1703 } 1704 1705 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 1706 { 1707 struct dasd_device *device; 1708 int rc; 1709 1710 device = cqr->startdev; 1711 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1712 rc = _dasd_term_running_cqr(device); 1713 if (rc) { 1714 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1715 return rc; 1716 } 1717 1718 cqr->callback = dasd_wakeup_cb; 1719 cqr->callback_data = DASD_SLEEPON_START_TAG; 1720 cqr->status = DASD_CQR_QUEUED; 1721 list_add(&cqr->devlist, &device->ccw_queue); 1722 1723 /* let the bh start the request to keep them in order */ 1724 dasd_schedule_device_bh(device); 1725 1726 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1727 1728 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1729 1730 if (cqr->status == DASD_CQR_DONE) 1731 rc = 0; 1732 else if (cqr->intrc) 1733 rc = cqr->intrc; 1734 else 1735 rc = -EIO; 1736 return rc; 1737 } 1738 1739 /* 1740 * Cancels a request that was started with dasd_sleep_on_req. 1741 * This is useful to timeout requests. The request will be 1742 * terminated if it is currently in i/o. 1743 * Returns 1 if the request has been terminated. 1744 * 0 if there was no need to terminate the request (not started yet) 1745 * negative error code if termination failed 1746 * Cancellation of a request is an asynchronous operation! The calling 1747 * function has to wait until the request is properly returned via callback. 1748 */ 1749 int dasd_cancel_req(struct dasd_ccw_req *cqr) 1750 { 1751 struct dasd_device *device = cqr->startdev; 1752 unsigned long flags; 1753 int rc; 1754 1755 rc = 0; 1756 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1757 switch (cqr->status) { 1758 case DASD_CQR_QUEUED: 1759 /* request was not started - just set to cleared */ 1760 cqr->status = DASD_CQR_CLEARED; 1761 break; 1762 case DASD_CQR_IN_IO: 1763 /* request in IO - terminate IO and release again */ 1764 rc = device->discipline->term_IO(cqr); 1765 if (rc) { 1766 dev_err(&device->cdev->dev, 1767 "Cancelling request %p failed with rc=%d\n", 1768 cqr, rc); 1769 } else { 1770 cqr->stopclk = get_clock(); 1771 } 1772 break; 1773 default: /* already finished or clear pending - do nothing */ 1774 break; 1775 } 1776 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1777 dasd_schedule_device_bh(device); 1778 return rc; 1779 } 1780 1781 1782 /* 1783 * SECTION: Operations of the dasd_block layer. 1784 */ 1785 1786 /* 1787 * Timeout function for dasd_block. This is used when the block layer 1788 * is waiting for something that may not come reliably, (e.g. a state 1789 * change interrupt) 1790 */ 1791 static void dasd_block_timeout(unsigned long ptr) 1792 { 1793 unsigned long flags; 1794 struct dasd_block *block; 1795 1796 block = (struct dasd_block *) ptr; 1797 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 1798 /* re-activate request queue */ 1799 dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING); 1800 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 1801 dasd_schedule_block_bh(block); 1802 } 1803 1804 /* 1805 * Setup timeout for a dasd_block in jiffies. 1806 */ 1807 void dasd_block_set_timer(struct dasd_block *block, int expires) 1808 { 1809 if (expires == 0) 1810 del_timer(&block->timer); 1811 else 1812 mod_timer(&block->timer, jiffies + expires); 1813 } 1814 1815 /* 1816 * Clear timeout for a dasd_block. 1817 */ 1818 void dasd_block_clear_timer(struct dasd_block *block) 1819 { 1820 del_timer(&block->timer); 1821 } 1822 1823 /* 1824 * Process finished error recovery ccw. 1825 */ 1826 static void __dasd_process_erp(struct dasd_device *device, 1827 struct dasd_ccw_req *cqr) 1828 { 1829 dasd_erp_fn_t erp_fn; 1830 1831 if (cqr->status == DASD_CQR_DONE) 1832 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1833 else 1834 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 1835 erp_fn = device->discipline->erp_postaction(cqr); 1836 erp_fn(cqr); 1837 } 1838 1839 /* 1840 * Fetch requests from the block device queue. 1841 */ 1842 static void __dasd_process_request_queue(struct dasd_block *block) 1843 { 1844 struct request_queue *queue; 1845 struct request *req; 1846 struct dasd_ccw_req *cqr; 1847 struct dasd_device *basedev; 1848 unsigned long flags; 1849 queue = block->request_queue; 1850 basedev = block->base; 1851 /* No queue ? Then there is nothing to do. */ 1852 if (queue == NULL) 1853 return; 1854 1855 /* 1856 * We requeue request from the block device queue to the ccw 1857 * queue only in two states. In state DASD_STATE_READY the 1858 * partition detection is done and we need to requeue requests 1859 * for that. State DASD_STATE_ONLINE is normal block device 1860 * operation. 1861 */ 1862 if (basedev->state < DASD_STATE_READY) { 1863 while ((req = blk_fetch_request(block->request_queue))) 1864 __blk_end_request_all(req, -EIO); 1865 return; 1866 } 1867 /* Now we try to fetch requests from the request queue */ 1868 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1869 if (basedev->features & DASD_FEATURE_READONLY && 1870 rq_data_dir(req) == WRITE) { 1871 DBF_DEV_EVENT(DBF_ERR, basedev, 1872 "Rejecting write request %p", 1873 req); 1874 blk_start_request(req); 1875 __blk_end_request_all(req, -EIO); 1876 continue; 1877 } 1878 cqr = basedev->discipline->build_cp(basedev, block, req); 1879 if (IS_ERR(cqr)) { 1880 if (PTR_ERR(cqr) == -EBUSY) 1881 break; /* normal end condition */ 1882 if (PTR_ERR(cqr) == -ENOMEM) 1883 break; /* terminate request queue loop */ 1884 if (PTR_ERR(cqr) == -EAGAIN) { 1885 /* 1886 * The current request cannot be build right 1887 * now, we have to try later. If this request 1888 * is the head-of-queue we stop the device 1889 * for 1/2 second. 1890 */ 1891 if (!list_empty(&block->ccw_queue)) 1892 break; 1893 spin_lock_irqsave( 1894 get_ccwdev_lock(basedev->cdev), flags); 1895 dasd_device_set_stop_bits(basedev, 1896 DASD_STOPPED_PENDING); 1897 spin_unlock_irqrestore( 1898 get_ccwdev_lock(basedev->cdev), flags); 1899 dasd_block_set_timer(block, HZ/2); 1900 break; 1901 } 1902 DBF_DEV_EVENT(DBF_ERR, basedev, 1903 "CCW creation failed (rc=%ld) " 1904 "on request %p", 1905 PTR_ERR(cqr), req); 1906 blk_start_request(req); 1907 __blk_end_request_all(req, -EIO); 1908 continue; 1909 } 1910 /* 1911 * Note: callback is set to dasd_return_cqr_cb in 1912 * __dasd_block_start_head to cover erp requests as well 1913 */ 1914 cqr->callback_data = (void *) req; 1915 cqr->status = DASD_CQR_FILLED; 1916 blk_start_request(req); 1917 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1918 dasd_profile_start(block, cqr, req); 1919 } 1920 } 1921 1922 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 1923 { 1924 struct request *req; 1925 int status; 1926 int error = 0; 1927 1928 req = (struct request *) cqr->callback_data; 1929 dasd_profile_end(cqr->block, cqr, req); 1930 status = cqr->block->base->discipline->free_cp(cqr, req); 1931 if (status <= 0) 1932 error = status ? status : -EIO; 1933 __blk_end_request_all(req, error); 1934 } 1935 1936 /* 1937 * Process ccw request queue. 1938 */ 1939 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 1940 struct list_head *final_queue) 1941 { 1942 struct list_head *l, *n; 1943 struct dasd_ccw_req *cqr; 1944 dasd_erp_fn_t erp_fn; 1945 unsigned long flags; 1946 struct dasd_device *base = block->base; 1947 1948 restart: 1949 /* Process request with final status. */ 1950 list_for_each_safe(l, n, &block->ccw_queue) { 1951 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 1952 if (cqr->status != DASD_CQR_DONE && 1953 cqr->status != DASD_CQR_FAILED && 1954 cqr->status != DASD_CQR_NEED_ERP && 1955 cqr->status != DASD_CQR_TERMINATED) 1956 continue; 1957 1958 if (cqr->status == DASD_CQR_TERMINATED) { 1959 base->discipline->handle_terminated_request(cqr); 1960 goto restart; 1961 } 1962 1963 /* Process requests that may be recovered */ 1964 if (cqr->status == DASD_CQR_NEED_ERP) { 1965 erp_fn = base->discipline->erp_action(cqr); 1966 if (IS_ERR(erp_fn(cqr))) 1967 continue; 1968 goto restart; 1969 } 1970 1971 /* log sense for fatal error */ 1972 if (cqr->status == DASD_CQR_FAILED) { 1973 dasd_log_sense(cqr, &cqr->irb); 1974 } 1975 1976 /* First of all call extended error reporting. */ 1977 if (dasd_eer_enabled(base) && 1978 cqr->status == DASD_CQR_FAILED) { 1979 dasd_eer_write(base, cqr, DASD_EER_FATALERROR); 1980 1981 /* restart request */ 1982 cqr->status = DASD_CQR_FILLED; 1983 cqr->retries = 255; 1984 spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags); 1985 dasd_device_set_stop_bits(base, DASD_STOPPED_QUIESCE); 1986 spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), 1987 flags); 1988 goto restart; 1989 } 1990 1991 /* Process finished ERP request. */ 1992 if (cqr->refers) { 1993 __dasd_process_erp(base, cqr); 1994 goto restart; 1995 } 1996 1997 /* Rechain finished requests to final queue */ 1998 cqr->endclk = get_clock(); 1999 list_move_tail(&cqr->blocklist, final_queue); 2000 } 2001 } 2002 2003 static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data) 2004 { 2005 dasd_schedule_block_bh(cqr->block); 2006 } 2007 2008 static void __dasd_block_start_head(struct dasd_block *block) 2009 { 2010 struct dasd_ccw_req *cqr; 2011 2012 if (list_empty(&block->ccw_queue)) 2013 return; 2014 /* We allways begin with the first requests on the queue, as some 2015 * of previously started requests have to be enqueued on a 2016 * dasd_device again for error recovery. 2017 */ 2018 list_for_each_entry(cqr, &block->ccw_queue, blocklist) { 2019 if (cqr->status != DASD_CQR_FILLED) 2020 continue; 2021 /* Non-temporary stop condition will trigger fail fast */ 2022 if (block->base->stopped & ~DASD_STOPPED_PENDING && 2023 test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && 2024 (!dasd_eer_enabled(block->base))) { 2025 cqr->status = DASD_CQR_FAILED; 2026 dasd_schedule_block_bh(block); 2027 continue; 2028 } 2029 /* Don't try to start requests if device is stopped */ 2030 if (block->base->stopped) 2031 return; 2032 2033 /* just a fail safe check, should not happen */ 2034 if (!cqr->startdev) 2035 cqr->startdev = block->base; 2036 2037 /* make sure that the requests we submit find their way back */ 2038 cqr->callback = dasd_return_cqr_cb; 2039 2040 dasd_add_request_tail(cqr); 2041 } 2042 } 2043 2044 /* 2045 * Central dasd_block layer routine. Takes requests from the generic 2046 * block layer request queue, creates ccw requests, enqueues them on 2047 * a dasd_device and processes ccw requests that have been returned. 2048 */ 2049 static void dasd_block_tasklet(struct dasd_block *block) 2050 { 2051 struct list_head final_queue; 2052 struct list_head *l, *n; 2053 struct dasd_ccw_req *cqr; 2054 2055 atomic_set(&block->tasklet_scheduled, 0); 2056 INIT_LIST_HEAD(&final_queue); 2057 spin_lock(&block->queue_lock); 2058 /* Finish off requests on ccw queue */ 2059 __dasd_process_block_ccw_queue(block, &final_queue); 2060 spin_unlock(&block->queue_lock); 2061 /* Now call the callback function of requests with final status */ 2062 spin_lock_irq(&block->request_queue_lock); 2063 list_for_each_safe(l, n, &final_queue) { 2064 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 2065 list_del_init(&cqr->blocklist); 2066 __dasd_cleanup_cqr(cqr); 2067 } 2068 spin_lock(&block->queue_lock); 2069 /* Get new request from the block device request queue */ 2070 __dasd_process_request_queue(block); 2071 /* Now check if the head of the ccw queue needs to be started. */ 2072 __dasd_block_start_head(block); 2073 spin_unlock(&block->queue_lock); 2074 spin_unlock_irq(&block->request_queue_lock); 2075 dasd_put_device(block->base); 2076 } 2077 2078 static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) 2079 { 2080 wake_up(&dasd_flush_wq); 2081 } 2082 2083 /* 2084 * Go through all request on the dasd_block request queue, cancel them 2085 * on the respective dasd_device, and return them to the generic 2086 * block layer. 2087 */ 2088 static int dasd_flush_block_queue(struct dasd_block *block) 2089 { 2090 struct dasd_ccw_req *cqr, *n; 2091 int rc, i; 2092 struct list_head flush_queue; 2093 2094 INIT_LIST_HEAD(&flush_queue); 2095 spin_lock_bh(&block->queue_lock); 2096 rc = 0; 2097 restart: 2098 list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) { 2099 /* if this request currently owned by a dasd_device cancel it */ 2100 if (cqr->status >= DASD_CQR_QUEUED) 2101 rc = dasd_cancel_req(cqr); 2102 if (rc < 0) 2103 break; 2104 /* Rechain request (including erp chain) so it won't be 2105 * touched by the dasd_block_tasklet anymore. 2106 * Replace the callback so we notice when the request 2107 * is returned from the dasd_device layer. 2108 */ 2109 cqr->callback = _dasd_wake_block_flush_cb; 2110 for (i = 0; cqr != NULL; cqr = cqr->refers, i++) 2111 list_move_tail(&cqr->blocklist, &flush_queue); 2112 if (i > 1) 2113 /* moved more than one request - need to restart */ 2114 goto restart; 2115 } 2116 spin_unlock_bh(&block->queue_lock); 2117 /* Now call the callback function of flushed requests */ 2118 restart_cb: 2119 list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) { 2120 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED)); 2121 /* Process finished ERP request. */ 2122 if (cqr->refers) { 2123 spin_lock_bh(&block->queue_lock); 2124 __dasd_process_erp(block->base, cqr); 2125 spin_unlock_bh(&block->queue_lock); 2126 /* restart list_for_xx loop since dasd_process_erp 2127 * might remove multiple elements */ 2128 goto restart_cb; 2129 } 2130 /* call the callback function */ 2131 spin_lock_irq(&block->request_queue_lock); 2132 cqr->endclk = get_clock(); 2133 list_del_init(&cqr->blocklist); 2134 __dasd_cleanup_cqr(cqr); 2135 spin_unlock_irq(&block->request_queue_lock); 2136 } 2137 return rc; 2138 } 2139 2140 /* 2141 * Schedules a call to dasd_tasklet over the device tasklet. 2142 */ 2143 void dasd_schedule_block_bh(struct dasd_block *block) 2144 { 2145 /* Protect against rescheduling. */ 2146 if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0) 2147 return; 2148 /* life cycle of block is bound to it's base device */ 2149 dasd_get_device(block->base); 2150 tasklet_hi_schedule(&block->tasklet); 2151 } 2152 2153 2154 /* 2155 * SECTION: external block device operations 2156 * (request queue handling, open, release, etc.) 2157 */ 2158 2159 /* 2160 * Dasd request queue function. Called from ll_rw_blk.c 2161 */ 2162 static void do_dasd_request(struct request_queue *queue) 2163 { 2164 struct dasd_block *block; 2165 2166 block = queue->queuedata; 2167 spin_lock(&block->queue_lock); 2168 /* Get new request from the block device request queue */ 2169 __dasd_process_request_queue(block); 2170 /* Now check if the head of the ccw queue needs to be started. */ 2171 __dasd_block_start_head(block); 2172 spin_unlock(&block->queue_lock); 2173 } 2174 2175 /* 2176 * Allocate and initialize request queue and default I/O scheduler. 2177 */ 2178 static int dasd_alloc_queue(struct dasd_block *block) 2179 { 2180 int rc; 2181 2182 block->request_queue = blk_init_queue(do_dasd_request, 2183 &block->request_queue_lock); 2184 if (block->request_queue == NULL) 2185 return -ENOMEM; 2186 2187 block->request_queue->queuedata = block; 2188 2189 elevator_exit(block->request_queue->elevator); 2190 block->request_queue->elevator = NULL; 2191 rc = elevator_init(block->request_queue, "deadline"); 2192 if (rc) { 2193 blk_cleanup_queue(block->request_queue); 2194 return rc; 2195 } 2196 return 0; 2197 } 2198 2199 /* 2200 * Allocate and initialize request queue. 2201 */ 2202 static void dasd_setup_queue(struct dasd_block *block) 2203 { 2204 int max; 2205 2206 blk_queue_logical_block_size(block->request_queue, block->bp_block); 2207 max = block->base->discipline->max_blocks << block->s2b_shift; 2208 blk_queue_max_hw_sectors(block->request_queue, max); 2209 blk_queue_max_segments(block->request_queue, -1L); 2210 /* with page sized segments we can translate each segement into 2211 * one idaw/tidaw 2212 */ 2213 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 2214 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 2215 } 2216 2217 /* 2218 * Deactivate and free request queue. 2219 */ 2220 static void dasd_free_queue(struct dasd_block *block) 2221 { 2222 if (block->request_queue) { 2223 blk_cleanup_queue(block->request_queue); 2224 block->request_queue = NULL; 2225 } 2226 } 2227 2228 /* 2229 * Flush request on the request queue. 2230 */ 2231 static void dasd_flush_request_queue(struct dasd_block *block) 2232 { 2233 struct request *req; 2234 2235 if (!block->request_queue) 2236 return; 2237 2238 spin_lock_irq(&block->request_queue_lock); 2239 while ((req = blk_fetch_request(block->request_queue))) 2240 __blk_end_request_all(req, -EIO); 2241 spin_unlock_irq(&block->request_queue_lock); 2242 } 2243 2244 static int dasd_open(struct block_device *bdev, fmode_t mode) 2245 { 2246 struct dasd_block *block = bdev->bd_disk->private_data; 2247 struct dasd_device *base; 2248 int rc; 2249 2250 if (!block) 2251 return -ENODEV; 2252 2253 base = block->base; 2254 atomic_inc(&block->open_count); 2255 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { 2256 rc = -ENODEV; 2257 goto unlock; 2258 } 2259 2260 if (!try_module_get(base->discipline->owner)) { 2261 rc = -EINVAL; 2262 goto unlock; 2263 } 2264 2265 if (dasd_probeonly) { 2266 dev_info(&base->cdev->dev, 2267 "Accessing the DASD failed because it is in " 2268 "probeonly mode\n"); 2269 rc = -EPERM; 2270 goto out; 2271 } 2272 2273 if (base->state <= DASD_STATE_BASIC) { 2274 DBF_DEV_EVENT(DBF_ERR, base, " %s", 2275 " Cannot open unrecognized device"); 2276 rc = -ENODEV; 2277 goto out; 2278 } 2279 2280 if ((mode & FMODE_WRITE) && 2281 (test_bit(DASD_FLAG_DEVICE_RO, &base->flags) || 2282 (base->features & DASD_FEATURE_READONLY))) { 2283 rc = -EROFS; 2284 goto out; 2285 } 2286 2287 return 0; 2288 2289 out: 2290 module_put(base->discipline->owner); 2291 unlock: 2292 atomic_dec(&block->open_count); 2293 return rc; 2294 } 2295 2296 static int dasd_release(struct gendisk *disk, fmode_t mode) 2297 { 2298 struct dasd_block *block = disk->private_data; 2299 2300 atomic_dec(&block->open_count); 2301 module_put(block->base->discipline->owner); 2302 return 0; 2303 } 2304 2305 /* 2306 * Return disk geometry. 2307 */ 2308 static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo) 2309 { 2310 struct dasd_block *block; 2311 struct dasd_device *base; 2312 2313 block = bdev->bd_disk->private_data; 2314 if (!block) 2315 return -ENODEV; 2316 base = block->base; 2317 2318 if (!base->discipline || 2319 !base->discipline->fill_geometry) 2320 return -EINVAL; 2321 2322 base->discipline->fill_geometry(block, geo); 2323 geo->start = get_start_sect(bdev) >> block->s2b_shift; 2324 return 0; 2325 } 2326 2327 const struct block_device_operations 2328 dasd_device_operations = { 2329 .owner = THIS_MODULE, 2330 .open = dasd_open, 2331 .release = dasd_release, 2332 .ioctl = dasd_ioctl, 2333 .compat_ioctl = dasd_ioctl, 2334 .getgeo = dasd_getgeo, 2335 }; 2336 2337 /******************************************************************************* 2338 * end of block device operations 2339 */ 2340 2341 static void 2342 dasd_exit(void) 2343 { 2344 #ifdef CONFIG_PROC_FS 2345 dasd_proc_exit(); 2346 #endif 2347 dasd_eer_exit(); 2348 if (dasd_page_cache != NULL) { 2349 kmem_cache_destroy(dasd_page_cache); 2350 dasd_page_cache = NULL; 2351 } 2352 dasd_gendisk_exit(); 2353 dasd_devmap_exit(); 2354 if (dasd_debug_area != NULL) { 2355 debug_unregister(dasd_debug_area); 2356 dasd_debug_area = NULL; 2357 } 2358 } 2359 2360 /* 2361 * SECTION: common functions for ccw_driver use 2362 */ 2363 2364 /* 2365 * Is the device read-only? 2366 * Note that this function does not report the setting of the 2367 * readonly device attribute, but how it is configured in z/VM. 2368 */ 2369 int dasd_device_is_ro(struct dasd_device *device) 2370 { 2371 struct ccw_dev_id dev_id; 2372 struct diag210 diag_data; 2373 int rc; 2374 2375 if (!MACHINE_IS_VM) 2376 return 0; 2377 ccw_device_get_id(device->cdev, &dev_id); 2378 memset(&diag_data, 0, sizeof(diag_data)); 2379 diag_data.vrdcdvno = dev_id.devno; 2380 diag_data.vrdclen = sizeof(diag_data); 2381 rc = diag210(&diag_data); 2382 if (rc == 0 || rc == 2) { 2383 return diag_data.vrdcvfla & 0x80; 2384 } else { 2385 DBF_EVENT(DBF_WARNING, "diag210 failed for dev=%04x with rc=%d", 2386 dev_id.devno, rc); 2387 return 0; 2388 } 2389 } 2390 EXPORT_SYMBOL_GPL(dasd_device_is_ro); 2391 2392 static void dasd_generic_auto_online(void *data, async_cookie_t cookie) 2393 { 2394 struct ccw_device *cdev = data; 2395 int ret; 2396 2397 ret = ccw_device_set_online(cdev); 2398 if (ret) 2399 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 2400 dev_name(&cdev->dev), ret); 2401 } 2402 2403 /* 2404 * Initial attempt at a probe function. this can be simplified once 2405 * the other detection code is gone. 2406 */ 2407 int dasd_generic_probe(struct ccw_device *cdev, 2408 struct dasd_discipline *discipline) 2409 { 2410 int ret; 2411 2412 ret = dasd_add_sysfs_files(cdev); 2413 if (ret) { 2414 DBF_EVENT_DEVID(DBF_WARNING, cdev, "%s", 2415 "dasd_generic_probe: could not add " 2416 "sysfs entries"); 2417 return ret; 2418 } 2419 cdev->handler = &dasd_int_handler; 2420 2421 /* 2422 * Automatically online either all dasd devices (dasd_autodetect) 2423 * or all devices specified with dasd= parameters during 2424 * initial probe. 2425 */ 2426 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 2427 (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0)) 2428 async_schedule(dasd_generic_auto_online, cdev); 2429 return 0; 2430 } 2431 2432 /* 2433 * This will one day be called from a global not_oper handler. 2434 * It is also used by driver_unregister during module unload. 2435 */ 2436 void dasd_generic_remove(struct ccw_device *cdev) 2437 { 2438 struct dasd_device *device; 2439 struct dasd_block *block; 2440 2441 cdev->handler = NULL; 2442 2443 dasd_remove_sysfs_files(cdev); 2444 device = dasd_device_from_cdev(cdev); 2445 if (IS_ERR(device)) 2446 return; 2447 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2448 /* Already doing offline processing */ 2449 dasd_put_device(device); 2450 return; 2451 } 2452 /* 2453 * This device is removed unconditionally. Set offline 2454 * flag to prevent dasd_open from opening it while it is 2455 * no quite down yet. 2456 */ 2457 dasd_set_target_state(device, DASD_STATE_NEW); 2458 /* dasd_delete_device destroys the device reference. */ 2459 block = device->block; 2460 device->block = NULL; 2461 dasd_delete_device(device); 2462 /* 2463 * life cycle of block is bound to device, so delete it after 2464 * device was safely removed 2465 */ 2466 if (block) 2467 dasd_free_block(block); 2468 } 2469 2470 /* 2471 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 2472 * the device is detected for the first time and is supposed to be used 2473 * or the user has started activation through sysfs. 2474 */ 2475 int dasd_generic_set_online(struct ccw_device *cdev, 2476 struct dasd_discipline *base_discipline) 2477 { 2478 struct dasd_discipline *discipline; 2479 struct dasd_device *device; 2480 int rc; 2481 2482 /* first online clears initial online feature flag */ 2483 dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0); 2484 device = dasd_create_device(cdev); 2485 if (IS_ERR(device)) 2486 return PTR_ERR(device); 2487 2488 discipline = base_discipline; 2489 if (device->features & DASD_FEATURE_USEDIAG) { 2490 if (!dasd_diag_discipline_pointer) { 2491 pr_warning("%s Setting the DASD online failed because " 2492 "of missing DIAG discipline\n", 2493 dev_name(&cdev->dev)); 2494 dasd_delete_device(device); 2495 return -ENODEV; 2496 } 2497 discipline = dasd_diag_discipline_pointer; 2498 } 2499 if (!try_module_get(base_discipline->owner)) { 2500 dasd_delete_device(device); 2501 return -EINVAL; 2502 } 2503 if (!try_module_get(discipline->owner)) { 2504 module_put(base_discipline->owner); 2505 dasd_delete_device(device); 2506 return -EINVAL; 2507 } 2508 device->base_discipline = base_discipline; 2509 device->discipline = discipline; 2510 2511 /* check_device will allocate block device if necessary */ 2512 rc = discipline->check_device(device); 2513 if (rc) { 2514 pr_warning("%s Setting the DASD online with discipline %s " 2515 "failed with rc=%i\n", 2516 dev_name(&cdev->dev), discipline->name, rc); 2517 module_put(discipline->owner); 2518 module_put(base_discipline->owner); 2519 dasd_delete_device(device); 2520 return rc; 2521 } 2522 2523 dasd_set_target_state(device, DASD_STATE_ONLINE); 2524 if (device->state <= DASD_STATE_KNOWN) { 2525 pr_warning("%s Setting the DASD online failed because of a " 2526 "missing discipline\n", dev_name(&cdev->dev)); 2527 rc = -ENODEV; 2528 dasd_set_target_state(device, DASD_STATE_NEW); 2529 if (device->block) 2530 dasd_free_block(device->block); 2531 dasd_delete_device(device); 2532 } else 2533 pr_debug("dasd_generic device %s found\n", 2534 dev_name(&cdev->dev)); 2535 2536 wait_event(dasd_init_waitq, _wait_for_device(device)); 2537 2538 dasd_put_device(device); 2539 return rc; 2540 } 2541 2542 int dasd_generic_set_offline(struct ccw_device *cdev) 2543 { 2544 struct dasd_device *device; 2545 struct dasd_block *block; 2546 int max_count, open_count; 2547 2548 device = dasd_device_from_cdev(cdev); 2549 if (IS_ERR(device)) 2550 return PTR_ERR(device); 2551 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 2552 /* Already doing offline processing */ 2553 dasd_put_device(device); 2554 return 0; 2555 } 2556 /* 2557 * We must make sure that this device is currently not in use. 2558 * The open_count is increased for every opener, that includes 2559 * the blkdev_get in dasd_scan_partitions. We are only interested 2560 * in the other openers. 2561 */ 2562 if (device->block) { 2563 max_count = device->block->bdev ? 0 : -1; 2564 open_count = atomic_read(&device->block->open_count); 2565 if (open_count > max_count) { 2566 if (open_count > 0) 2567 pr_warning("%s: The DASD cannot be set offline " 2568 "with open count %i\n", 2569 dev_name(&cdev->dev), open_count); 2570 else 2571 pr_warning("%s: The DASD cannot be set offline " 2572 "while it is in use\n", 2573 dev_name(&cdev->dev)); 2574 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 2575 dasd_put_device(device); 2576 return -EBUSY; 2577 } 2578 } 2579 dasd_set_target_state(device, DASD_STATE_NEW); 2580 /* dasd_delete_device destroys the device reference. */ 2581 block = device->block; 2582 device->block = NULL; 2583 dasd_delete_device(device); 2584 /* 2585 * life cycle of block is bound to device, so delete it after 2586 * device was safely removed 2587 */ 2588 if (block) 2589 dasd_free_block(block); 2590 return 0; 2591 } 2592 2593 int dasd_generic_notify(struct ccw_device *cdev, int event) 2594 { 2595 struct dasd_device *device; 2596 struct dasd_ccw_req *cqr; 2597 int ret; 2598 2599 device = dasd_device_from_cdev_locked(cdev); 2600 if (IS_ERR(device)) 2601 return 0; 2602 ret = 0; 2603 switch (event) { 2604 case CIO_GONE: 2605 case CIO_BOXED: 2606 case CIO_NO_PATH: 2607 /* First of all call extended error reporting. */ 2608 dasd_eer_write(device, NULL, DASD_EER_NOPATH); 2609 2610 if (device->state < DASD_STATE_BASIC) 2611 break; 2612 /* Device is active. We want to keep it. */ 2613 list_for_each_entry(cqr, &device->ccw_queue, devlist) 2614 if (cqr->status == DASD_CQR_IN_IO) { 2615 cqr->status = DASD_CQR_QUEUED; 2616 cqr->retries++; 2617 } 2618 dasd_device_set_stop_bits(device, DASD_STOPPED_DC_WAIT); 2619 dasd_device_clear_timer(device); 2620 dasd_schedule_device_bh(device); 2621 ret = 1; 2622 break; 2623 case CIO_OPER: 2624 /* FIXME: add a sanity check. */ 2625 dasd_device_remove_stop_bits(device, DASD_STOPPED_DC_WAIT); 2626 if (device->stopped & DASD_UNRESUMED_PM) { 2627 dasd_device_remove_stop_bits(device, DASD_UNRESUMED_PM); 2628 dasd_restore_device(device); 2629 ret = 1; 2630 break; 2631 } 2632 dasd_schedule_device_bh(device); 2633 if (device->block) 2634 dasd_schedule_block_bh(device->block); 2635 ret = 1; 2636 break; 2637 } 2638 dasd_put_device(device); 2639 return ret; 2640 } 2641 2642 int dasd_generic_pm_freeze(struct ccw_device *cdev) 2643 { 2644 struct dasd_ccw_req *cqr, *n; 2645 int rc; 2646 struct list_head freeze_queue; 2647 struct dasd_device *device = dasd_device_from_cdev(cdev); 2648 2649 if (IS_ERR(device)) 2650 return PTR_ERR(device); 2651 /* disallow new I/O */ 2652 dasd_device_set_stop_bits(device, DASD_STOPPED_PM); 2653 /* clear active requests */ 2654 INIT_LIST_HEAD(&freeze_queue); 2655 spin_lock_irq(get_ccwdev_lock(cdev)); 2656 rc = 0; 2657 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 2658 /* Check status and move request to flush_queue */ 2659 if (cqr->status == DASD_CQR_IN_IO) { 2660 rc = device->discipline->term_IO(cqr); 2661 if (rc) { 2662 /* unable to terminate requeust */ 2663 dev_err(&device->cdev->dev, 2664 "Unable to terminate request %p " 2665 "on suspend\n", cqr); 2666 spin_unlock_irq(get_ccwdev_lock(cdev)); 2667 dasd_put_device(device); 2668 return rc; 2669 } 2670 } 2671 list_move_tail(&cqr->devlist, &freeze_queue); 2672 } 2673 2674 spin_unlock_irq(get_ccwdev_lock(cdev)); 2675 2676 list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { 2677 wait_event(dasd_flush_wq, 2678 (cqr->status != DASD_CQR_CLEAR_PENDING)); 2679 if (cqr->status == DASD_CQR_CLEARED) 2680 cqr->status = DASD_CQR_QUEUED; 2681 } 2682 /* move freeze_queue to start of the ccw_queue */ 2683 spin_lock_irq(get_ccwdev_lock(cdev)); 2684 list_splice_tail(&freeze_queue, &device->ccw_queue); 2685 spin_unlock_irq(get_ccwdev_lock(cdev)); 2686 2687 if (device->discipline->freeze) 2688 rc = device->discipline->freeze(device); 2689 2690 dasd_put_device(device); 2691 return rc; 2692 } 2693 EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); 2694 2695 int dasd_generic_restore_device(struct ccw_device *cdev) 2696 { 2697 struct dasd_device *device = dasd_device_from_cdev(cdev); 2698 int rc = 0; 2699 2700 if (IS_ERR(device)) 2701 return PTR_ERR(device); 2702 2703 /* allow new IO again */ 2704 dasd_device_remove_stop_bits(device, 2705 (DASD_STOPPED_PM | DASD_UNRESUMED_PM)); 2706 2707 dasd_schedule_device_bh(device); 2708 2709 /* 2710 * call discipline restore function 2711 * if device is stopped do nothing e.g. for disconnected devices 2712 */ 2713 if (device->discipline->restore && !(device->stopped)) 2714 rc = device->discipline->restore(device); 2715 if (rc || device->stopped) 2716 /* 2717 * if the resume failed for the DASD we put it in 2718 * an UNRESUMED stop state 2719 */ 2720 device->stopped |= DASD_UNRESUMED_PM; 2721 2722 if (device->block) 2723 dasd_schedule_block_bh(device->block); 2724 2725 dasd_put_device(device); 2726 return 0; 2727 } 2728 EXPORT_SYMBOL_GPL(dasd_generic_restore_device); 2729 2730 static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, 2731 void *rdc_buffer, 2732 int rdc_buffer_size, 2733 int magic) 2734 { 2735 struct dasd_ccw_req *cqr; 2736 struct ccw1 *ccw; 2737 unsigned long *idaw; 2738 2739 cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device); 2740 2741 if (IS_ERR(cqr)) { 2742 /* internal error 13 - Allocating the RDC request failed*/ 2743 dev_err(&device->cdev->dev, 2744 "An error occurred in the DASD device driver, " 2745 "reason=%s\n", "13"); 2746 return cqr; 2747 } 2748 2749 ccw = cqr->cpaddr; 2750 ccw->cmd_code = CCW_CMD_RDC; 2751 if (idal_is_needed(rdc_buffer, rdc_buffer_size)) { 2752 idaw = (unsigned long *) (cqr->data); 2753 ccw->cda = (__u32)(addr_t) idaw; 2754 ccw->flags = CCW_FLAG_IDA; 2755 idaw = idal_create_words(idaw, rdc_buffer, rdc_buffer_size); 2756 } else { 2757 ccw->cda = (__u32)(addr_t) rdc_buffer; 2758 ccw->flags = 0; 2759 } 2760 2761 ccw->count = rdc_buffer_size; 2762 cqr->startdev = device; 2763 cqr->memdev = device; 2764 cqr->expires = 10*HZ; 2765 cqr->retries = 256; 2766 cqr->buildclk = get_clock(); 2767 cqr->status = DASD_CQR_FILLED; 2768 return cqr; 2769 } 2770 2771 2772 int dasd_generic_read_dev_chars(struct dasd_device *device, int magic, 2773 void *rdc_buffer, int rdc_buffer_size) 2774 { 2775 int ret; 2776 struct dasd_ccw_req *cqr; 2777 2778 cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size, 2779 magic); 2780 if (IS_ERR(cqr)) 2781 return PTR_ERR(cqr); 2782 2783 ret = dasd_sleep_on(cqr); 2784 dasd_sfree_request(cqr, cqr->memdev); 2785 return ret; 2786 } 2787 EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars); 2788 2789 /* 2790 * In command mode and transport mode we need to look for sense 2791 * data in different places. The sense data itself is allways 2792 * an array of 32 bytes, so we can unify the sense data access 2793 * for both modes. 2794 */ 2795 char *dasd_get_sense(struct irb *irb) 2796 { 2797 struct tsb *tsb = NULL; 2798 char *sense = NULL; 2799 2800 if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) { 2801 if (irb->scsw.tm.tcw) 2802 tsb = tcw_get_tsb((struct tcw *)(unsigned long) 2803 irb->scsw.tm.tcw); 2804 if (tsb && tsb->length == 64 && tsb->flags) 2805 switch (tsb->flags & 0x07) { 2806 case 1: /* tsa_iostat */ 2807 sense = tsb->tsa.iostat.sense; 2808 break; 2809 case 2: /* tsa_ddpc */ 2810 sense = tsb->tsa.ddpc.sense; 2811 break; 2812 default: 2813 /* currently we don't use interrogate data */ 2814 break; 2815 } 2816 } else if (irb->esw.esw0.erw.cons) { 2817 sense = irb->ecw; 2818 } 2819 return sense; 2820 } 2821 EXPORT_SYMBOL_GPL(dasd_get_sense); 2822 2823 static int __init dasd_init(void) 2824 { 2825 int rc; 2826 2827 init_waitqueue_head(&dasd_init_waitq); 2828 init_waitqueue_head(&dasd_flush_wq); 2829 init_waitqueue_head(&generic_waitq); 2830 2831 /* register 'common' DASD debug area, used for all DBF_XXX calls */ 2832 dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); 2833 if (dasd_debug_area == NULL) { 2834 rc = -ENOMEM; 2835 goto failed; 2836 } 2837 debug_register_view(dasd_debug_area, &debug_sprintf_view); 2838 debug_set_level(dasd_debug_area, DBF_WARNING); 2839 2840 DBF_EVENT(DBF_EMERG, "%s", "debug area created"); 2841 2842 dasd_diag_discipline_pointer = NULL; 2843 2844 rc = dasd_devmap_init(); 2845 if (rc) 2846 goto failed; 2847 rc = dasd_gendisk_init(); 2848 if (rc) 2849 goto failed; 2850 rc = dasd_parse(); 2851 if (rc) 2852 goto failed; 2853 rc = dasd_eer_init(); 2854 if (rc) 2855 goto failed; 2856 #ifdef CONFIG_PROC_FS 2857 rc = dasd_proc_init(); 2858 if (rc) 2859 goto failed; 2860 #endif 2861 2862 return 0; 2863 failed: 2864 pr_info("The DASD device driver could not be initialized\n"); 2865 dasd_exit(); 2866 return rc; 2867 } 2868 2869 module_init(dasd_init); 2870 module_exit(dasd_exit); 2871 2872 EXPORT_SYMBOL(dasd_debug_area); 2873 EXPORT_SYMBOL(dasd_diag_discipline_pointer); 2874 2875 EXPORT_SYMBOL(dasd_add_request_head); 2876 EXPORT_SYMBOL(dasd_add_request_tail); 2877 EXPORT_SYMBOL(dasd_cancel_req); 2878 EXPORT_SYMBOL(dasd_device_clear_timer); 2879 EXPORT_SYMBOL(dasd_block_clear_timer); 2880 EXPORT_SYMBOL(dasd_enable_device); 2881 EXPORT_SYMBOL(dasd_int_handler); 2882 EXPORT_SYMBOL(dasd_kfree_request); 2883 EXPORT_SYMBOL(dasd_kick_device); 2884 EXPORT_SYMBOL(dasd_kmalloc_request); 2885 EXPORT_SYMBOL(dasd_schedule_device_bh); 2886 EXPORT_SYMBOL(dasd_schedule_block_bh); 2887 EXPORT_SYMBOL(dasd_set_target_state); 2888 EXPORT_SYMBOL(dasd_device_set_timer); 2889 EXPORT_SYMBOL(dasd_block_set_timer); 2890 EXPORT_SYMBOL(dasd_sfree_request); 2891 EXPORT_SYMBOL(dasd_sleep_on); 2892 EXPORT_SYMBOL(dasd_sleep_on_immediatly); 2893 EXPORT_SYMBOL(dasd_sleep_on_interruptible); 2894 EXPORT_SYMBOL(dasd_smalloc_request); 2895 EXPORT_SYMBOL(dasd_start_IO); 2896 EXPORT_SYMBOL(dasd_term_IO); 2897 2898 EXPORT_SYMBOL_GPL(dasd_generic_probe); 2899 EXPORT_SYMBOL_GPL(dasd_generic_remove); 2900 EXPORT_SYMBOL_GPL(dasd_generic_notify); 2901 EXPORT_SYMBOL_GPL(dasd_generic_set_online); 2902 EXPORT_SYMBOL_GPL(dasd_generic_set_offline); 2903 EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change); 2904 EXPORT_SYMBOL_GPL(dasd_flush_device_queue); 2905 EXPORT_SYMBOL_GPL(dasd_alloc_block); 2906 EXPORT_SYMBOL_GPL(dasd_free_block); 2907