/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#define KMSG_COMPONENT "dasd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>
#include <asm/itcw.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
static void dasd_device_timeout(unsigned long);
static void dasd_block_timeout(unsigned long);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(dev_name(&device->cdev->dev), 4, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;

		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
334 */ 335 static int dasd_state_unfmt_to_basic(struct dasd_device *device) 336 { 337 device->state = DASD_STATE_BASIC; 338 return 0; 339 } 340 341 /* 342 * Make the device online and schedule the bottom half to start 343 * the requeueing of requests from the linux request queue to the 344 * ccw queue. 345 */ 346 static int 347 dasd_state_ready_to_online(struct dasd_device * device) 348 { 349 int rc; 350 struct gendisk *disk; 351 struct disk_part_iter piter; 352 struct hd_struct *part; 353 354 if (device->discipline->ready_to_online) { 355 rc = device->discipline->ready_to_online(device); 356 if (rc) 357 return rc; 358 } 359 device->state = DASD_STATE_ONLINE; 360 if (device->block) { 361 dasd_schedule_block_bh(device->block); 362 disk = device->block->bdev->bd_disk; 363 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 364 while ((part = disk_part_iter_next(&piter))) 365 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 366 disk_part_iter_exit(&piter); 367 } 368 return 0; 369 } 370 371 /* 372 * Stop the requeueing of requests again. 373 */ 374 static int dasd_state_online_to_ready(struct dasd_device *device) 375 { 376 int rc; 377 struct gendisk *disk; 378 struct disk_part_iter piter; 379 struct hd_struct *part; 380 381 if (device->discipline->online_to_ready) { 382 rc = device->discipline->online_to_ready(device); 383 if (rc) 384 return rc; 385 } 386 device->state = DASD_STATE_READY; 387 if (device->block) { 388 disk = device->block->bdev->bd_disk; 389 disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0); 390 while ((part = disk_part_iter_next(&piter))) 391 kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE); 392 disk_part_iter_exit(&piter); 393 } 394 return 0; 395 } 396 397 /* 398 * Device startup state changes. 399 */ 400 static int dasd_increase_state(struct dasd_device *device) 401 { 402 int rc; 403 404 rc = 0; 405 if (device->state == DASD_STATE_NEW && 406 device->target >= DASD_STATE_KNOWN) 407 rc = dasd_state_new_to_known(device); 408 409 if (!rc && 410 device->state == DASD_STATE_KNOWN && 411 device->target >= DASD_STATE_BASIC) 412 rc = dasd_state_known_to_basic(device); 413 414 if (!rc && 415 device->state == DASD_STATE_BASIC && 416 device->target >= DASD_STATE_READY) 417 rc = dasd_state_basic_to_ready(device); 418 419 if (!rc && 420 device->state == DASD_STATE_UNFMT && 421 device->target > DASD_STATE_UNFMT) 422 rc = -EPERM; 423 424 if (!rc && 425 device->state == DASD_STATE_READY && 426 device->target >= DASD_STATE_ONLINE) 427 rc = dasd_state_ready_to_online(device); 428 429 return rc; 430 } 431 432 /* 433 * Device shutdown state changes. 434 */ 435 static int dasd_decrease_state(struct dasd_device *device) 436 { 437 int rc; 438 439 rc = 0; 440 if (device->state == DASD_STATE_ONLINE && 441 device->target <= DASD_STATE_READY) 442 rc = dasd_state_online_to_ready(device); 443 444 if (!rc && 445 device->state == DASD_STATE_READY && 446 device->target <= DASD_STATE_BASIC) 447 rc = dasd_state_ready_to_basic(device); 448 449 if (!rc && 450 device->state == DASD_STATE_UNFMT && 451 device->target <= DASD_STATE_BASIC) 452 rc = dasd_state_unfmt_to_basic(device); 453 454 if (!rc && 455 device->state == DASD_STATE_BASIC && 456 device->target <= DASD_STATE_KNOWN) 457 rc = dasd_state_basic_to_known(device); 458 459 if (!rc && 460 device->state == DASD_STATE_KNOWN && 461 device->target <= DASD_STATE_NEW) 462 rc = dasd_state_known_to_new(device); 463 464 return rc; 465 } 466 467 /* 468 * This is the main startup/shutdown routine. 
469 */ 470 static void dasd_change_state(struct dasd_device *device) 471 { 472 int rc; 473 474 if (device->state == device->target) 475 /* Already where we want to go today... */ 476 return; 477 if (device->state < device->target) 478 rc = dasd_increase_state(device); 479 else 480 rc = dasd_decrease_state(device); 481 if (rc && rc != -EAGAIN) 482 device->target = device->state; 483 484 if (device->state == device->target) { 485 wake_up(&dasd_init_waitq); 486 dasd_put_device(device); 487 } 488 489 /* let user-space know that the device status changed */ 490 kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); 491 } 492 493 /* 494 * Kick starter for devices that did not complete the startup/shutdown 495 * procedure or were sleeping because of a pending state. 496 * dasd_kick_device will schedule a call do do_kick_device to the kernel 497 * event daemon. 498 */ 499 static void do_kick_device(struct work_struct *work) 500 { 501 struct dasd_device *device = container_of(work, struct dasd_device, kick_work); 502 dasd_change_state(device); 503 dasd_schedule_device_bh(device); 504 dasd_put_device(device); 505 } 506 507 void dasd_kick_device(struct dasd_device *device) 508 { 509 dasd_get_device(device); 510 /* queue call to dasd_kick_device to the kernel event daemon. */ 511 schedule_work(&device->kick_work); 512 } 513 514 /* 515 * Set the target state for a device and starts the state change. 516 */ 517 void dasd_set_target_state(struct dasd_device *device, int target) 518 { 519 dasd_get_device(device); 520 /* If we are in probeonly mode stop at DASD_STATE_READY. */ 521 if (dasd_probeonly && target > DASD_STATE_READY) 522 target = DASD_STATE_READY; 523 if (device->target != target) { 524 if (device->state == target) { 525 wake_up(&dasd_init_waitq); 526 dasd_put_device(device); 527 } 528 device->target = target; 529 } 530 if (device->state != device->target) 531 dasd_change_state(device); 532 } 533 534 /* 535 * Enable devices with device numbers in [from..to]. 536 */ 537 static inline int _wait_for_device(struct dasd_device *device) 538 { 539 return (device->state == device->target); 540 } 541 542 void dasd_enable_device(struct dasd_device *device) 543 { 544 dasd_set_target_state(device, DASD_STATE_ONLINE); 545 if (device->state <= DASD_STATE_KNOWN) 546 /* No discipline for device found. */ 547 dasd_set_target_state(device, DASD_STATE_NEW); 548 /* Now wait for the devices to come up. */ 549 wait_event(dasd_init_waitq, _wait_for_device(device)); 550 } 551 552 /* 553 * SECTION: device operation (interrupt handler, start i/o, term i/o ...) 554 */ 555 #ifdef CONFIG_DASD_PROFILE 556 557 struct dasd_profile_info_t dasd_global_profile; 558 unsigned int dasd_profile_level = DASD_PROFILE_OFF; 559 560 /* 561 * Increments counter in global and local profiling structures. 562 */ 563 #define dasd_profile_counter(value, counter, block) \ 564 { \ 565 int index; \ 566 for (index = 0; index < 31 && value >> (2+index); index++); \ 567 dasd_global_profile.counter[index]++; \ 568 block->profile.counter[index]++; \ 569 } 570 571 /* 572 * Add profiling information for cqr before execution. 
573 */ 574 static void dasd_profile_start(struct dasd_block *block, 575 struct dasd_ccw_req *cqr, 576 struct request *req) 577 { 578 struct list_head *l; 579 unsigned int counter; 580 581 if (dasd_profile_level != DASD_PROFILE_ON) 582 return; 583 584 /* count the length of the chanq for statistics */ 585 counter = 0; 586 list_for_each(l, &block->ccw_queue) 587 if (++counter >= 31) 588 break; 589 dasd_global_profile.dasd_io_nr_req[counter]++; 590 block->profile.dasd_io_nr_req[counter]++; 591 } 592 593 /* 594 * Add profiling information for cqr after execution. 595 */ 596 static void dasd_profile_end(struct dasd_block *block, 597 struct dasd_ccw_req *cqr, 598 struct request *req) 599 { 600 long strtime, irqtime, endtime, tottime; /* in microseconds */ 601 long tottimeps, sectors; 602 603 if (dasd_profile_level != DASD_PROFILE_ON) 604 return; 605 606 sectors = blk_rq_sectors(req); 607 if (!cqr->buildclk || !cqr->startclk || 608 !cqr->stopclk || !cqr->endclk || 609 !sectors) 610 return; 611 612 strtime = ((cqr->startclk - cqr->buildclk) >> 12); 613 irqtime = ((cqr->stopclk - cqr->startclk) >> 12); 614 endtime = ((cqr->endclk - cqr->stopclk) >> 12); 615 tottime = ((cqr->endclk - cqr->buildclk) >> 12); 616 tottimeps = tottime / sectors; 617 618 if (!dasd_global_profile.dasd_io_reqs) 619 memset(&dasd_global_profile, 0, 620 sizeof(struct dasd_profile_info_t)); 621 dasd_global_profile.dasd_io_reqs++; 622 dasd_global_profile.dasd_io_sects += sectors; 623 624 if (!block->profile.dasd_io_reqs) 625 memset(&block->profile, 0, 626 sizeof(struct dasd_profile_info_t)); 627 block->profile.dasd_io_reqs++; 628 block->profile.dasd_io_sects += sectors; 629 630 dasd_profile_counter(sectors, dasd_io_secs, block); 631 dasd_profile_counter(tottime, dasd_io_times, block); 632 dasd_profile_counter(tottimeps, dasd_io_timps, block); 633 dasd_profile_counter(strtime, dasd_io_time1, block); 634 dasd_profile_counter(irqtime, dasd_io_time2, block); 635 dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block); 636 dasd_profile_counter(endtime, dasd_io_time3, block); 637 } 638 #else 639 #define dasd_profile_start(block, cqr, req) do {} while (0) 640 #define dasd_profile_end(block, cqr, req) do {} while (0) 641 #endif /* CONFIG_DASD_PROFILE */ 642 643 /* 644 * Allocate memory for a channel program with 'cplength' channel 645 * command words and 'datasize' additional space. There are two 646 * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed 647 * memory and 2) dasd_smalloc_request uses the static ccw memory 648 * that gets allocated for each device. 
649 */ 650 struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength, 651 int datasize, 652 struct dasd_device *device) 653 { 654 struct dasd_ccw_req *cqr; 655 656 /* Sanity checks */ 657 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 658 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 659 660 cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC); 661 if (cqr == NULL) 662 return ERR_PTR(-ENOMEM); 663 cqr->cpaddr = NULL; 664 if (cplength > 0) { 665 cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1), 666 GFP_ATOMIC | GFP_DMA); 667 if (cqr->cpaddr == NULL) { 668 kfree(cqr); 669 return ERR_PTR(-ENOMEM); 670 } 671 } 672 cqr->data = NULL; 673 if (datasize > 0) { 674 cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA); 675 if (cqr->data == NULL) { 676 kfree(cqr->cpaddr); 677 kfree(cqr); 678 return ERR_PTR(-ENOMEM); 679 } 680 } 681 strncpy((char *) &cqr->magic, magic, 4); 682 ASCEBC((char *) &cqr->magic, 4); 683 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 684 dasd_get_device(device); 685 return cqr; 686 } 687 688 struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength, 689 int datasize, 690 struct dasd_device *device) 691 { 692 unsigned long flags; 693 struct dasd_ccw_req *cqr; 694 char *data; 695 int size; 696 697 /* Sanity checks */ 698 BUG_ON( magic == NULL || datasize > PAGE_SIZE || 699 (cplength*sizeof(struct ccw1)) > PAGE_SIZE); 700 701 size = (sizeof(struct dasd_ccw_req) + 7L) & -8L; 702 if (cplength > 0) 703 size += cplength * sizeof(struct ccw1); 704 if (datasize > 0) 705 size += datasize; 706 spin_lock_irqsave(&device->mem_lock, flags); 707 cqr = (struct dasd_ccw_req *) 708 dasd_alloc_chunk(&device->ccw_chunks, size); 709 spin_unlock_irqrestore(&device->mem_lock, flags); 710 if (cqr == NULL) 711 return ERR_PTR(-ENOMEM); 712 memset(cqr, 0, sizeof(struct dasd_ccw_req)); 713 data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L); 714 cqr->cpaddr = NULL; 715 if (cplength > 0) { 716 cqr->cpaddr = (struct ccw1 *) data; 717 data += cplength*sizeof(struct ccw1); 718 memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1)); 719 } 720 cqr->data = NULL; 721 if (datasize > 0) { 722 cqr->data = data; 723 memset(cqr->data, 0, datasize); 724 } 725 strncpy((char *) &cqr->magic, magic, 4); 726 ASCEBC((char *) &cqr->magic, 4); 727 set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags); 728 dasd_get_device(device); 729 return cqr; 730 } 731 732 /* 733 * Free memory of a channel program. This function needs to free all the 734 * idal lists that might have been created by dasd_set_cda and the 735 * struct dasd_ccw_req itself. 736 */ 737 void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 738 { 739 #ifdef CONFIG_64BIT 740 struct ccw1 *ccw; 741 742 /* Clear any idals used for the request. */ 743 ccw = cqr->cpaddr; 744 do { 745 clear_normalized_cda(ccw); 746 } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC)); 747 #endif 748 kfree(cqr->cpaddr); 749 kfree(cqr->data); 750 kfree(cqr); 751 dasd_put_device(device); 752 } 753 754 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device) 755 { 756 unsigned long flags; 757 758 spin_lock_irqsave(&device->mem_lock, flags); 759 dasd_free_chunk(&device->ccw_chunks, cqr); 760 spin_unlock_irqrestore(&device->mem_lock, flags); 761 dasd_put_device(device); 762 } 763 764 /* 765 * Check discipline magic in cqr. 
766 */ 767 static inline int dasd_check_cqr(struct dasd_ccw_req *cqr) 768 { 769 struct dasd_device *device; 770 771 if (cqr == NULL) 772 return -EINVAL; 773 device = cqr->startdev; 774 if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) { 775 DBF_DEV_EVENT(DBF_WARNING, device, 776 " dasd_ccw_req 0x%08x magic doesn't match" 777 " discipline 0x%08x", 778 cqr->magic, 779 *(unsigned int *) device->discipline->name); 780 return -EINVAL; 781 } 782 return 0; 783 } 784 785 /* 786 * Terminate the current i/o and set the request to clear_pending. 787 * Timer keeps device runnig. 788 * ccw_device_clear can fail if the i/o subsystem 789 * is in a bad mood. 790 */ 791 int dasd_term_IO(struct dasd_ccw_req *cqr) 792 { 793 struct dasd_device *device; 794 int retries, rc; 795 char errorstring[ERRORLENGTH]; 796 797 /* Check the cqr */ 798 rc = dasd_check_cqr(cqr); 799 if (rc) 800 return rc; 801 retries = 0; 802 device = (struct dasd_device *) cqr->startdev; 803 while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) { 804 rc = ccw_device_clear(device->cdev, (long) cqr); 805 switch (rc) { 806 case 0: /* termination successful */ 807 cqr->retries--; 808 cqr->status = DASD_CQR_CLEAR_PENDING; 809 cqr->stopclk = get_clock(); 810 cqr->starttime = 0; 811 DBF_DEV_EVENT(DBF_DEBUG, device, 812 "terminate cqr %p successful", 813 cqr); 814 break; 815 case -ENODEV: 816 DBF_DEV_EVENT(DBF_ERR, device, "%s", 817 "device gone, retry"); 818 break; 819 case -EIO: 820 DBF_DEV_EVENT(DBF_ERR, device, "%s", 821 "I/O error, retry"); 822 break; 823 case -EINVAL: 824 case -EBUSY: 825 DBF_DEV_EVENT(DBF_ERR, device, "%s", 826 "device busy, retry later"); 827 break; 828 default: 829 /* internal error 10 - unknown rc*/ 830 snprintf(errorstring, ERRORLENGTH, "10 %d", rc); 831 dev_err(&device->cdev->dev, "An error occurred in the " 832 "DASD device driver, reason=%s\n", errorstring); 833 BUG(); 834 break; 835 } 836 retries++; 837 } 838 dasd_schedule_device_bh(device); 839 return rc; 840 } 841 842 /* 843 * Start the i/o. This start_IO can fail if the channel is really busy. 844 * In that case set up a timer to start the request later. 
845 */ 846 int dasd_start_IO(struct dasd_ccw_req *cqr) 847 { 848 struct dasd_device *device; 849 int rc; 850 char errorstring[ERRORLENGTH]; 851 852 /* Check the cqr */ 853 rc = dasd_check_cqr(cqr); 854 if (rc) { 855 cqr->intrc = rc; 856 return rc; 857 } 858 device = (struct dasd_device *) cqr->startdev; 859 if (cqr->retries < 0) { 860 /* internal error 14 - start_IO run out of retries */ 861 sprintf(errorstring, "14 %p", cqr); 862 dev_err(&device->cdev->dev, "An error occurred in the DASD " 863 "device driver, reason=%s\n", errorstring); 864 cqr->status = DASD_CQR_ERROR; 865 return -EIO; 866 } 867 cqr->startclk = get_clock(); 868 cqr->starttime = jiffies; 869 cqr->retries--; 870 if (cqr->cpmode == 1) { 871 rc = ccw_device_tm_start(device->cdev, cqr->cpaddr, 872 (long) cqr, cqr->lpm); 873 } else { 874 rc = ccw_device_start(device->cdev, cqr->cpaddr, 875 (long) cqr, cqr->lpm, 0); 876 } 877 switch (rc) { 878 case 0: 879 cqr->status = DASD_CQR_IN_IO; 880 DBF_DEV_EVENT(DBF_DEBUG, device, 881 "start_IO: request %p started successful", 882 cqr); 883 break; 884 case -EBUSY: 885 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 886 "start_IO: device busy, retry later"); 887 break; 888 case -ETIMEDOUT: 889 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 890 "start_IO: request timeout, retry later"); 891 break; 892 case -EACCES: 893 /* -EACCES indicates that the request used only a 894 * subset of the available pathes and all these 895 * pathes are gone. 896 * Do a retry with all available pathes. 897 */ 898 cqr->lpm = LPM_ANYPATH; 899 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 900 "start_IO: selected pathes gone," 901 " retry on all pathes"); 902 break; 903 case -ENODEV: 904 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 905 "start_IO: -ENODEV device gone, retry"); 906 break; 907 case -EIO: 908 DBF_DEV_EVENT(DBF_DEBUG, device, "%s", 909 "start_IO: -EIO device gone, retry"); 910 break; 911 default: 912 /* internal error 11 - unknown rc */ 913 snprintf(errorstring, ERRORLENGTH, "11 %d", rc); 914 dev_err(&device->cdev->dev, 915 "An error occurred in the DASD device driver, " 916 "reason=%s\n", errorstring); 917 BUG(); 918 break; 919 } 920 cqr->intrc = rc; 921 return rc; 922 } 923 924 /* 925 * Timeout function for dasd devices. This is used for different purposes 926 * 1) missing interrupt handler for normal operation 927 * 2) delayed start of request where start_IO failed with -EBUSY 928 * 3) timeout for missing state change interrupts 929 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1), 930 * DASD_CQR_QUEUED for 2) and 3). 931 */ 932 static void dasd_device_timeout(unsigned long ptr) 933 { 934 unsigned long flags; 935 struct dasd_device *device; 936 937 device = (struct dasd_device *) ptr; 938 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 939 /* re-activate request queue */ 940 device->stopped &= ~DASD_STOPPED_PENDING; 941 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 942 dasd_schedule_device_bh(device); 943 } 944 945 /* 946 * Setup timeout for a device in jiffies. 947 */ 948 void dasd_device_set_timer(struct dasd_device *device, int expires) 949 { 950 if (expires == 0) 951 del_timer(&device->timer); 952 else 953 mod_timer(&device->timer, jiffies + expires); 954 } 955 956 /* 957 * Clear timeout for a device. 
958 */ 959 void dasd_device_clear_timer(struct dasd_device *device) 960 { 961 del_timer(&device->timer); 962 } 963 964 static void dasd_handle_killed_request(struct ccw_device *cdev, 965 unsigned long intparm) 966 { 967 struct dasd_ccw_req *cqr; 968 struct dasd_device *device; 969 970 if (!intparm) 971 return; 972 cqr = (struct dasd_ccw_req *) intparm; 973 if (cqr->status != DASD_CQR_IN_IO) { 974 DBF_EVENT(DBF_DEBUG, 975 "invalid status in handle_killed_request: " 976 "bus_id %s, status %02x", 977 dev_name(&cdev->dev), cqr->status); 978 return; 979 } 980 981 device = (struct dasd_device *) cqr->startdev; 982 if (device == NULL || 983 device != dasd_device_from_cdev_locked(cdev) || 984 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 985 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 986 "bus_id %s", dev_name(&cdev->dev)); 987 return; 988 } 989 990 /* Schedule request to be retried. */ 991 cqr->status = DASD_CQR_QUEUED; 992 993 dasd_device_clear_timer(device); 994 dasd_schedule_device_bh(device); 995 dasd_put_device(device); 996 } 997 998 void dasd_generic_handle_state_change(struct dasd_device *device) 999 { 1000 /* First of all start sense subsystem status request. */ 1001 dasd_eer_snss(device); 1002 1003 device->stopped &= ~DASD_STOPPED_PENDING; 1004 dasd_schedule_device_bh(device); 1005 if (device->block) 1006 dasd_schedule_block_bh(device->block); 1007 } 1008 1009 /* 1010 * Interrupt handler for "normal" ssch-io based dasd devices. 1011 */ 1012 void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, 1013 struct irb *irb) 1014 { 1015 struct dasd_ccw_req *cqr, *next; 1016 struct dasd_device *device; 1017 unsigned long long now; 1018 int expires; 1019 1020 if (IS_ERR(irb)) { 1021 switch (PTR_ERR(irb)) { 1022 case -EIO: 1023 break; 1024 case -ETIMEDOUT: 1025 DBF_EVENT(DBF_WARNING, "%s(%s): request timed out\n", 1026 __func__, dev_name(&cdev->dev)); 1027 break; 1028 default: 1029 DBF_EVENT(DBF_WARNING, "%s(%s): unknown error %ld\n", 1030 __func__, dev_name(&cdev->dev), PTR_ERR(irb)); 1031 } 1032 dasd_handle_killed_request(cdev, intparm); 1033 return; 1034 } 1035 1036 now = get_clock(); 1037 1038 /* check for unsolicited interrupts */ 1039 cqr = (struct dasd_ccw_req *) intparm; 1040 if (!cqr || ((scsw_cc(&irb->scsw) == 1) && 1041 (scsw_fctl(&irb->scsw) & SCSW_FCTL_START_FUNC) && 1042 (scsw_stctl(&irb->scsw) & SCSW_STCTL_STATUS_PEND))) { 1043 if (cqr && cqr->status == DASD_CQR_IN_IO) 1044 cqr->status = DASD_CQR_QUEUED; 1045 device = dasd_device_from_cdev_locked(cdev); 1046 if (!IS_ERR(device)) { 1047 dasd_device_clear_timer(device); 1048 device->discipline->handle_unsolicited_interrupt(device, 1049 irb); 1050 dasd_put_device(device); 1051 } 1052 return; 1053 } 1054 1055 device = (struct dasd_device *) cqr->startdev; 1056 if (!device || 1057 strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) { 1058 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid device in request: " 1059 "bus_id %s", dev_name(&cdev->dev)); 1060 return; 1061 } 1062 1063 /* Check for clear pending */ 1064 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1065 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1066 cqr->status = DASD_CQR_CLEARED; 1067 dasd_device_clear_timer(device); 1068 wake_up(&dasd_flush_wq); 1069 dasd_schedule_device_bh(device); 1070 return; 1071 } 1072 1073 /* check status - the request might have been killed by dyn detach */ 1074 if (cqr->status != DASD_CQR_IN_IO) { 1075 DBF_DEV_EVENT(DBF_DEBUG, device, "invalid status: bus_id %s, " 1076 "status %02x", 
			      dev_name(&cdev->dev), cqr->status);
		return;
	}

	next = NULL;
	expires = 0;
	if (scsw_dstat(&irb->scsw) == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    scsw_cstat(&irb->scsw) == 0) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else { /* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		/* log sense for every failed I/O to s390 debugfeature */
		dasd_log_sense_dbf(cqr, irb);
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}

		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			if (cqr->lpm == LPM_ANYPATH)
				DBF_DEV_EVENT(DBF_DEBUG, device,
					      "default ERP in fastpath "
					      "(%i retries left)",
					      cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;
	char errorstring[ERRORLENGTH];

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			/* internal error 12 - wrong cqr status*/
			snprintf(errorstring, ERRORLENGTH, "12 %p %02x",
				 cqr, cqr->status);
			dev_err(&device->cdev->dev,
				"An error occurred in the DASD device driver, "
				"reason=%s\n", errorstring);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is) but cannot be "
				"ended, retrying in 5 s\n",
				cqr, (cqr->expires/HZ));
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			dev_err(&device->cdev->dev,
				"cqr %p timed out (%is), %i retries "
				"remaining\n", cqr, (cqr->expires/HZ),
				cqr->retries);
		}
	}
}
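
/*
 * Overview (informal sketch, derived from dasd_device_tasklet below,
 * not part of the original source): the per-device bottom half drives
 * the pipeline
 *	check expiry of the queue head -> collect final requests ->
 *	run their callbacks without the ccwdev lock -> start new head,
 * which is why the queue processing is split across the helpers above
 * and below.
 */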
1261 */ 1262 static void __dasd_device_start_head(struct dasd_device *device) 1263 { 1264 struct dasd_ccw_req *cqr; 1265 int rc; 1266 1267 if (list_empty(&device->ccw_queue)) 1268 return; 1269 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1270 if (cqr->status != DASD_CQR_QUEUED) 1271 return; 1272 /* when device is stopped, return request to previous layer */ 1273 if (device->stopped) { 1274 cqr->status = DASD_CQR_CLEARED; 1275 dasd_schedule_device_bh(device); 1276 return; 1277 } 1278 1279 rc = device->discipline->start_IO(cqr); 1280 if (rc == 0) 1281 dasd_device_set_timer(device, cqr->expires); 1282 else if (rc == -EACCES) { 1283 dasd_schedule_device_bh(device); 1284 } else 1285 /* Hmpf, try again in 1/2 sec */ 1286 dasd_device_set_timer(device, 50); 1287 } 1288 1289 /* 1290 * Go through all request on the dasd_device request queue, 1291 * terminate them on the cdev if necessary, and return them to the 1292 * submitting layer via callback. 1293 * Note: 1294 * Make sure that all 'submitting layers' still exist when 1295 * this function is called!. In other words, when 'device' is a base 1296 * device then all block layer requests must have been removed before 1297 * via dasd_flush_block_queue. 1298 */ 1299 int dasd_flush_device_queue(struct dasd_device *device) 1300 { 1301 struct dasd_ccw_req *cqr, *n; 1302 int rc; 1303 struct list_head flush_queue; 1304 1305 INIT_LIST_HEAD(&flush_queue); 1306 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1307 rc = 0; 1308 list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { 1309 /* Check status and move request to flush_queue */ 1310 switch (cqr->status) { 1311 case DASD_CQR_IN_IO: 1312 rc = device->discipline->term_IO(cqr); 1313 if (rc) { 1314 /* unable to terminate requeust */ 1315 dev_err(&device->cdev->dev, 1316 "Flushing the DASD request queue " 1317 "failed for request %p\n", cqr); 1318 /* stop flush processing */ 1319 goto finished; 1320 } 1321 break; 1322 case DASD_CQR_QUEUED: 1323 cqr->stopclk = get_clock(); 1324 cqr->status = DASD_CQR_CLEARED; 1325 break; 1326 default: /* no need to modify the others */ 1327 break; 1328 } 1329 list_move_tail(&cqr->devlist, &flush_queue); 1330 } 1331 finished: 1332 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1333 /* 1334 * After this point all requests must be in state CLEAR_PENDING, 1335 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become 1336 * one of the others. 1337 */ 1338 list_for_each_entry_safe(cqr, n, &flush_queue, devlist) 1339 wait_event(dasd_flush_wq, 1340 (cqr->status != DASD_CQR_CLEAR_PENDING)); 1341 /* 1342 * Now set each request back to TERMINATED, DONE or NEED_ERP 1343 * and call the callback function of flushed requests 1344 */ 1345 __dasd_device_process_final_queue(device, &flush_queue); 1346 return rc; 1347 } 1348 1349 /* 1350 * Acquire the device lock and process queues for the device. 1351 */ 1352 static void dasd_device_tasklet(struct dasd_device *device) 1353 { 1354 struct list_head final_queue; 1355 1356 atomic_set (&device->tasklet_scheduled, 0); 1357 INIT_LIST_HEAD(&final_queue); 1358 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1359 /* Check expire time of first request on the ccw queue. 
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	if (cqr->status == DASD_CQR_DONE)
		rc = 0;
	else if (cqr->intrc)
		rc = cqr->intrc;
	else
		rc = -EIO;
	return rc;
}
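
/*
 * Usage sketch (illustrative only): synchronous execution of a request
 * that a discipline has already built and whose startdev is set:
 *
 *	rc = dasd_sleep_on(cqr);
 *	if (rc)
 *		...	handle -EIO or the interrupt return code
 *
 * The interruptible variant below behaves the same, except that a
 * signal cancels the request and the wait.
 */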
1472 */ 1473 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) 1474 { 1475 struct dasd_device *device; 1476 int rc; 1477 1478 device = cqr->startdev; 1479 cqr->callback = dasd_wakeup_cb; 1480 cqr->callback_data = (void *) &generic_waitq; 1481 dasd_add_request_tail(cqr); 1482 rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr)); 1483 if (rc == -ERESTARTSYS) { 1484 dasd_cancel_req(cqr); 1485 /* wait (non-interruptible) for final status */ 1486 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1487 cqr->intrc = rc; 1488 } 1489 1490 if (cqr->status == DASD_CQR_DONE) 1491 rc = 0; 1492 else if (cqr->intrc) 1493 rc = cqr->intrc; 1494 else 1495 rc = -EIO; 1496 return rc; 1497 } 1498 1499 /* 1500 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 1501 * for eckd devices) the currently running request has to be terminated 1502 * and be put back to status queued, before the special request is added 1503 * to the head of the queue. Then the special request is waited on normally. 1504 */ 1505 static inline int _dasd_term_running_cqr(struct dasd_device *device) 1506 { 1507 struct dasd_ccw_req *cqr; 1508 1509 if (list_empty(&device->ccw_queue)) 1510 return 0; 1511 cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist); 1512 return device->discipline->term_IO(cqr); 1513 } 1514 1515 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) 1516 { 1517 struct dasd_device *device; 1518 int rc; 1519 1520 device = cqr->startdev; 1521 spin_lock_irq(get_ccwdev_lock(device->cdev)); 1522 rc = _dasd_term_running_cqr(device); 1523 if (rc) { 1524 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1525 return rc; 1526 } 1527 1528 cqr->callback = dasd_wakeup_cb; 1529 cqr->callback_data = (void *) &generic_waitq; 1530 cqr->status = DASD_CQR_QUEUED; 1531 list_add(&cqr->devlist, &device->ccw_queue); 1532 1533 /* let the bh start the request to keep them in order */ 1534 dasd_schedule_device_bh(device); 1535 1536 spin_unlock_irq(get_ccwdev_lock(device->cdev)); 1537 1538 wait_event(generic_waitq, _wait_for_wakeup(cqr)); 1539 1540 if (cqr->status == DASD_CQR_DONE) 1541 rc = 0; 1542 else if (cqr->intrc) 1543 rc = cqr->intrc; 1544 else 1545 rc = -EIO; 1546 return rc; 1547 } 1548 1549 /* 1550 * Cancels a request that was started with dasd_sleep_on_req. 1551 * This is useful to timeout requests. The request will be 1552 * terminated if it is currently in i/o. 1553 * Returns 1 if the request has been terminated. 1554 * 0 if there was no need to terminate the request (not started yet) 1555 * negative error code if termination failed 1556 * Cancellation of a request is an asynchronous operation! The calling 1557 * function has to wait until the request is properly returned via callback. 
1558 */ 1559 int dasd_cancel_req(struct dasd_ccw_req *cqr) 1560 { 1561 struct dasd_device *device = cqr->startdev; 1562 unsigned long flags; 1563 int rc; 1564 1565 rc = 0; 1566 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1567 switch (cqr->status) { 1568 case DASD_CQR_QUEUED: 1569 /* request was not started - just set to cleared */ 1570 cqr->status = DASD_CQR_CLEARED; 1571 break; 1572 case DASD_CQR_IN_IO: 1573 /* request in IO - terminate IO and release again */ 1574 rc = device->discipline->term_IO(cqr); 1575 if (rc) { 1576 dev_err(&device->cdev->dev, 1577 "Cancelling request %p failed with rc=%d\n", 1578 cqr, rc); 1579 } else { 1580 cqr->stopclk = get_clock(); 1581 rc = 1; 1582 } 1583 break; 1584 default: /* already finished or clear pending - do nothing */ 1585 break; 1586 } 1587 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1588 dasd_schedule_device_bh(device); 1589 return rc; 1590 } 1591 1592 1593 /* 1594 * SECTION: Operations of the dasd_block layer. 1595 */ 1596 1597 /* 1598 * Timeout function for dasd_block. This is used when the block layer 1599 * is waiting for something that may not come reliably, (e.g. a state 1600 * change interrupt) 1601 */ 1602 static void dasd_block_timeout(unsigned long ptr) 1603 { 1604 unsigned long flags; 1605 struct dasd_block *block; 1606 1607 block = (struct dasd_block *) ptr; 1608 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 1609 /* re-activate request queue */ 1610 block->base->stopped &= ~DASD_STOPPED_PENDING; 1611 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 1612 dasd_schedule_block_bh(block); 1613 } 1614 1615 /* 1616 * Setup timeout for a dasd_block in jiffies. 1617 */ 1618 void dasd_block_set_timer(struct dasd_block *block, int expires) 1619 { 1620 if (expires == 0) 1621 del_timer(&block->timer); 1622 else 1623 mod_timer(&block->timer, jiffies + expires); 1624 } 1625 1626 /* 1627 * Clear timeout for a dasd_block. 1628 */ 1629 void dasd_block_clear_timer(struct dasd_block *block) 1630 { 1631 del_timer(&block->timer); 1632 } 1633 1634 /* 1635 * Process finished error recovery ccw. 1636 */ 1637 static inline void __dasd_block_process_erp(struct dasd_block *block, 1638 struct dasd_ccw_req *cqr) 1639 { 1640 dasd_erp_fn_t erp_fn; 1641 struct dasd_device *device = block->base; 1642 1643 if (cqr->status == DASD_CQR_DONE) 1644 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1645 else 1646 dev_err(&device->cdev->dev, "ERP failed for the DASD\n"); 1647 erp_fn = device->discipline->erp_postaction(cqr); 1648 erp_fn(cqr); 1649 } 1650 1651 /* 1652 * Fetch requests from the block device queue. 1653 */ 1654 static void __dasd_process_request_queue(struct dasd_block *block) 1655 { 1656 struct request_queue *queue; 1657 struct request *req; 1658 struct dasd_ccw_req *cqr; 1659 struct dasd_device *basedev; 1660 unsigned long flags; 1661 queue = block->request_queue; 1662 basedev = block->base; 1663 /* No queue ? Then there is nothing to do. */ 1664 if (queue == NULL) 1665 return; 1666 1667 /* 1668 * We requeue request from the block device queue to the ccw 1669 * queue only in two states. In state DASD_STATE_READY the 1670 * partition detection is done and we need to requeue requests 1671 * for that. State DASD_STATE_ONLINE is normal block device 1672 * operation. 
1673 */ 1674 if (basedev->state < DASD_STATE_READY) 1675 return; 1676 /* Now we try to fetch requests from the request queue */ 1677 while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1678 if (basedev->features & DASD_FEATURE_READONLY && 1679 rq_data_dir(req) == WRITE) { 1680 DBF_DEV_EVENT(DBF_ERR, basedev, 1681 "Rejecting write request %p", 1682 req); 1683 blk_start_request(req); 1684 __blk_end_request_all(req, -EIO); 1685 continue; 1686 } 1687 cqr = basedev->discipline->build_cp(basedev, block, req); 1688 if (IS_ERR(cqr)) { 1689 if (PTR_ERR(cqr) == -EBUSY) 1690 break; /* normal end condition */ 1691 if (PTR_ERR(cqr) == -ENOMEM) 1692 break; /* terminate request queue loop */ 1693 if (PTR_ERR(cqr) == -EAGAIN) { 1694 /* 1695 * The current request cannot be build right 1696 * now, we have to try later. If this request 1697 * is the head-of-queue we stop the device 1698 * for 1/2 second. 1699 */ 1700 if (!list_empty(&block->ccw_queue)) 1701 break; 1702 spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags); 1703 basedev->stopped |= DASD_STOPPED_PENDING; 1704 spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags); 1705 dasd_block_set_timer(block, HZ/2); 1706 break; 1707 } 1708 DBF_DEV_EVENT(DBF_ERR, basedev, 1709 "CCW creation failed (rc=%ld) " 1710 "on request %p", 1711 PTR_ERR(cqr), req); 1712 blk_start_request(req); 1713 __blk_end_request_all(req, -EIO); 1714 continue; 1715 } 1716 /* 1717 * Note: callback is set to dasd_return_cqr_cb in 1718 * __dasd_block_start_head to cover erp requests as well 1719 */ 1720 cqr->callback_data = (void *) req; 1721 cqr->status = DASD_CQR_FILLED; 1722 blk_start_request(req); 1723 list_add_tail(&cqr->blocklist, &block->ccw_queue); 1724 dasd_profile_start(block, cqr, req); 1725 } 1726 } 1727 1728 static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr) 1729 { 1730 struct request *req; 1731 int status; 1732 int error = 0; 1733 1734 req = (struct request *) cqr->callback_data; 1735 dasd_profile_end(cqr->block, cqr, req); 1736 status = cqr->block->base->discipline->free_cp(cqr, req); 1737 if (status <= 0) 1738 error = status ? status : -EIO; 1739 __blk_end_request_all(req, error); 1740 } 1741 1742 /* 1743 * Process ccw request queue. 1744 */ 1745 static void __dasd_process_block_ccw_queue(struct dasd_block *block, 1746 struct list_head *final_queue) 1747 { 1748 struct list_head *l, *n; 1749 struct dasd_ccw_req *cqr; 1750 dasd_erp_fn_t erp_fn; 1751 unsigned long flags; 1752 struct dasd_device *base = block->base; 1753 1754 restart: 1755 /* Process request with final status. */ 1756 list_for_each_safe(l, n, &block->ccw_queue) { 1757 cqr = list_entry(l, struct dasd_ccw_req, blocklist); 1758 if (cqr->status != DASD_CQR_DONE && 1759 cqr->status != DASD_CQR_FAILED && 1760 cqr->status != DASD_CQR_NEED_ERP && 1761 cqr->status != DASD_CQR_TERMINATED) 1762 continue; 1763 1764 if (cqr->status == DASD_CQR_TERMINATED) { 1765 base->discipline->handle_terminated_request(cqr); 1766 goto restart; 1767 } 1768 1769 /* Process requests that may be recovered */ 1770 if (cqr->status == DASD_CQR_NEED_ERP) { 1771 erp_fn = base->discipline->erp_action(cqr); 1772 erp_fn(cqr); 1773 goto restart; 1774 } 1775 1776 /* log sense for fatal error */ 1777 if (cqr->status == DASD_CQR_FAILED) { 1778 dasd_log_sense(cqr, &cqr->irb); 1779 } 1780 1781 /* First of all call extended error reporting. 
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first requests on the queue, as some
	 * of previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device,
		 * cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			spin_lock_bh(&block->queue_lock);
			__dasd_block_process_erp(block, cqr);
			spin_unlock_bh(&block->queue_lock);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		spin_lock_irq(&block->request_queue_lock);
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
		spin_unlock_irq(&block->request_queue_lock);
	}
	return rc;
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}
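
/*
 * Note (explanatory comment, not from the original source): the block
 * layer invokes do_dasd_request() with block->request_queue_lock
 * already held, because that lock is handed to blk_init_queue() in
 * dasd_alloc_queue() below. Only block->queue_lock, which protects the
 * ccw queue, is taken here.
 */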
/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set up the request queue limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_logical_block_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_sectors(block->request_queue, max);
	blk_queue_max_phys_segments(block->request_queue, -1L);
	blk_queue_max_hw_segments(block->request_queue, -1L);
	/* with page sized segments we can translate each segment into
	 * one idaw/tidaw
	 */
	blk_queue_max_segment_size(block->request_queue, PAGE_SIZE);
	blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}

/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = blk_fetch_request(block->request_queue)))
		__blk_end_request_all(req, -EIO);
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct block_device *bdev, fmode_t mode)
{
	struct dasd_block *block = bdev->bd_disk->private_data;
	struct dasd_device *base = block->base;
	int rc;

	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		dev_info(&base->cdev->dev,
			 "Accessing the DASD failed because it is in "
			 "probeonly mode\n");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct gendisk *disk, fmode_t mode)
{
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}
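
/*
 * The limits set in dasd_setup_queue() above mix units: the block layer
 * counts 512-byte sectors, the discipline counts device blocks.  As
 * used throughout this driver, s2b_shift satisfies
 * sectors = blocks << s2b_shift.  A sketch of how such a shift could be
 * derived from the block size; demo_s2b_shift() is hypothetical and
 * illustrative only.
 */
#if 0
static int demo_s2b_shift(unsigned int bp_block)
{
	int shift;

	/* e.g. bp_block = 4096 -> 8 sectors per block -> shift = 3 */
	for (shift = 0; (512u << shift) < bp_block; shift++)
		;
	return shift;	/* then: max_sectors = max_blocks << shift */
}
#endif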
/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_block *block;
	struct dasd_device *base;

	block = bdev->bd_disk->private_data;
	if (!block)
		return -ENODEV;
	base = block->base;

	if (!base->discipline ||
	    !base->discipline->fill_geometry)
		return -EINVAL;

	base->discipline->fill_geometry(block, geo);
	geo->start = get_start_sect(bdev) >> block->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_ioctl,
	.getgeo		= dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		pr_warning("%s: Setting the DASD online failed with rc=%d\n",
			   dev_name(&cdev->dev), ret);
	else {
		struct dasd_device *device = dasd_device_from_cdev(cdev);

		wait_event(dasd_init_waitq, _wait_for_device(device));
		dasd_put_device(device);
	}
}

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_generic_probe: could not set ccw-device options "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		DBF_EVENT(DBF_WARNING,
			  "dasd_generic_probe: could not add sysfs entries "
			  "for %s\n", dev_name(&cdev->dev));
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(dev_name(&cdev->dev)) != 0))
		async_schedule(dasd_generic_auto_online, cdev);
	return 0;
}
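
/*
 * A discipline typically wires dasd_generic_probe() into its own ccw
 * probe callback, passing its discipline structure.  Sketch modeled on
 * the existing disciplines; demo_discipline and demo_probe are
 * hypothetical names, illustrative only.
 */
#if 0
static struct dasd_discipline demo_discipline;

static int demo_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &demo_discipline);
}
#endif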
/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite shut down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			pr_warning("%s Setting the DASD online failed because "
				   "of missing DIAG discipline\n",
				   dev_name(&cdev->dev));
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		pr_warning("%s Setting the DASD online with discipline %s "
			   "failed with rc=%i\n",
			   dev_name(&cdev->dev), discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		pr_warning("%s Setting the DASD online failed because of a "
			   "missing discipline\n", dev_name(&cdev->dev));
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 dev_name(&cdev->dev));
	dasd_put_device(device);
	return rc;
}
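
/*
 * dasd_generic_set_online() above pins two module references (base
 * discipline and the discipline actually used) and must drop them in
 * reverse order on every failure path.  The rollback pattern in
 * isolation; illustrative only, demo_get_both() is hypothetical.
 */
#if 0
static int demo_get_both(struct module *base_owner, struct module *owner)
{
	if (!try_module_get(base_owner))
		return -EINVAL;
	if (!try_module_get(owner)) {
		module_put(base_owner);	/* undo the first get */
		return -EINVAL;
	}
	return 0;	/* the caller eventually does module_put() on both */
}
#endif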
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				pr_warning("%s: The DASD cannot be set offline "
					   "with open count %i\n",
					   dev_name(&cdev->dev), open_count);
			else
				pr_warning("%s: The DASD cannot be set offline "
					   "while it is in use\n",
					   dev_name(&cdev->dev));
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * life cycle of block is bound to device, so delete it after
	 * device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	int ret;

	device = dasd_device_from_cdev_locked(cdev);
	if (IS_ERR(device))
		return 0;
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_BOXED:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		device->stopped |= DASD_STOPPED_DC_WAIT;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~DASD_STOPPED_DC_WAIT;
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	dasd_put_device(device);
	return ret;
}
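
/*
 * The dasd_generic_* helpers in this section are meant to be plugged
 * into a discipline's struct ccw_driver.  Sketch of such a hookup,
 * modeled on how the existing disciplines use them; all demo_*
 * identifiers and the empty id table are hypothetical, illustrative
 * only.
 */
#if 0
static int demo_probe(struct ccw_device *);	/* see the sketch above */
static int demo_set_online(struct ccw_device *); /* would call dasd_generic_set_online */

static struct ccw_device_id demo_ids[] = {
	/* device/control-unit type entries would go here */
	{ /* end of list */ },
};

static struct ccw_driver demo_driver = {
	.name		= "dasd-demo",
	.owner		= THIS_MODULE,
	.ids		= demo_ids,
	.probe		= demo_probe,
	.remove		= dasd_generic_remove,
	.set_online	= demo_set_online,
	.set_offline	= dasd_generic_set_offline,
	.notify		= dasd_generic_notify,
};
#endif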
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		/* internal error 13 - Allocating the RDC request failed */
		dev_err(&device->cdev->dev,
			"An error occurred in the DASD device driver, "
			"reason=%s\n", "13");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}


int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void *rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
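
/*
 * Typical use of dasd_generic_read_dev_chars(): a discipline's
 * check_device routine reads the device characteristics into its
 * private data, synchronously.  Sketch modeled on the existing
 * disciplines; demo_private, the "DEMO" magic and the 64-byte size are
 * hypothetical, illustrative only.
 */
#if 0
struct demo_private {
	char rdc_data[64];	/* layout depends on the device type */
};

static int demo_read_characteristics(struct dasd_device *device)
{
	struct demo_private *private = device->private;

	/* Builds the RDC request and sleeps until it completes. */
	return dasd_generic_read_dev_chars(device, "DEMO",
					   &private->rdc_data, 64);
}
#endif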
/*
 * In command mode and transport mode we need to look for sense
 * data in different places. The sense data itself is always
 * an array of 32 bytes, so we can unify the sense data access
 * for both modes.
 */
char *dasd_get_sense(struct irb *irb)
{
	struct tsb *tsb = NULL;
	char *sense = NULL;

	if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
		if (irb->scsw.tm.tcw)
			tsb = tcw_get_tsb((struct tcw *)(unsigned long)
					  irb->scsw.tm.tcw);
		if (tsb && tsb->length == 64 && tsb->flags)
			switch (tsb->flags & 0x07) {
			case 1:	/* tsa_iostat */
				sense = tsb->tsa.iostat.sense;
				break;
			case 2:	/* tsa_ddpc */
				sense = tsb->tsa.ddpc.sense;
				break;
			default:
				/* currently we don't use interrogate data */
				break;
			}
	} else if (irb->esw.esw0.erw.cons) {
		sense = irb->ecw;
	}
	return sense;
}
EXPORT_SYMBOL_GPL(dasd_get_sense);

static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	pr_info("The DASD device driver could not be initialized\n");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);