/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"

/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	int rc;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device to the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

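/*
 * Overview of the state machine implemented by the functions above
 * (a reading aid derived from dasd_increase_state/dasd_decrease_state,
 * not authoritative documentation):
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                    |
 *	                  UNFMT (entered when do_analysis fails;
 *	                         the only way out is back down to BASIC)
 *
 * dasd_change_state moves the device one step at a time towards
 * device->target and wakes up dasd_init_waitq once the target state
 * is reached.
 */
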
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device and wait until it has reached its target state.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the device to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}

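/*
 * A worked example for dasd_profile_counter (informal, for orientation
 * only): the macro increments the histogram bucket at the first index
 * for which value >> (2 + index) becomes zero, capped at index 31.
 * A value of 8 ends up in bucket 2 (8 >> 4 == 0), so the bucket index
 * is roughly log2(value) - 1.  The cqr clocks used by dasd_profile_end
 * below are TOD clock values; shifting them right by 12 yields
 * microseconds.
 */
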
/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif /* CONFIG_DASD_PROFILE */

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}

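/*
 * Pairing rule (a summary of the functions above, not new behaviour):
 * requests from dasd_kmalloc_request are released with
 * dasd_kfree_request, requests from dasd_smalloc_request with
 * dasd_sfree_request.  Both allocators take a device reference that
 * the matching free routine drops again.  A fuller usage sketch
 * follows dasd_sleep_on below.
 */
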
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}

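/*
 * Note for the interrupt handlers here (a summary, not new behaviour):
 * dasd_start_IO passes the cqr address as the intparm of
 * ccw_device_start, so an interrupt handler can recover the request it
 * belongs to by casting intparm back to a struct dasd_ccw_req pointer.
 * A missing or implausible intparm is treated as an unsolicited
 * interrupt.
 */
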
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			/* FIXME - dasd uses its own timeout interface... */
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((irb->scsw.cc == 1) &&
		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
	next = NULL;
	expires = 0;
	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}
		/*
		 * If we have no sense data, or we just don't want complex
		 * ERP for this request, and we still have retries left,
		 * just reset this request and retry it in the fastpath.
		 */
		if (!(cqr->irb.esw.esw0.erw.cons &&
		      test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags)) &&
		    cqr->retries > 0) {
			DEV_MESSAGE(KERN_DEBUG, device,
				    "default ERP in fastpath (%i retries left)",
				    cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
		else
			DEV_MESSAGE(KERN_DEBUG, device, "%s",
				    "Interrupt fastpath failed!");
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		if (cqr->block)
			spin_lock_bh(&cqr->block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "wrong cqr status in "
				    "__dasd_device_process_final_queue "
				    "for cqr %p, status %x",
				    cqr, cqr->status);
			BUG();
		}
		if (cqr->block)
			spin_unlock_bh(&cqr->block->queue_lock);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

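/*
 * Rough cqr status flow on the device level (derived from the code
 * above, as a reading aid):
 *
 *	FILLED -> QUEUED -> IN_IO -+-> SUCCESS -------> DONE
 *	                           +-> ERROR ---------> NEED_ERP
 *	                           '-> CLEAR_PENDING -> CLEARED -> TERMINATED
 *
 * The final states DONE, NEED_ERP and TERMINATED are assigned in
 * __dasd_device_process_final_queue right before the callback runs.
 */
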
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	dasd_add_request_tail(cqr);
	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

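/*
 * A minimal synchronous usage sketch (hypothetical: the magic "TEST",
 * the sizes and the retry/expiry values are made up, and building the
 * channel program is elided):
 *
 *	struct dasd_ccw_req *cqr;
 *	int rc;
 *
 *	cqr = dasd_smalloc_request("TEST", 1, 32, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	... fill in cqr->cpaddr[0] and cqr->data ...
 *	cqr->startdev = device;
 *	cqr->memdev = device;
 *	cqr->retries = 2;
 *	cqr->expires = 10 * HZ;
 *	cqr->buildclk = get_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);  (returns 0 on DASD_CQR_DONE, else -EIO)
 *	dasd_sfree_request(cqr, cqr->memdev);
 */
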
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(wait_q, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

1508 */ 1509 int dasd_cancel_req(struct dasd_ccw_req *cqr) 1510 { 1511 struct dasd_device *device = cqr->startdev; 1512 unsigned long flags; 1513 int rc; 1514 1515 rc = 0; 1516 spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags); 1517 switch (cqr->status) { 1518 case DASD_CQR_QUEUED: 1519 /* request was not started - just set to cleared */ 1520 cqr->status = DASD_CQR_CLEARED; 1521 break; 1522 case DASD_CQR_IN_IO: 1523 /* request in IO - terminate IO and release again */ 1524 rc = device->discipline->term_IO(cqr); 1525 if (rc) { 1526 DEV_MESSAGE(KERN_ERR, device, 1527 "dasd_cancel_req is unable " 1528 " to terminate request %p, rc = %d", 1529 cqr, rc); 1530 } else { 1531 cqr->stopclk = get_clock(); 1532 rc = 1; 1533 } 1534 break; 1535 default: /* already finished or clear pending - do nothing */ 1536 break; 1537 } 1538 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 1539 dasd_schedule_device_bh(device); 1540 return rc; 1541 } 1542 1543 1544 /* 1545 * SECTION: Operations of the dasd_block layer. 1546 */ 1547 1548 /* 1549 * Timeout function for dasd_block. This is used when the block layer 1550 * is waiting for something that may not come reliably, (e.g. a state 1551 * change interrupt) 1552 */ 1553 static void dasd_block_timeout(unsigned long ptr) 1554 { 1555 unsigned long flags; 1556 struct dasd_block *block; 1557 1558 block = (struct dasd_block *) ptr; 1559 spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags); 1560 /* re-activate request queue */ 1561 block->base->stopped &= ~DASD_STOPPED_PENDING; 1562 spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags); 1563 dasd_schedule_block_bh(block); 1564 } 1565 1566 /* 1567 * Setup timeout for a dasd_block in jiffies. 1568 */ 1569 void dasd_block_set_timer(struct dasd_block *block, int expires) 1570 { 1571 if (expires == 0) { 1572 if (timer_pending(&block->timer)) 1573 del_timer(&block->timer); 1574 return; 1575 } 1576 if (timer_pending(&block->timer)) { 1577 if (mod_timer(&block->timer, jiffies + expires)) 1578 return; 1579 } 1580 block->timer.function = dasd_block_timeout; 1581 block->timer.data = (unsigned long) block; 1582 block->timer.expires = jiffies + expires; 1583 add_timer(&block->timer); 1584 } 1585 1586 /* 1587 * Clear timeout for a dasd_block. 1588 */ 1589 void dasd_block_clear_timer(struct dasd_block *block) 1590 { 1591 if (timer_pending(&block->timer)) 1592 del_timer(&block->timer); 1593 } 1594 1595 /* 1596 * posts the buffer_cache about a finalized request 1597 */ 1598 static inline void dasd_end_request(struct request *req, int error) 1599 { 1600 if (__blk_end_request(req, error, blk_rq_bytes(req))) 1601 BUG(); 1602 } 1603 1604 /* 1605 * Process finished error recovery ccw. 1606 */ 1607 static inline void __dasd_block_process_erp(struct dasd_block *block, 1608 struct dasd_ccw_req *cqr) 1609 { 1610 dasd_erp_fn_t erp_fn; 1611 struct dasd_device *device = block->base; 1612 1613 if (cqr->status == DASD_CQR_DONE) 1614 DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful"); 1615 else 1616 DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful"); 1617 erp_fn = device->discipline->erp_postaction(cqr); 1618 erp_fn(cqr); 1619 } 1620 1621 /* 1622 * Fetch requests from the block device queue. 
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue)) {

		req = elv_next_request(queue);

		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->memdev->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	dasd_end_request(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			if (cqr->irb.esw.esw0.erw.cons &&
			    test_bit(DASD_CQR_FLAGS_USE_ERP,
				     &cqr->flags)) {
				erp_fn = base->discipline->erp_action(cqr);
				erp_fn(cqr);
			}
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}

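/*
 * Note on the two queue levels (a summary of the code above, not new
 * behaviour): requests live on block->ccw_queue, chained by
 * cqr->blocklist, until __dasd_block_start_head hands them to a
 * dasd_device via dasd_add_request_tail, which chains them on
 * device->ccw_queue by cqr->devlist.  Once the device level is done,
 * dasd_return_cqr_cb reschedules the block tasklet, which picks the
 * request up again on the block level.
 */
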
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device
		 * cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	return rc;
}

/*
 * Schedules a call to dasd_block_tasklet over the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
/*
 * Schedules a run of dasd_block_tasklet via the block's tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
		return;
	/* the life cycle of block is bound to its base device */
	dasd_get_device(block->base);
	tasklet_hi_schedule(&block->tasklet);
}


/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * DASD request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new requests from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize the request queue and the default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set the request queue parameters: block size, request size limits and
 * the ordered mode.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_hardsect_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_sectors(block->request_queue, max);
	blk_queue_max_phys_segments(block->request_queue, -1L);
	blk_queue_max_hw_segments(block->request_queue, -1L);
	blk_queue_max_segment_size(block->request_queue, -1L);
	blk_queue_segment_boundary(block->request_queue, -1L);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free the request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}
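/*
 * Editor's note -- a worked example for dasd_setup_queue() above, under
 * the assumption of 4KB blocks and a discipline that reports
 * max_blocks = 240 (both values are illustrative):
 *
 *	bp_block = 4096  ->  s2b_shift = 3  (eight 512-byte sectors/block)
 *	max = 240 << 3 = 1920 sectors       (960KB per request)
 *
 * The -1L arguments simply mean "no limit" for the segment counts, the
 * segment size and the segment boundary.
 */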
/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = elv_next_request(block->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, -EIO);
	}
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_block *block = disk->private_data;
	struct dasd_device *base = block->base;
	int rc;

	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, base, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_block *block;
	struct dasd_device *base;

	block = bdev->bd_disk->private_data;
	if (!block)
		return -ENODEV;
	/* dereference block only after the NULL check */
	base = block->base;

	if (!base->discipline ||
	    !base->discipline->fill_geometry)
		return -EINVAL;

	base->discipline->fill_geometry(block, geo);
	geo->start = get_start_sect(bdev) >> block->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
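/*
 * Editor's note -- a sketch, modelled on what dasd_gendisk_alloc() in
 * dasd_genhd.c is expected to do, of how the operations above get tied
 * to a gendisk (illustrative, not a verbatim quote of that file):
 *
 *	gdp->fops = &dasd_device_operations;
 *	gdp->private_data = block;
 *	add_disk(gdp);
 *
 * dasd_open(), dasd_release() and dasd_getgeo() then recover the
 * dasd_block from disk->private_data, as seen above.
 */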
/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0)) {
		ret = ccw_device_set_online(cdev);
		if (ret)
			printk(KERN_WARNING
			       "dasd_generic_probe: could not initially "
			       "online ccw-device %s; return code: %d\n",
			       cdev->dev.bus_id, ret);
	}
	return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * The life cycle of block is bound to the device, so delete it
	 * after the device was safely removed.
	 */
	if (block)
		dasd_free_block(block);
}
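/*
 * Editor's note -- a sketch of how a discipline is expected to wire the
 * dasd_generic_* helpers into its ccw_driver. "dasd_xxx" is a
 * hypothetical discipline name; the real glue lives in the discipline
 * modules (dasd_eckd, dasd_fba):
 *
 *	static struct ccw_driver dasd_xxx_driver = {
 *		.name        = "dasd-xxx",
 *		.owner       = THIS_MODULE,
 *		.ids         = dasd_xxx_ids,
 *		.probe       = dasd_xxx_probe,      (calls dasd_generic_probe)
 *		.remove      = dasd_generic_remove,
 *		.set_online  = dasd_xxx_set_online, (calls dasd_generic_set_online)
 *		.set_offline = dasd_generic_set_offline,
 *		.notify      = dasd_generic_notify,
 *	};
 */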
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* the first online clears the initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate the block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
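/*
 * Editor's note -- the expected caller pattern for the function above,
 * sketched with a hypothetical "dasd_xxx" discipline: the set_online
 * hook of a discipline is a thin wrapper that merely supplies its
 * dasd_discipline structure:
 *
 *	static int dasd_xxx_set_online(struct ccw_device *cdev)
 *	{
 *		return dasd_generic_set_online(cdev, &dasd_xxx_discipline);
 *	}
 */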
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener, and that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = (int) atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				printk(KERN_WARNING "Can't offline dasd "
				       "device with open count = %i.\n",
				       open_count);
			else
				printk(KERN_WARNING "%s",
				       "Can't offline dasd device due "
				       "to internal use\n");
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * The life cycle of block is bound to the device, so delete it
	 * after the device was safely removed.
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
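/*
 * Editor's note -- a worked example for the open_count test above (my
 * reading of the counters, stated as an illustration, not as authority):
 * open_count starts at -1 in dasd_alloc_block(); the internal blkdev_get
 * issued for partition scanning raises it to 0 and sets block->bdev, so
 * max_count = 0 and open_count then counts only the external openers.
 *
 *	no user holds the device:   open_count = 0 <= max_count -> offline ok
 *	one user holds it open:     open_count = 1 >  max_count -> -EBUSY
 *	bdev == NULL, count == 0:   unexpected internal use     -> -EBUSY
 */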
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		device->stopped |= DASD_STOPPED_DC_WAIT;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~DASD_STOPPED_DC_WAIT;
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}

/*
 * Build a single-CCW Read Device Characteristics (RDC) request.
 */
static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void **rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
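/*
 * Editor's note -- a sketch of the expected caller of
 * dasd_generic_read_dev_chars(), with hypothetical names (a discipline
 * would do this from its check_device() hook; the private structure and
 * the "XXX" magic are illustrative):
 *
 *	struct dasd_xxx_private *private = device->private;
 *	void *rdc = &private->rdc_data;
 *	int rc;
 *
 *	rc = dasd_generic_read_dev_chars(device, "XXX", &rdc,
 *					 sizeof(private->rdc_data));
 *	if (rc)
 *		DEV_MESSAGE(KERN_WARNING, device,
 *			    "Read device characteristics failed, rc=%d", rc);
 *
 * The four-character magic tags the allocated cqr with the owning
 * discipline.
 */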
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);