/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;
static wait_queue_head_t generic_waitq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
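/*
 * Editor's note: dasd_alloc_device() above reports allocation failure in
 * the returned pointer itself via ERR_PTR(-ENOMEM), so callers test with
 * IS_ERR()/PTR_ERR() instead of a separate rc. A minimal user-space sketch
 * of that convention follows (the demo_* names are hypothetical and only
 * illustrate the pattern; they are not part of this driver).
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Encode a small negative errno value in a pointer, ERR_PTR()-style. */
static inline void *demo_err_ptr(long error) { return (void *) error; }
static inline int demo_is_err(const void *ptr)
{
	return (unsigned long) ptr >= (unsigned long) -4095;
}
static inline long demo_ptr_err(const void *ptr) { return (long) ptr; }

static void *demo_alloc(size_t size)
{
	void *p = malloc(size);
	return p ? p : demo_err_ptr(-ENOMEM);
}

int main(void)
{
	void *dev = demo_alloc(64);

	if (demo_is_err(dev)) {
		printf("allocation failed: %ld\n", demo_ptr_err(dev));
		return 1;
	}
	free(dev);
	return 0;
}
#endif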
/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up the reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	int rc;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 0;
}
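/*
 * Editor's note: dasd_state_basic_to_ready() above publishes the capacity
 * in 512-byte sectors: block->blocks counts device blocks of bp_block
 * bytes each, and s2b_shift is log2(bp_block / 512). A user-space sketch
 * of that arithmetic (the field names mirror struct dasd_block; the
 * numeric values are made up for illustration).
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	unsigned int bp_block = 4096;	/* bytes per device block (example) */
	unsigned long blocks = 600840;	/* number of device blocks (example) */
	int s2b_shift = 0;

	/* s2b_shift: how many 512-byte sectors fit into one device block. */
	while ((512u << s2b_shift) < bp_block)
		s2b_shift++;

	printf("s2b_shift=%d, capacity=%lu sectors\n",
	       s2b_shift, blocks << s2b_shift);	/* prints 3, 4806720 */
	return 0;
}
#endif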
/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device using the
 * kernel event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
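/*
 * Editor's note: the functions above walk the state ladder
 * NEW < KNOWN < BASIC < READY < ONLINE in either direction until state
 * meets target or a transition fails. A condensed user-space sketch of
 * that control flow follows; it is deliberately simplified (the real
 * dasd_increase_state() cascades several transitions in one call, and a
 * nonzero rc freezes target at the current state, see dasd_change_state).
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

enum demo_state { S_NEW, S_KNOWN, S_BASIC, S_READY, S_ONLINE };

static const char *demo_name[] = { "new", "known", "basic", "ready", "online" };

int main(void)
{
	enum demo_state state = S_NEW, target = S_ONLINE;

	/* One transition per pass, up or down, until state == target. */
	while (state != target) {
		state = (state < target) ? state + 1 : state - 1;
		printf("-> %s\n", demo_name[state]);
	}
	return 0;
}
#endif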
/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Helper for dasd_enable_device: has the device reached its target state?
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}
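/*
 * Editor's note: dasd_profile_counter() above maps a value to one of 32
 * logarithmic histogram buckets: the index grows while value >> (2+index)
 * is still nonzero, so bucket i covers values from roughly 2^(i+2) up,
 * capped at 31. A stand-alone check of that mapping (demo_bucket is a
 * hypothetical name; the loop is copied from the macro).
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

static int demo_bucket(unsigned long value)
{
	int index;

	/* Same loop as dasd_profile_counter(). */
	for (index = 0; index < 31 && value >> (2 + index); index++)
		;
	return index;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       demo_bucket(3),		/* 0: 3 >> 2 is already 0 */
	       demo_bucket(4),		/* 1: 4 >> 2 == 1, 4 >> 3 == 0 */
	       demo_bucket(1024),	/* 9 */
	       demo_bucket(~0ul));	/* 31: capped at the last bucket */
	return 0;
}
#endif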
/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
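/*
 * Editor's note: the >> 12 above converts S/390 TOD-clock deltas to
 * microseconds. Bit 51 of the 64-bit TOD clock ticks once per
 * microsecond, so one microsecond equals 2^12 TOD units and dropping the
 * low 12 bits of a delta yields microseconds. A sketch with made-up
 * clock values:
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

int main(void)
{
	/* One microsecond is 4096 (2^12) TOD units. */
	unsigned long long buildclk = 0;		/* made-up TOD value */
	unsigned long long startclk = 2500ULL << 12;	/* 2500 us later */

	printf("queue wait: %llu us\n", (startclk - buildclk) >> 12);
	return 0;
}
#endif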
/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
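/*
 * Editor's note: dasd_smalloc_request() above rounds the header size up
 * to an 8-byte boundary with (size + 7L) & -8L before laying the channel
 * program out behind it. -8L is ...fffff8 in two's complement, so the
 * AND clears the low three bits. A quick stand-alone check (demo_round8
 * is a hypothetical name wrapping the same expression):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

static long demo_round8(long size)
{
	return (size + 7L) & -8L;	/* same expression as in the driver */
}

int main(void)
{
	printf("%ld %ld %ld\n",
	       demo_round8(1),		/* 8 */
	       demo_round8(8),		/* 8: already aligned */
	       demo_round8(121));	/* 128 */
	return 0;
}
#endif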
/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successfully",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes:
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Set up a timeout for a device, in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	if (!intparm)
		return;
	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
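/*
 * Editor's note: dasd_device_set_timer() above takes a relative expiry in
 * jiffies. Callers in this file pass values such as 5*HZ (5 seconds, see
 * __dasd_device_check_expire) or 50, which the "try again in 1/2 sec"
 * comment in __dasd_device_start_head suggests assumes HZ=100. A trivial
 * conversion sketch (DEMO_HZ is an assumed stand-in for the kernel's HZ):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

#define DEMO_HZ 100	/* assumed timer frequency; the kernel uses HZ */

int main(void)
{
	/* seconds -> ticks, as in dasd_device_set_timer(device, 5 * HZ) */
	printf("5s = %d ticks, 0.5s = %d ticks\n", 5 * DEMO_HZ, DEMO_HZ / 2);
	return 0;
}
#endif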
/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __func__, cdev->dev.bus_id);
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		dasd_handle_killed_request(cdev, intparm);
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cmd.cstat << 8) |
		  irb->scsw.cmd.dstat), (unsigned int) intparm);

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((irb->scsw.cmd.cc == 1) &&
		     (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
		     (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    irb->scsw.cmd.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cmd.cstat << 8) | irb->scsw.cmd.dstat), cqr);
	next = NULL;
	expires = 0;
	if (irb->scsw.cmd.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    irb->scsw.cmd.cstat == 0 && !irb->esw.esw0.erw.cons) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			DEV_MESSAGE(KERN_DEBUG, device,
				    "default ERP in fastpath (%i retries left)",
				    cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
		else
			DEV_MESSAGE(KERN_DEBUG, device, "%s",
				    "Interrupt fastpath "
				    "failed!");
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue requests that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process requests with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request. */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * The cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function.
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;
	void (*callback)(struct dasd_ccw_req *, void *data);
	void *callback_data;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		callback = cqr->callback;
		callback_data = cqr->callback_data;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "wrong cqr status in __dasd_process_final_queue "
				    "for cqr %p, status %x",
				    cqr, cqr->status);
			BUG();
		}
		if (cqr->callback != NULL)
			(callback)(cqr, callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}
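/*
 * Editor's note: __dasd_device_check_expire() above compares jiffies with
 * time_after_eq(), which stays correct across jiffies wraparound because
 * it subtracts first and then tests the sign of the difference. A
 * user-space model of that macro (demo_time_after_eq mirrors the kernel's
 * time_after_eq definition):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdio.h>

/* Models the kernel's time_after_eq(a, b) for unsigned long tick counts. */
#define demo_time_after_eq(a, b) ((long)((a) - (b)) >= 0)

int main(void)
{
	unsigned long start = ~0ul - 5;	/* just before the counter wraps */
	unsigned long now = start + 10;	/* wrapped around to a small value */

	/* A naive ">=" says "not yet"; the subtraction gets it right. */
	printf("naive: %d, wrap-safe: %d\n",
	       now >= start, demo_time_after_eq(now, start));	/* 0, 1 */
	return 0;
}
#endif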
/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must already have been removed
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet via the device's tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
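/*
 * Editor's note: dasd_sleep_on() above pairs a completion callback
 * (dasd_wakeup_cb) with wait_event() on generic_waitq: the submitter
 * blocks until the interrupt path marks the request final and wakes it.
 * The same rendezvous, rendered in portable user-space form with pthreads
 * (all demo_* names are hypothetical; the thread stands in for the
 * interrupt handler):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <pthread.h>
#include <stdio.h>

/* Hypothetical request with a completion flag, mirroring cqr->status. */
struct demo_req {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

/* Plays the role of dasd_wakeup_cb: mark final status, wake the waiter. */
static void demo_complete(struct demo_req *req)
{
	pthread_mutex_lock(&req->lock);
	req->done = 1;
	pthread_cond_signal(&req->cond);
	pthread_mutex_unlock(&req->lock);
}

static void *demo_worker(void *arg)
{
	demo_complete(arg);	/* the "interrupt" finishing the request */
	return NULL;
}

int main(void)
{
	struct demo_req req = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;

	pthread_create(&t, NULL, demo_worker, &req);
	/* The dasd_sleep_on() part: wait until the callback reports final. */
	pthread_mutex_lock(&req.lock);
	while (!req.done)
		pthread_cond_wait(&req.cond, &req.lock);
	pthread_mutex_unlock(&req.lock);
	pthread_join(t, NULL);
	printf("request completed\n");
	return 0;
}
#endif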
/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptibly for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(generic_waitq, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &generic_waitq;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(generic_waitq, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
/*
 * Cancels a request that was started with dasd_sleep_on.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated,
 *	   0 if there was no need to terminate the request (not started yet),
 *	   or a negative error code if termination failed.
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			DEV_MESSAGE(KERN_ERR, device,
				    "dasd_cancel_req is unable "
				    " to terminate request %p, rc = %d",
				    cqr, rc);
		} else {
			cqr->stopclk = get_clock();
			rc = 1;
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	block->base->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0) {
		if (timer_pending(&block->timer))
			del_timer(&block->timer);
		return;
	}
	if (timer_pending(&block->timer)) {
		if (mod_timer(&block->timer, jiffies + expires))
			return;
	}
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	block->timer.expires = jiffies + expires;
	add_timer(&block->timer);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	if (timer_pending(&block->timer))
		del_timer(&block->timer);
}

/*
 * Report completion of a request to the block layer.
 */
static inline void dasd_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
					    struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;
	struct dasd_device *device = block->base;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue requests from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue)) {

		req = elv_next_request(queue);

		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now; we have to try again later. If this
				 * request is the head-of-queue we stop the
				 * device for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	dasd_end_request(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process requests with final status. */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
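/*
 * Editor's note: a cqr sits on two lists at once. Its devlist node chains
 * it on a dasd_device's ccw_queue (the device tasklet earlier in this
 * file), while its blocklist node chains the same request on the
 * dasd_block's ccw_queue used by __dasd_block_start_head() above. That
 * works because each list node is embedded in the struct and the owner is
 * recovered from the node's offset. A minimal user-space model (demo_*
 * names are hypothetical; demo_entry is container_of in miniature):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stddef.h>
#include <stdio.h>

struct demo_node { struct demo_node *next; };	/* stand-in for list_head */

struct demo_cqr {
	int id;
	struct demo_node devlist;	/* membership in the device queue */
	struct demo_node blocklist;	/* membership in the block queue */
};

/* Recover the enclosing cqr from one of its embedded nodes. */
#define demo_entry(ptr, member) \
	((struct demo_cqr *)((char *)(ptr) - offsetof(struct demo_cqr, member)))

int main(void)
{
	struct demo_cqr cqr = { 42, { NULL }, { NULL } };

	/* The same object is reachable through either embedded node. */
	printf("%d %d\n",
	       demo_entry(&cqr.devlist, devlist)->id,
	       demo_entry(&cqr.blocklist, blocklist)->id);	/* 42 42 */
	return 0;
}
#endif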
/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new requests from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device,
		 * cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	return rc;
}
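/*
 * Editor's note: dasd_schedule_block_bh() below (like
 * dasd_schedule_device_bh() earlier) guards the tasklet with an atomic
 * compare-and-exchange on tasklet_scheduled, so that concurrent callers
 * schedule it exactly once; the tasklet body resets the flag first thing.
 * The guard, modeled with C11 atomics (demo_* names are hypothetical):
 */
#if 0	/* illustrative user-space sketch, not compiled into the driver */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_scheduled;	/* stand-in for tasklet_scheduled */

static void demo_schedule(const char *who)
{
	int expected = 0;

	/* Only the caller that flips 0 -> 1 really schedules. */
	if (!atomic_compare_exchange_strong(&demo_scheduled, &expected, 1)) {
		printf("%s: already scheduled, nothing to do\n", who);
		return;
	}
	printf("%s: scheduled the bottom half\n", who);
}

int main(void)
{
	demo_schedule("caller A");	/* schedules */
	demo_schedule("caller B");	/* sees the flag set, returns */
	atomic_store(&demo_scheduled, 0); /* what the tasklet body does */
	demo_schedule("caller C");	/* schedules again */
	return 0;
}
#endif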

/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
	struct dasd_block *block;

	block = queue->queuedata;
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize the request queue and set the default
 * I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
	int rc;

	block->request_queue = blk_init_queue(do_dasd_request,
					      &block->request_queue_lock);
	if (block->request_queue == NULL)
		return -ENOMEM;

	block->request_queue->queuedata = block;

	elevator_exit(block->request_queue->elevator);
	block->request_queue->elevator = NULL;
	rc = elevator_init(block->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(block->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set the request queue parameters: hardware sector size and
 * transfer limits.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
	int max;

	blk_queue_hardsect_size(block->request_queue, block->bp_block);
	max = block->base->discipline->max_blocks << block->s2b_shift;
	blk_queue_max_sectors(block->request_queue, max);
	blk_queue_max_phys_segments(block->request_queue, -1L);
	blk_queue_max_hw_segments(block->request_queue, -1L);
	blk_queue_max_segment_size(block->request_queue, -1L);
	blk_queue_segment_boundary(block->request_queue, -1L);
	blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free the request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
	if (block->request_queue) {
		blk_cleanup_queue(block->request_queue);
		block->request_queue = NULL;
	}
}
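/*
 * Illustrative sketch of the queue life cycle, not part of the driver:
 * dasd_alloc_queue is called on the new->known transition (see
 * dasd_state_new_to_known), the queue is tuned once the discipline
 * knows the block size (an assumption about the ready transition), and
 * dasd_free_queue tears it down on the way back. The helper below is
 * hypothetical and only shows the call order.
 */
#if 0
static int dasd_example_queue_lifecycle(struct dasd_block *block)
{
	int rc;

	rc = dasd_alloc_queue(block);	/* create queue + elevator */
	if (rc)
		return rc;
	dasd_setup_queue(block);	/* apply sector size and limits */
	/* ... device online, do_dasd_request feeds the queue ... */
	dasd_flush_request_queue(block);/* fail anything still pending */
	dasd_free_queue(block);		/* release the queue */
	return 0;
}
#endif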
/*
 * Flush requests on the request queue.
 */
static void dasd_flush_request_queue(struct dasd_block *block)
{
	struct request *req;

	if (!block->request_queue)
		return;

	spin_lock_irq(&block->request_queue_lock);
	while ((req = elv_next_request(block->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, -EIO);
	}
	spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_block *block = disk->private_data;
	struct dasd_device *base = block->base;
	int rc;

	atomic_inc(&block->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(base->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, base, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (base->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, base, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(base->discipline->owner);
unlock:
	atomic_dec(&block->open_count);
	return rc;
}

static int dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_block *block = disk->private_data;

	atomic_dec(&block->open_count);
	module_put(block->base->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_block *block;
	struct dasd_device *base;

	block = bdev->bd_disk->private_data;
	if (!block)
		return -ENODEV;
	base = block->base;

	if (!base->discipline ||
	    !base->discipline->fill_geometry)
		return -EINVAL;

	base->discipline->fill_geometry(block, geo);
	geo->start = get_start_sect(bdev) >> block->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */
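/*
 * Illustrative sketch, not part of this file: the gendisk side (see
 * dasd_gendisk_alloc) is expected to wire a disk to the operations
 * above roughly like this; dasd_open relies on private_data pointing
 * at the dasd_block. The helper name is hypothetical.
 */
#if 0
static void dasd_example_setup_disk(struct gendisk *gdp,
				    struct dasd_block *block)
{
	gdp->fops = &dasd_device_operations;
	gdp->private_data = block;
	gdp->queue = block->request_queue;
}
#endif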
static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
int dasd_generic_probe(struct ccw_device *cdev,
		       struct dasd_discipline *discipline)
{
	int ret;

	ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not set ccw-device options "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	ret = dasd_add_sysfs_files(cdev);
	if (ret) {
		printk(KERN_WARNING
		       "dasd_generic_probe: could not add sysfs entries "
		       "for %s\n", cdev->dev.bus_id);
		return ret;
	}
	cdev->handler = &dasd_int_handler;

	/*
	 * Automatically online either all dasd devices (dasd_autodetect)
	 * or all devices specified with dasd= parameters during
	 * initial probe.
	 */
	if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
	    (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
		ret = ccw_device_set_online(cdev);
	if (ret)
		printk(KERN_WARNING
		       "dasd_generic_probe: could not initially "
		       "online ccw-device %s; return code: %d\n",
		       cdev->dev.bus_id, ret);
	return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;

	cdev->handler = NULL;

	dasd_remove_sysfs_files(cdev);
	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return;
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return;
	}
	/*
	 * This device is removed unconditionally. Set the offline
	 * flag to prevent dasd_open from opening it while it is
	 * not quite shut down yet.
	 */
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * the life cycle of block is bound to device, so delete it after
	 * the device was safely removed
	 */
	if (block)
		dasd_free_block(block);
}
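/*
 * Illustrative sketch, not part of the driver: a discipline typically
 * forwards its ccw_driver probe to dasd_generic_probe above, passing
 * its own discipline structure. The names below are hypothetical.
 */
#if 0
static int dasd_example_probe(struct ccw_device *cdev)
{
	return dasd_generic_probe(cdev, &dasd_example_discipline);
}
#endif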
/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
int dasd_generic_set_online(struct ccw_device *cdev,
			    struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	/* check_device will allocate the block device if necessary */
	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		if (device->block)
			dasd_free_block(device->block);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}
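/*
 * Illustrative sketch, not part of the driver: like probe, set_online
 * is usually a one-line forwarder that names the base discipline;
 * dasd_generic_set_online above may still substitute the DIAG
 * discipline if that feature is set. Names are hypothetical.
 */
#if 0
static int dasd_example_set_online(struct ccw_device *cdev)
{
	return dasd_generic_set_online(cdev, &dasd_example_discipline);
}
#endif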
int dasd_generic_set_offline(struct ccw_device *cdev)
{
	struct dasd_device *device;
	struct dasd_block *block;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	if (device->block) {
		max_count = device->block->bdev ? 0 : -1;
		open_count = atomic_read(&device->block->open_count);
		if (open_count > max_count) {
			if (open_count > 0)
				printk(KERN_WARNING "Can't offline dasd "
				       "device with open count = %i.\n",
				       open_count);
			else
				printk(KERN_WARNING "%s",
				       "Can't offline dasd device due "
				       "to internal use\n");
			clear_bit(DASD_FLAG_OFFLINE, &device->flags);
			dasd_put_device(device);
			return -EBUSY;
		}
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/* dasd_delete_device destroys the device reference. */
	block = device->block;
	device->block = NULL;
	dasd_delete_device(device);
	/*
	 * the life cycle of block is bound to device, so delete it after
	 * the device was safely removed
	 */
	if (block)
		dasd_free_block(block);
	return 0;
}
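/*
 * Illustrative sketch, not part of the driver: the generic handlers in
 * this section are meant to be plugged into a discipline's ccw_driver.
 * The instance below is hypothetical (device id table omitted); real
 * instances live in the discipline modules, and dasd_generic_notify is
 * defined just below.
 */
#if 0
static struct ccw_driver dasd_example_driver = {
	.name		= "dasd-example",
	.owner		= THIS_MODULE,
	.probe		= dasd_example_probe,
	.remove		= dasd_generic_remove,
	.set_online	= dasd_example_set_online,
	.set_offline	= dasd_generic_set_offline,
	.notify		= dasd_generic_notify,
};
#endif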
int dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		list_for_each_entry(cqr, &device->ccw_queue, devlist)
			if (cqr->status == DASD_CQR_IN_IO) {
				cqr->status = DASD_CQR_QUEUED;
				cqr->retries++;
			}
		device->stopped |= DASD_STOPPED_DC_WAIT;
		dasd_device_clear_timer(device);
		dasd_schedule_device_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~DASD_STOPPED_DC_WAIT;
		dasd_schedule_device_bh(device);
		if (device->block)
			dasd_schedule_block_bh(device->block);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size,
				   device);
	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->startdev = device;
	cqr->memdev = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void **rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->memdev);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
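/*
 * Illustrative sketch, not part of the driver: a discipline reads the
 * device characteristics into a private buffer during device checking.
 * The magic string, buffer, and helper name below are hypothetical; a
 * real caller passes its own.
 */
#if 0
static int dasd_example_read_chars(struct dasd_device *device)
{
	static char rdc_data[64];
	void *rdc = rdc_data;

	/* synchronous: builds the RDC request and sleeps on it */
	return dasd_generic_read_dev_chars(device, "EXMP", &rdc, 64);
}
#endif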
static int __init dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);
	init_waitqueue_head(&generic_waitq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);