/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_block *);
static void dasd_setup_queue(struct dasd_block *);
static void dasd_free_queue(struct dasd_block *);
static void dasd_flush_request_queue(struct dasd_block *);
static int dasd_flush_block_queue(struct dasd_block *);
static void dasd_device_tasklet(struct dasd_device *);
static void dasd_block_tasklet(struct dasd_block *);
static void do_kick_device(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (!device)
		return ERR_PTR(-ENOMEM);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (!device->ccw_mem) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (!device->erp_mem) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_device_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
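
/*
 * Note on the two memory pools set up above (added summary, not from the
 * original source): ccw_mem is an order-1 (two page) GFP_DMA allocation
 * backing the ccw_chunks list from which dasd_smalloc_request() below
 * carves channel programs; erp_mem is a separate GFP_DMA page feeding the
 * erp_chunks list so that error recovery requests do not depend on the
 * normal pool. Both chunk lists are serialized by device->mem_lock.
 */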

/*
 * Free memory of a device structure.
 */
void dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Allocate memory for a new block structure.
 */
struct dasd_block *dasd_alloc_block(void)
{
	struct dasd_block *block;

	block = kzalloc(sizeof(*block), GFP_ATOMIC);
	if (!block)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&block->open_count, -1);

	spin_lock_init(&block->request_queue_lock);
	atomic_set(&block->tasklet_scheduled, 0);
	tasklet_init(&block->tasklet,
		     (void (*)(unsigned long)) dasd_block_tasklet,
		     (unsigned long) block);
	INIT_LIST_HEAD(&block->ccw_queue);
	spin_lock_init(&block->queue_lock);
	init_timer(&block->timer);

	return block;
}

/*
 * Free memory of a block structure.
 */
void dasd_free_block(struct dasd_block *block)
{
	kfree(block);
}

/*
 * Make a new device known to the system.
 */
static int dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	if (device->block) {
		rc = dasd_alloc_queue(device->block);
		if (rc) {
			dasd_put_device(device);
			return rc;
		}
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline) {
		if (device->discipline->uncheck_device)
			device->discipline->uncheck_device(device);
		module_put(device->discipline->owner);
	}
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	if (device->block)
		dasd_free_queue(device->block);

	/* Give up the reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	if (device->block) {
		rc = dasd_gendisk_alloc(device->block);
		if (rc)
			return rc;
	}
	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 1,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}
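
/*
 * Background note (added, not from the original source): debug_register()
 * creates the per-device s390 debug feature area; the arguments above
 * request one page per area, a single area and 8*sizeof(long) bytes per
 * trace entry. With debug_set_level(DBF_WARNING), DBF_DEV_EVENT() calls
 * at a less urgent level (e.g. DBF_DEBUG) are filtered out until the
 * level is raised through the s390dbf interface.
 */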

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	if (device->block) {
		dasd_gendisk_free(device->block);
		dasd_block_clear_timer(device->block);
	}
	rc = dasd_flush_device_queue(device);
	if (rc)
		return rc;
	dasd_device_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;
	struct dasd_block *block;

	rc = 0;
	block = device->block;
	/* make disk known with correct capacity */
	if (block) {
		if (block->base->discipline->do_analysis != NULL)
			rc = block->base->discipline->do_analysis(block);
		if (rc) {
			if (rc != -EAGAIN)
				device->state = DASD_STATE_UNFMT;
			return rc;
		}
		dasd_setup_queue(block);
		set_capacity(block->gdp,
			     block->blocks << block->s2b_shift);
		device->state = DASD_STATE_READY;
		rc = dasd_scan_partitions(block);
		if (rc)
			device->state = DASD_STATE_BASIC;
	} else {
		device->state = DASD_STATE_READY;
	}
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	device->state = DASD_STATE_BASIC;
	if (device->block) {
		struct dasd_block *block = device->block;
		rc = dasd_flush_block_queue(block);
		if (rc) {
			device->state = DASD_STATE_READY;
			return rc;
		}
		dasd_destroy_partitions(block);
		dasd_flush_request_queue(block);
		block->blocks = 0;
		block->bp_block = 0;
		block->s2b_shift = 0;
	}
	return 0;
}

/*
 * Back to basic.
 */
static int dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device * device)
{
	int rc;

	if (device->discipline->ready_to_online) {
		rc = device->discipline->ready_to_online(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_ONLINE;
	if (device->block)
		dasd_schedule_block_bh(device->block);
	return 0;
}
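
/*
 * Summary (added for clarity, not from the original source): a device
 * whose analysis fails with anything other than -EAGAIN is parked in
 * DASD_STATE_UNFMT above. dasd_increase_state() below refuses (-EPERM)
 * to raise an unformatted device beyond that state, so the only way
 * forward is down to DASD_STATE_BASIC (dasd_state_unfmt_to_basic),
 * typically after the disk has been formatted through the fake disk.
 */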

/*
 * Stop the requeueing of requests again.
 */
static int dasd_state_online_to_ready(struct dasd_device *device)
{
	int rc;

	if (device->discipline->online_to_ready) {
		rc = device->discipline->online_to_ready(device);
		if (rc)
			return rc;
	}
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}

/*
 * Device shutdown state changes.
 */
static int dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device via the kernel
 * event daemon.
 */
static void do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}
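
/*
 * Overview (added summary, not from the original source): together the
 * transition helpers above implement this ladder, which
 * dasd_increase_state()/dasd_decrease_state() walk one or more steps at
 * a time until device->state meets device->target:
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                     \-> UNFMT (analysis failed)
 *
 * dasd_init_waitq is woken as soon as state == target.
 */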

/*
 * Set the target state for a device and start the state change.
 */
void dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device (i.e. set its target state to online) and wait for
 * the state change to complete.
 */
static inline int _wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the device to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, block) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	block->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void dasd_profile_start(struct dasd_block *block,
			       struct dasd_ccw_req *cqr,
			       struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &block->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	block->profile.dasd_io_nr_req[counter]++;
}
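
/*
 * Worked example (added for clarity): dasd_profile_counter() above builds
 * a logarithmic histogram. The loop stops at the first index for which
 * value >> (2 + index) is zero, i.e. the smallest index with
 * value < 2^(2+index). For value = 100 the shifts 100>>2 ... 100>>6 are
 * all non-zero and 100>>7 is zero, so the request is counted in bucket 5.
 * Values below 4 land in bucket 0; anything from 2^32 up saturates in
 * bucket 31.
 */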

/*
 * Add profiling information for cqr after execution.
 */
static void dasd_profile_end(struct dasd_block *block,
			     struct dasd_ccw_req *cqr,
			     struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!block->profile.dasd_io_reqs)
		memset(&block->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	block->profile.dasd_io_reqs++;
	block->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, block);
	dasd_profile_counter(tottime, dasd_io_times, block);
	dasd_profile_counter(tottimeps, dasd_io_timps, block);
	dasd_profile_counter(strtime, dasd_io_time1, block);
	dasd_profile_counter(irqtime, dasd_io_time2, block);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, block);
	dasd_profile_counter(endtime, dasd_io_time3, block);
}
#else
#define dasd_profile_start(block, cqr, req) do {} while (0)
#define dasd_profile_end(block, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
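
/*
 * Note (added; not from the original source): the ">> 12" shifts above
 * convert TOD clock deltas to microseconds. Bit 51 of the S/390 TOD clock
 * ticks once per microsecond, so dropping the low 12 bits of the 64-bit
 * value yields a microsecond count, matching the "in microseconds"
 * comment on the declarations.
 */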

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *dasd_kmalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *dasd_smalloc_request(char *magic, int cplength,
					  int datasize,
					  struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength * sizeof(struct ccw1)) > PAGE_SIZE);

	/* Round the cqr header up to a multiple of 8 bytes. */
	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength * sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
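
/*
 * Illustrative sketch (added; hypothetical code, not from this file): a
 * discipline would typically carve a request from the per-device pool,
 * fill in the channel program and wait for it:
 *
 *	cqr = dasd_smalloc_request(device->discipline->name, cplength,
 *				   datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	cqr->startdev = device;
 *	cqr->cpaddr[0].cmd_code = ...;	(set up the channel program)
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);	(queue and wait, see below)
 *	dasd_sfree_request(cqr, device);
 *
 * Requests from dasd_kmalloc_request() must be freed with
 * dasd_kfree_request(), those from dasd_smalloc_request() with
 * dasd_sfree_request().
 */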

/*
 * Check discipline magic in cqr.
 */
static inline int dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->startdev;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->startdev;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR_PENDING;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_device_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->startdev;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_ERROR;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected pathes gone,"
			      " retry on all pathes");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void dasd_device_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void dasd_device_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_device_timeout;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void dasd_device_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void dasd_handle_killed_request(struct ccw_device *cdev,
				       unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
	dasd_put_device(device);
}

void dasd_generic_handle_state_change(struct dasd_device *device)
{
	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;
	dasd_schedule_device_bh(device);
	if (device->block)
		dasd_schedule_block_bh(device->block);
}
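
/*
 * Note (added for clarity): dasd_device_set_timer() above takes 'expires'
 * in jiffies relative to now; expires == 0 cancels a pending timer, and a
 * pending timer is simply re-armed via mod_timer(). The retry intervals
 * used elsewhere in this file (e.g. 50 jiffies in __dasd_device_start_head)
 * assume the traditional s390 HZ=100, i.e. half a second.
 */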

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		      struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING"%s(%s): request timed out\n",
			       __func__, cdev->dev.bus_id);
			//FIXME - dasd uses own timeout interface...
			break;
		default:
			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat<<8)|irb->scsw.dstat),
		  (unsigned int) intparm);

	/* check for unsolicited interrupts */
	cqr = (struct dasd_ccw_req *) intparm;
	if (!cqr || ((irb->scsw.cc == 1) &&
		     (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
		     (irb->scsw.stctl & SCSW_STCTL_STATUS_PEND))) {
		if (cqr && cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_device_clear_timer(device);
			device->discipline->handle_unsolicited_interrupt(device,
									 irb);
			dasd_put_device(device);
		}
		return;
	}

	device = (struct dasd_device *) cqr->startdev;
	if (!device ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR_PENDING &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_device_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_device_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);
	next = NULL;
	expires = 0;
	if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
	    irb->scsw.cstat == 0 && !irb->esw.esw0.erw.cons) {
		/* request was completed successfully */
		cqr->status = DASD_CQR_SUCCESS;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io.
		 */
		if (cqr->devlist.next != &device->ccw_queue) {
			next = list_entry(cqr->devlist.next,
					  struct dasd_ccw_req, devlist);
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			dasd_log_sense(cqr, irb);
		}
		/*
		 * If we don't want complex ERP for this request, then just
		 * reset this and retry it in the fastpath
		 */
		if (!test_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags) &&
		    cqr->retries > 0) {
			DEV_MESSAGE(KERN_DEBUG, device,
				    "default ERP in fastpath (%i retries left)",
				    cqr->retries);
			cqr->lpm = LPM_ANYPATH;
			cqr->status = DASD_CQR_QUEUED;
			next = cqr;
		} else
			cqr->status = DASD_CQR_ERROR;
	}
	if (next && (next->status == DASD_CQR_QUEUED) &&
	    (!device->stopped)) {
		if (device->discipline->start_IO(next) == 0)
			expires = next->expires;
		else
			DEV_MESSAGE(KERN_DEBUG, device, "%s",
				    "Interrupt fastpath "
				    "failed!");
	}
	if (expires != 0)
		dasd_device_set_timer(device, expires);
	else
		dasd_device_clear_timer(device);
	dasd_schedule_device_bh(device);
}

/*
 * If we have an error on a dasd_block layer request then we cancel
 * and return all further requests from the same dasd_block as well.
 */
static void __dasd_device_recovery(struct dasd_device *device,
				   struct dasd_ccw_req *ref_cqr)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/*
	 * only requeue request that came from the dasd_block layer
	 */
	if (!ref_cqr->block)
		return;

	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		if (cqr->status == DASD_CQR_QUEUED &&
		    ref_cqr->block == cqr->block) {
			cqr->status = DASD_CQR_CLEARED;
		}
	}
}

/*
 * Remove those ccw requests from the queue that need to be returned
 * to the upper layer.
 */
static void __dasd_device_process_ccw_queue(struct dasd_device *device,
					    struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);

		/* Stop list processing at the first non-final request.
		 */
		if (cqr->status == DASD_CQR_QUEUED ||
		    cqr->status == DASD_CQR_IN_IO ||
		    cqr->status == DASD_CQR_CLEAR_PENDING)
			break;
		if (cqr->status == DASD_CQR_ERROR) {
			__dasd_device_recovery(device, cqr);
		}
		/* Rechain finished requests to final queue */
		list_move_tail(&cqr->devlist, final_queue);
	}
}

/*
 * the cqrs from the final queue are returned to the upper layer
 * by setting a dasd_block state and calling the callback function
 */
static void __dasd_device_process_final_queue(struct dasd_device *device,
					      struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	struct dasd_block *block;

	list_for_each_safe(l, n, final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, devlist);
		list_del_init(&cqr->devlist);
		block = cqr->block;
		if (block)
			spin_lock_bh(&block->queue_lock);
		switch (cqr->status) {
		case DASD_CQR_SUCCESS:
			cqr->status = DASD_CQR_DONE;
			break;
		case DASD_CQR_ERROR:
			cqr->status = DASD_CQR_NEED_ERP;
			break;
		case DASD_CQR_CLEARED:
			cqr->status = DASD_CQR_TERMINATED;
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "wrong cqr status in __dasd_process_final_queue "
				    "for cqr %p, status %x",
				    cqr, cqr->status);
			BUG();
		}
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
		if (block)
			spin_unlock_bh(&block->queue_lock);
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void __dasd_device_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
			cqr->expires += 5*HZ;
			dasd_device_set_timer(device, 5*HZ);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}
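
/*
 * Note (added for clarity): cqr->expires above is a per-request timeout
 * in jiffies, measured from cqr->starttime, which dasd_start_IO() set to
 * the jiffies value at submission. If terminating the overdue request
 * fails, the deadline is pushed out by five seconds and the device timer
 * re-armed, so the termination attempt is retried rather than dropped.
 */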

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void __dasd_device_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* when device is stopped, return request to previous layer */
	if (device->stopped) {
		cqr->status = DASD_CQR_CLEARED;
		dasd_schedule_device_bh(device);
		return;
	}

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_device_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_device_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_device_set_timer(device, 50);
}

/*
 * Go through all requests on the dasd_device request queue,
 * terminate them on the cdev if necessary, and return them to the
 * submitting layer via callback.
 * Note:
 * Make sure that all 'submitting layers' still exist when
 * this function is called! In other words, when 'device' is a base
 * device then all block layer requests must have been removed before
 * via dasd_flush_block_queue.
 */
int dasd_flush_device_queue(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr, *n;
	int rc;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_CLEARED;
			break;
		default: /* no need to modify the others */
			break;
		}
		list_move_tail(&cqr->devlist, &flush_queue);
	}
finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/*
	 * After this point all requests must be in state CLEAR_PENDING,
	 * CLEARED, SUCCESS or ERROR. Now wait for CLEAR_PENDING to become
	 * one of the others.
	 */
	list_for_each_entry_safe(cqr, n, &flush_queue, devlist)
		wait_event(dasd_flush_wq,
			   (cqr->status != DASD_CQR_CLEAR_PENDING));
	/*
	 * Now set each request back to TERMINATED, DONE or NEED_ERP
	 * and call the callback function of flushed requests
	 */
	__dasd_device_process_final_queue(device, &flush_queue);
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void dasd_device_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue.
	 */
	__dasd_device_check_expire(device);
	/* find final requests on ccw queue */
	__dasd_device_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	__dasd_device_process_final_queue(device, &final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_device_start_head(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_device_tasklet over the device tasklet.
 */
void dasd_schedule_device_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_head(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the device ccw_queue.
 * Start the I/O if possible.
 */
void dasd_add_request_tail(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	unsigned long flags;

	device = cqr->startdev;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->devlist, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup helper for the 'sleep_on' functions.
 */
static void dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_NEED_ERP ||
	       cqr->status == DASD_CQR_TERMINATED) &&
	      list_empty(&cqr->devlist));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Queue a request to the tail of the device ccw_queue and wait for
 * its completion.
 */
int dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	dasd_add_request_tail(cqr);
	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
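
/*
 * Note (added for clarity): _wait_for_wakeup() above requires a final
 * status *and* list_empty(&cqr->devlist). __dasd_device_process_final_queue()
 * unlinks the request from the device ccw queue before invoking the
 * callback, so the list check protects the sleeper against spurious
 * wakeups seeing a request that is still owned by the device layer.
 */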

/*
 * Queue a request to the tail of the device ccw_queue and wait
 * interruptible for its completion.
 */
int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	dasd_add_request_tail(cqr);
	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
	if (rc == -ERESTARTSYS) {
		dasd_cancel_req(cqr);
		/* wait (non-interruptible) for final status */
		wait_event(wait_q, _wait_for_wakeup(cqr));
	}
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int _dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, devlist);
	return device->discipline->term_IO(cqr);
}

int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->startdev;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_device_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	return rc;
}
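
/*
 * Note (added for clarity): dasd_sleep_on_immediatly() inserts the special
 * request at the head of the ccw queue with list_add() right after
 * terminating whatever was running, so operations like the eckd steal-lock
 * overtake all queued requests. The terminated request surfaces later as
 * DASD_CQR_TERMINATED and is retried via handle_terminated_request() in
 * the block layer (see below).
 */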

/*
 * Cancels a request that was started with dasd_sleep_on.
 * This is useful to time out requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 *	   0 if there was no need to terminate the request (not started yet)
 *	   negative error code if termination failed
 * Cancellation of a request is an asynchronous operation! The calling
 * function has to wait until the request is properly returned via callback.
 */
int dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to cleared */
		cqr->status = DASD_CQR_CLEARED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		rc = device->discipline->term_IO(cqr);
		if (rc) {
			DEV_MESSAGE(KERN_ERR, device,
				    "dasd_cancel_req is unable "
				    " to terminate request %p, rc = %d",
				    cqr, rc);
		} else {
			cqr->stopclk = get_clock();
			rc = 1;
		}
		break;
	default: /* already finished or clear pending - do nothing */
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_device_bh(device);
	return rc;
}


/*
 * SECTION: Operations of the dasd_block layer.
 */

/*
 * Timeout function for dasd_block. This is used when the block layer
 * is waiting for something that may not come reliably (e.g. a state
 * change interrupt).
 */
static void dasd_block_timeout(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_block *block;

	block = (struct dasd_block *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
	/* re-activate request queue */
	block->base->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(block->base->cdev), flags);
	dasd_schedule_block_bh(block);
}

/*
 * Setup timeout for a dasd_block in jiffies.
 */
void dasd_block_set_timer(struct dasd_block *block, int expires)
{
	if (expires == 0) {
		if (timer_pending(&block->timer))
			del_timer(&block->timer);
		return;
	}
	if (timer_pending(&block->timer)) {
		if (mod_timer(&block->timer, jiffies + expires))
			return;
	}
	block->timer.function = dasd_block_timeout;
	block->timer.data = (unsigned long) block;
	block->timer.expires = jiffies + expires;
	add_timer(&block->timer);
}

/*
 * Clear timeout for a dasd_block.
 */
void dasd_block_clear_timer(struct dasd_block *block)
{
	if (timer_pending(&block->timer))
		del_timer(&block->timer);
}

/*
 * posts the buffer_cache about a finalized request
 */
static inline void dasd_end_request(struct request *req, int error)
{
	if (__blk_end_request(req, error, blk_rq_bytes(req)))
		BUG();
}

/*
 * Process finished error recovery ccw.
 */
static inline void __dasd_block_process_erp(struct dasd_block *block,
					    struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;
	struct dasd_device *device = block->base;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}
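
/*
 * Note (added for clarity): __blk_end_request() returns zero once a
 * request is fully completed. dasd_end_request() above always passes
 * blk_rq_bytes(req), so a non-zero return would mean the block layer
 * still sees outstanding bytes for the request, hence the BUG().
 */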

/*
 * Fetch requests from the block device queue.
 */
static void __dasd_process_request_queue(struct dasd_block *block)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	struct dasd_device *basedev;
	unsigned long flags;

	queue = block->request_queue;
	basedev = block->base;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (basedev->state < DASD_STATE_READY)
		return;
	/* Now we try to fetch requests from the request queue */
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue)) {

		req = elv_next_request(queue);

		if (basedev->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		cqr = basedev->discipline->build_cp(basedev, block, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -EBUSY)
				break;	/* normal end condition */
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&block->ccw_queue))
					break;
				spin_lock_irqsave(get_ccwdev_lock(basedev->cdev), flags);
				basedev->stopped |= DASD_STOPPED_PENDING;
				spin_unlock_irqrestore(get_ccwdev_lock(basedev->cdev), flags);
				dasd_block_set_timer(block, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, basedev,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, -EIO);
			continue;
		}
		/*
		 * Note: callback is set to dasd_return_cqr_cb in
		 * __dasd_block_start_head to cover erp requests as well
		 */
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_FILLED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->blocklist, &block->ccw_queue);
		dasd_profile_start(block, cqr, req);
	}
}

static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
{
	struct request *req;
	int status;
	int error = 0;

	req = (struct request *) cqr->callback_data;
	dasd_profile_end(cqr->block, cqr, req);
	status = cqr->block->base->discipline->free_cp(cqr, req);
	if (status <= 0)
		error = status ? status : -EIO;
	dasd_end_request(req, error);
}

/*
 * Process ccw request queue.
 */
static void __dasd_process_block_ccw_queue(struct dasd_block *block,
					   struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;
	unsigned long flags;
	struct dasd_device *base = block->base;

restart:
	/* Process request with final status.
	 */
	list_for_each_safe(l, n, &block->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_NEED_ERP &&
		    cqr->status != DASD_CQR_TERMINATED)
			continue;

		if (cqr->status == DASD_CQR_TERMINATED) {
			base->discipline->handle_terminated_request(cqr);
			goto restart;
		}

		/* Process requests that may be recovered */
		if (cqr->status == DASD_CQR_NEED_ERP) {
			erp_fn = base->discipline->erp_action(cqr);
			erp_fn(cqr);
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(base) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(base, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_FILLED;
			cqr->retries = 255;
			spin_lock_irqsave(get_ccwdev_lock(base->cdev), flags);
			base->stopped |= DASD_STOPPED_QUIESCE;
			spin_unlock_irqrestore(get_ccwdev_lock(base->cdev),
					       flags);
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->blocklist, final_queue);
	}
}

static void dasd_return_cqr_cb(struct dasd_ccw_req *cqr, void *data)
{
	dasd_schedule_block_bh(cqr->block);
}

static void __dasd_block_start_head(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&block->ccw_queue))
		return;
	/* We always begin with the first request on the queue, as some
	 * of the previously started requests have to be enqueued on a
	 * dasd_device again for error recovery.
	 */
	list_for_each_entry(cqr, &block->ccw_queue, blocklist) {
		if (cqr->status != DASD_CQR_FILLED)
			continue;
		/* Non-temporary stop condition will trigger fail fast */
		if (block->base->stopped & ~DASD_STOPPED_PENDING &&
		    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
		    (!dasd_eer_enabled(block->base))) {
			cqr->status = DASD_CQR_FAILED;
			dasd_schedule_block_bh(block);
			continue;
		}
		/* Don't try to start requests if device is stopped */
		if (block->base->stopped)
			return;

		/* just a fail safe check, should not happen */
		if (!cqr->startdev)
			cqr->startdev = block->base;

		/* make sure that the requests we submit find their way back */
		cqr->callback = dasd_return_cqr_cb;

		dasd_add_request_tail(cqr);
	}
}
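
/*
 * Note (added for clarity): in __dasd_block_start_head() above a request
 * fails fast only if all three conditions hold: the device is stopped for
 * a non-temporary reason (anything besides DASD_STOPPED_PENDING), the
 * request opted in via DASD_CQR_FLAGS_FAILFAST, and extended error
 * reporting is disabled; with EER enabled such failures go through the
 * quiesce path in __dasd_process_block_ccw_queue() instead.
 */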

/*
 * Central dasd_block layer routine. Takes requests from the generic
 * block layer request queue, creates ccw requests, enqueues them on
 * a dasd_device and processes ccw requests that have been returned.
 */
static void dasd_block_tasklet(struct dasd_block *block)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&block->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock(&block->queue_lock);
	/* Finish off requests on ccw queue */
	__dasd_process_block_ccw_queue(block, &final_queue);
	spin_unlock(&block->queue_lock);
	/* Now call the callback function of requests with final status */
	spin_lock_irq(&block->request_queue_lock);
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, blocklist);
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	spin_lock(&block->queue_lock);
	/* Get new request from the block device request queue */
	__dasd_process_request_queue(block);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_block_start_head(block);
	spin_unlock(&block->queue_lock);
	spin_unlock_irq(&block->request_queue_lock);
	dasd_put_device(block->base);
}

static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up(&dasd_flush_wq);
}

/*
 * Go through all requests on the dasd_block request queue, cancel them
 * on the respective dasd_device, and return them to the generic
 * block layer.
 */
static int dasd_flush_block_queue(struct dasd_block *block)
{
	struct dasd_ccw_req *cqr, *n;
	int rc, i;
	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_bh(&block->queue_lock);
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
		/* if this request is currently owned by a dasd_device cancel it */
		if (cqr->status >= DASD_CQR_QUEUED)
			rc = dasd_cancel_req(cqr);
		if (rc < 0)
			break;
		/* Rechain request (including erp chain) so it won't be
		 * touched by the dasd_block_tasklet anymore.
		 * Replace the callback so we notice when the request
		 * is returned from the dasd_device layer.
		 */
		cqr->callback = _dasd_wake_block_flush_cb;
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, &flush_queue);
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}
	spin_unlock_bh(&block->queue_lock);
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_block_process_erp(block, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		list_del_init(&cqr->blocklist);
		__dasd_cleanup_cqr(cqr);
	}
	return rc;
}

/*
 * Schedules a call to dasd_block_tasklet over the block tasklet.
 */
void dasd_schedule_block_bh(struct dasd_block *block)
{
	/* Protect against rescheduling.
        if (atomic_cmpxchg(&block->tasklet_scheduled, 0, 1) != 0)
                return;
        /* the life cycle of block is bound to its base device */
        dasd_get_device(block->base);
        tasklet_hi_schedule(&block->tasklet);
}

/*
 * SECTION: external block device operations
 * (request queue handling, open, release, etc.)
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void do_dasd_request(struct request_queue *queue)
{
        struct dasd_block *block;

        block = queue->queuedata;
        spin_lock(&block->queue_lock);
        /* Get new requests from the block device request queue */
        __dasd_process_request_queue(block);
        /* Now check if the head of the ccw queue needs to be started. */
        __dasd_block_start_head(block);
        spin_unlock(&block->queue_lock);
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int dasd_alloc_queue(struct dasd_block *block)
{
        int rc;

        block->request_queue = blk_init_queue(do_dasd_request,
                                              &block->request_queue_lock);
        if (block->request_queue == NULL)
                return -ENOMEM;

        block->request_queue->queuedata = block;

        /* Replace the default elevator with the deadline scheduler. */
        elevator_exit(block->request_queue->elevator);
        block->request_queue->elevator = NULL;
        rc = elevator_init(block->request_queue, "deadline");
        if (rc) {
                blk_cleanup_queue(block->request_queue);
                return rc;
        }
        return 0;
}

/*
 * Set up the request queue parameters.
 */
static void dasd_setup_queue(struct dasd_block *block)
{
        int max;

        blk_queue_hardsect_size(block->request_queue, block->bp_block);
        max = block->base->discipline->max_blocks << block->s2b_shift;
        blk_queue_max_sectors(block->request_queue, max);
        blk_queue_max_phys_segments(block->request_queue, -1L);
        blk_queue_max_hw_segments(block->request_queue, -1L);
        blk_queue_max_segment_size(block->request_queue, -1L);
        blk_queue_segment_boundary(block->request_queue, -1L);
        blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void dasd_free_queue(struct dasd_block *block)
{
        if (block->request_queue) {
                blk_cleanup_queue(block->request_queue);
                block->request_queue = NULL;
        }
}

/*
 * Flush requests on the request queue.
 */
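/*
 * Requests still sitting on the generic block layer queue can no
 * longer be serviced at this point; each one is dequeued and ended
 * with -EIO.
 */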
static void dasd_flush_request_queue(struct dasd_block *block)
{
        struct request *req;

        if (!block->request_queue)
                return;

        spin_lock_irq(&block->request_queue_lock);
        while ((req = elv_next_request(block->request_queue))) {
                blkdev_dequeue_request(req);
                dasd_end_request(req, -EIO);
        }
        spin_unlock_irq(&block->request_queue_lock);
}

static int dasd_open(struct inode *inp, struct file *filp)
{
        struct gendisk *disk = inp->i_bdev->bd_disk;
        struct dasd_block *block = disk->private_data;
        struct dasd_device *base = block->base;
        int rc;

        atomic_inc(&block->open_count);
        if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) {
                rc = -ENODEV;
                goto unlock;
        }

        if (!try_module_get(base->discipline->owner)) {
                rc = -EINVAL;
                goto unlock;
        }

        if (dasd_probeonly) {
                DEV_MESSAGE(KERN_INFO, base, "%s",
                            "No access to device due to probeonly mode");
                rc = -EPERM;
                goto out;
        }

        if (base->state <= DASD_STATE_BASIC) {
                DBF_DEV_EVENT(DBF_ERR, base, " %s",
                              " Cannot open unrecognized device");
                rc = -ENODEV;
                goto out;
        }

        return 0;

out:
        module_put(base->discipline->owner);
unlock:
        atomic_dec(&block->open_count);
        return rc;
}

static int dasd_release(struct inode *inp, struct file *filp)
{
        struct gendisk *disk = inp->i_bdev->bd_disk;
        struct dasd_block *block = disk->private_data;

        atomic_dec(&block->open_count);
        module_put(block->base->discipline->owner);
        return 0;
}

/*
 * Return disk geometry.
 */
static int dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct dasd_block *block;
        struct dasd_device *base;

        block = bdev->bd_disk->private_data;
        if (!block)
                return -ENODEV;
        base = block->base;

        if (!base->discipline ||
            !base->discipline->fill_geometry)
                return -EINVAL;

        base->discipline->fill_geometry(block, geo);
        geo->start = get_start_sect(bdev) >> block->s2b_shift;
        return 0;
}

struct block_device_operations
dasd_device_operations = {
        .owner          = THIS_MODULE,
        .open           = dasd_open,
        .release        = dasd_release,
        .ioctl          = dasd_ioctl,
        .compat_ioctl   = dasd_compat_ioctl,
        .getgeo         = dasd_getgeo,
};

/*******************************************************************************
 * end of block device operations
 */

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
        dasd_proc_exit();
#endif
        dasd_eer_exit();
        if (dasd_page_cache != NULL) {
                kmem_cache_destroy(dasd_page_cache);
                dasd_page_cache = NULL;
        }
        dasd_gendisk_exit();
        dasd_devmap_exit();
        if (dasd_debug_area != NULL) {
                debug_unregister(dasd_debug_area);
                dasd_debug_area = NULL;
        }
}

/*
 * SECTION: common functions for ccw_driver use
 */

/*
 * Initial attempt at a probe function. This can be simplified once
 * the other detection code is gone.
 */
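/*
 * A discipline driver's ccw_driver probe routine is expected to
 * forward to this helper. A minimal sketch, assuming a hypothetical
 * discipline "xyz":
 *
 *      static int dasd_xyz_probe(struct ccw_device *cdev)
 *      {
 *              return dasd_generic_probe(cdev, &dasd_xyz_discipline);
 *      }
 */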
int dasd_generic_probe(struct ccw_device *cdev,
                       struct dasd_discipline *discipline)
{
        int ret;

        ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP);
        if (ret) {
                printk(KERN_WARNING
                       "dasd_generic_probe: could not set ccw-device options "
                       "for %s\n", cdev->dev.bus_id);
                return ret;
        }
        ret = dasd_add_sysfs_files(cdev);
        if (ret) {
                printk(KERN_WARNING
                       "dasd_generic_probe: could not add sysfs entries "
                       "for %s\n", cdev->dev.bus_id);
                return ret;
        }
        cdev->handler = &dasd_int_handler;

        /*
         * Automatically online either all dasd devices (dasd_autodetect)
         * or all devices specified with dasd= parameters during
         * initial probe.
         */
        if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0) ||
            (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0))
                ret = ccw_device_set_online(cdev);
        if (ret)
                printk(KERN_WARNING
                       "dasd_generic_probe: could not initially "
                       "online ccw-device %s; return code: %d\n",
                       cdev->dev.bus_id, ret);
        return 0;
}

/*
 * This will one day be called from a global not_oper handler.
 * It is also used by driver_unregister during module unload.
 */
void dasd_generic_remove(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;

        cdev->handler = NULL;

        dasd_remove_sysfs_files(cdev);
        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return;
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return;
        }
        /*
         * This device is removed unconditionally. Set the offline
         * flag to prevent dasd_open from opening it while it is
         * not quite down yet.
         */
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * The life cycle of block is bound to the device, so delete it
         * after the device was safely removed.
         */
        if (block)
                dasd_free_block(block);
}

/*
 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
 * the device is detected for the first time and is supposed to be used
 * or the user has started activation through sysfs.
 */
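/*
 * Note that both the base discipline and the discipline actually used
 * (which may be the DIAG discipline) are pinned with try_module_get
 * below; every error path drops both references again.
 */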
int dasd_generic_set_online(struct ccw_device *cdev,
                            struct dasd_discipline *base_discipline)
{
        struct dasd_discipline *discipline;
        struct dasd_device *device;
        int rc;

        /* first online clears initial online feature flag */
        dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
        device = dasd_create_device(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);

        discipline = base_discipline;
        if (device->features & DASD_FEATURE_USEDIAG) {
                if (!dasd_diag_discipline_pointer) {
                        printk(KERN_WARNING
                               "dasd_generic couldn't online device %s "
                               "- discipline DIAG not available\n",
                               cdev->dev.bus_id);
                        dasd_delete_device(device);
                        return -ENODEV;
                }
                discipline = dasd_diag_discipline_pointer;
        }
        if (!try_module_get(base_discipline->owner)) {
                dasd_delete_device(device);
                return -EINVAL;
        }
        if (!try_module_get(discipline->owner)) {
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return -EINVAL;
        }
        device->base_discipline = base_discipline;
        device->discipline = discipline;

        /* check_device will allocate block device if necessary */
        rc = discipline->check_device(device);
        if (rc) {
                printk(KERN_WARNING
                       "dasd_generic couldn't online device %s "
                       "with discipline %s rc=%i\n",
                       cdev->dev.bus_id, discipline->name, rc);
                module_put(discipline->owner);
                module_put(base_discipline->owner);
                dasd_delete_device(device);
                return rc;
        }

        dasd_set_target_state(device, DASD_STATE_ONLINE);
        if (device->state <= DASD_STATE_KNOWN) {
                printk(KERN_WARNING
                       "dasd_generic discipline not found for %s\n",
                       cdev->dev.bus_id);
                rc = -ENODEV;
                dasd_set_target_state(device, DASD_STATE_NEW);
                if (device->block)
                        dasd_free_block(device->block);
                dasd_delete_device(device);
        } else
                pr_debug("dasd_generic device %s found\n",
                         cdev->dev.bus_id);

        /* FIXME: we have to wait for the root device but we don't want
         * to wait for each single device but for all at once. */
        wait_event(dasd_init_waitq, _wait_for_device(device));

        dasd_put_device(device);

        return rc;
}

int dasd_generic_set_offline(struct ccw_device *cdev)
{
        struct dasd_device *device;
        struct dasd_block *block;
        int max_count, open_count;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return PTR_ERR(device);
        if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
                /* Already doing offline processing */
                dasd_put_device(device);
                return 0;
        }
        /*
         * We must make sure that this device is currently not in use.
         * The open_count is increased for every opener, and that includes
         * the blkdev_get in dasd_scan_partitions. We are only interested
         * in the other openers.
         */
        if (device->block) {
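                /*
                 * If bdev is set, the blkdev_get in dasd_scan_partitions
                 * accounts for one opener, so an open_count of 0 is still
                 * acceptable; without it only the initial value of -1 is.
                 */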
                max_count = device->block->bdev ? 0 : -1;
                open_count = atomic_read(&device->block->open_count);
                if (open_count > max_count) {
                        if (open_count > 0)
                                printk(KERN_WARNING "Can't offline dasd "
                                       "device with open count = %i.\n",
                                       open_count);
                        else
                                printk(KERN_WARNING "%s",
                                       "Can't offline dasd device due "
                                       "to internal use\n");
                        clear_bit(DASD_FLAG_OFFLINE, &device->flags);
                        dasd_put_device(device);
                        return -EBUSY;
                }
        }
        dasd_set_target_state(device, DASD_STATE_NEW);
        /* dasd_delete_device destroys the device reference. */
        block = device->block;
        device->block = NULL;
        dasd_delete_device(device);
        /*
         * The life cycle of block is bound to the device, so delete it
         * after the device was safely removed.
         */
        if (block)
                dasd_free_block(block);
        return 0;
}

int dasd_generic_notify(struct ccw_device *cdev, int event)
{
        struct dasd_device *device;
        struct dasd_ccw_req *cqr;
        unsigned long flags;
        int ret;

        device = dasd_device_from_cdev(cdev);
        if (IS_ERR(device))
                return 0;
        spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
        ret = 0;
        switch (event) {
        case CIO_GONE:
        case CIO_NO_PATH:
                /* First of all call extended error reporting. */
                dasd_eer_write(device, NULL, DASD_EER_NOPATH);

                if (device->state < DASD_STATE_BASIC)
                        break;
                /* Device is active. We want to keep it. */
                list_for_each_entry(cqr, &device->ccw_queue, devlist)
                        if (cqr->status == DASD_CQR_IN_IO) {
                                cqr->status = DASD_CQR_QUEUED;
                                cqr->retries++;
                        }
                device->stopped |= DASD_STOPPED_DC_WAIT;
                dasd_device_clear_timer(device);
                dasd_schedule_device_bh(device);
                ret = 1;
                break;
        case CIO_OPER:
                /* FIXME: add a sanity check. */
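                /*
                 * A path to the device is operational again: clear the
                 * disconnect-wait condition and let the bottom halves
                 * retry whatever is queued.
                 */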
                device->stopped &= ~DASD_STOPPED_DC_WAIT;
                dasd_schedule_device_bh(device);
                if (device->block)
                        dasd_schedule_block_bh(device->block);
                ret = 1;
                break;
        }
        spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
        dasd_put_device(device);
        return ret;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
                                                   void *rdc_buffer,
                                                   int rdc_buffer_size,
                                                   char *magic)
{
        struct dasd_ccw_req *cqr;
        struct ccw1 *ccw;

        cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

        if (IS_ERR(cqr)) {
                DEV_MESSAGE(KERN_WARNING, device, "%s",
                            "Could not allocate RDC request");
                return cqr;
        }

        ccw = cqr->cpaddr;
        ccw->cmd_code = CCW_CMD_RDC;
        ccw->cda = (__u32)(addr_t)rdc_buffer;
        ccw->count = rdc_buffer_size;

        cqr->startdev = device;
        cqr->memdev = device;
        cqr->expires = 10*HZ;
        clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
        cqr->retries = 2;
        cqr->buildclk = get_clock();
        cqr->status = DASD_CQR_FILLED;
        return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
                                void **rdc_buffer, int rdc_buffer_size)
{
        int ret;
        struct dasd_ccw_req *cqr;

        cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
                                     magic);
        if (IS_ERR(cqr))
                return PTR_ERR(cqr);

        ret = dasd_sleep_on(cqr);
        dasd_sfree_request(cqr, cqr->memdev);
        return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);

static int __init dasd_init(void)
{
        int rc;

        init_waitqueue_head(&dasd_init_waitq);
        init_waitqueue_head(&dasd_flush_wq);

        /* register 'common' DASD debug area, used for all DBF_XXX calls */
        dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
        if (dasd_debug_area == NULL) {
                rc = -ENOMEM;
                goto failed;
        }
        debug_register_view(dasd_debug_area, &debug_sprintf_view);
        debug_set_level(dasd_debug_area, DBF_WARNING);

        DBF_EVENT(DBF_EMERG, "%s", "debug area created");

        dasd_diag_discipline_pointer = NULL;

        rc = dasd_devmap_init();
        if (rc)
                goto failed;
        rc = dasd_gendisk_init();
        if (rc)
                goto failed;
        rc = dasd_parse();
        if (rc)
                goto failed;
        rc = dasd_eer_init();
        if (rc)
                goto failed;
#ifdef CONFIG_PROC_FS
        rc = dasd_proc_init();
        if (rc)
                goto failed;
#endif

        return 0;
failed:
        MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
        dasd_exit();
        return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_device_clear_timer);
EXPORT_SYMBOL(dasd_block_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_device_bh);
EXPORT_SYMBOL(dasd_schedule_block_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_device_set_timer);
EXPORT_SYMBOL(dasd_block_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
EXPORT_SYMBOL_GPL(dasd_alloc_block);
EXPORT_SYMBOL_GPL(dasd_free_block);