/*
 * File...........: linux/drivers/s390/block/dasd.c
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *		    Horst Hummel <Horst.Hummel@de.ibm.com>
 *		    Carsten Otte <Cotte@de.ibm.com>
 *		    Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
 *
 */

#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/hdreg.h>

#include <asm/ccwdev.h>
#include <asm/ebcdic.h>
#include <asm/idals.h>
#include <asm/todclk.h>

/* This is ugly... */
#define PRINTK_HEADER "dasd:"

#include "dasd_int.h"
/*
 * SECTION: Constant definitions to be used within this file
 */
#define DASD_CHANQ_MAX_SIZE 4

/*
 * SECTION: exported variables of dasd.c
 */
debug_info_t *dasd_debug_area;
struct dasd_discipline *dasd_diag_discipline_pointer;
void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);

MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
MODULE_DESCRIPTION("Linux on S/390 DASD device driver,"
		   " Copyright 2000 IBM Corporation");
MODULE_SUPPORTED_DEVICE("dasd");
MODULE_LICENSE("GPL");

/*
 * SECTION: prototypes for static functions of dasd.c
 */
static int dasd_alloc_queue(struct dasd_device *device);
static void dasd_setup_queue(struct dasd_device *device);
static void dasd_free_queue(struct dasd_device *device);
static void dasd_flush_request_queue(struct dasd_device *);
static int dasd_flush_ccw_queue(struct dasd_device *, int);
static void dasd_tasklet(struct dasd_device *);
static void do_kick_device(struct work_struct *);

/*
 * SECTION: Operations on the device structure.
 */
static wait_queue_head_t dasd_init_waitq;
static wait_queue_head_t dasd_flush_wq;

/*
 * Allocate memory for a new device structure.
 */
struct dasd_device *
dasd_alloc_device(void)
{
	struct dasd_device *device;

	device = kzalloc(sizeof(struct dasd_device), GFP_ATOMIC);
	if (device == NULL)
		return ERR_PTR(-ENOMEM);
	/* open_count = 0 means device online but not in use */
	atomic_set(&device->open_count, -1);

	/* Get two pages for normal block device operations. */
	device->ccw_mem = (void *) __get_free_pages(GFP_ATOMIC | GFP_DMA, 1);
	if (device->ccw_mem == NULL) {
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}
	/* Get one page for error recovery. */
	device->erp_mem = (void *) get_zeroed_page(GFP_ATOMIC | GFP_DMA);
	if (device->erp_mem == NULL) {
		free_pages((unsigned long) device->ccw_mem, 1);
		kfree(device);
		return ERR_PTR(-ENOMEM);
	}

	dasd_init_chunklist(&device->ccw_chunks, device->ccw_mem, PAGE_SIZE*2);
	dasd_init_chunklist(&device->erp_chunks, device->erp_mem, PAGE_SIZE);
	spin_lock_init(&device->mem_lock);
	spin_lock_init(&device->request_queue_lock);
	atomic_set(&device->tasklet_scheduled, 0);
	tasklet_init(&device->tasklet,
		     (void (*)(unsigned long)) dasd_tasklet,
		     (unsigned long) device);
	INIT_LIST_HEAD(&device->ccw_queue);
	init_timer(&device->timer);
	INIT_WORK(&device->kick_work, do_kick_device);
	device->state = DASD_STATE_NEW;
	device->target = DASD_STATE_NEW;

	return device;
}
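
/*
 * Usage sketch (hedged): callers such as dasd_create_device() in
 * dasd_devmap.c are expected to check the ERR_PTR-encoded return
 * value rather than comparing against NULL:
 *
 *	struct dasd_device *device = dasd_alloc_device();
 *	if (IS_ERR(device))
 *		return PTR_ERR(device);	(-ENOMEM on failure)
 *	...
 *	dasd_free_device(device);	(error path teardown)
 */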

/*
 * Free memory of a device structure.
 */
void
dasd_free_device(struct dasd_device *device)
{
	kfree(device->private);
	free_page((unsigned long) device->erp_mem);
	free_pages((unsigned long) device->ccw_mem, 1);
	kfree(device);
}

/*
 * Make a new device known to the system.
 */
static int
dasd_state_new_to_known(struct dasd_device *device)
{
	int rc;

	/*
	 * As long as the device is not in state DASD_STATE_NEW we want to
	 * keep the reference count > 0.
	 */
	dasd_get_device(device);

	rc = dasd_alloc_queue(device);
	if (rc) {
		dasd_put_device(device);
		return rc;
	}

	device->state = DASD_STATE_KNOWN;
	return 0;
}

/*
 * Let the system forget about a device.
 */
static int
dasd_state_known_to_new(struct dasd_device *device)
{
	/* Disable extended error reporting for this device. */
	dasd_eer_disable(device);
	/* Forget the discipline information. */
	if (device->discipline)
		module_put(device->discipline->owner);
	device->discipline = NULL;
	if (device->base_discipline)
		module_put(device->base_discipline->owner);
	device->base_discipline = NULL;
	device->state = DASD_STATE_NEW;

	dasd_free_queue(device);

	/* Give up reference we took in dasd_state_new_to_known. */
	dasd_put_device(device);
	return 0;
}

/*
 * Request the irq line for the device.
 */
static int
dasd_state_known_to_basic(struct dasd_device *device)
{
	int rc;

	/* Allocate and register gendisk structure. */
	rc = dasd_gendisk_alloc(device);
	if (rc)
		return rc;

	/* register 'device' debug area, used for all DBF_DEV_XXX calls */
	device->debug_area = debug_register(device->cdev->dev.bus_id, 1, 2,
					    8 * sizeof(long));
	debug_register_view(device->debug_area, &debug_sprintf_view);
	debug_set_level(device->debug_area, DBF_WARNING);
	DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created");

	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Release the irq line for the device. Terminate any running i/o.
 */
static int
dasd_state_basic_to_known(struct dasd_device *device)
{
	int rc;

	dasd_gendisk_free(device);
	rc = dasd_flush_ccw_queue(device, 1);
	if (rc)
		return rc;
	dasd_clear_timer(device);

	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
	if (device->debug_area != NULL) {
		debug_unregister(device->debug_area);
		device->debug_area = NULL;
	}
	device->state = DASD_STATE_KNOWN;
	return 0;
}
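
/*
 * Deferred-analysis sketch (an illustrative restatement of the
 * comment below, not part of the original source): a discipline's
 * do_analysis() may defer the BASIC -> READY transition. The eckd
 * flow is roughly:
 *
 *	do_analysis() returns -EAGAIN	(state stays DASD_STATE_BASIC)
 *	-> discipline starts a format-detection ccw
 *	-> its completion handler calls dasd_kick_device()
 *	-> the kernel event daemon runs dasd_change_state()
 *	-> do_analysis() now returns 0 and the block device is set up
 */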

/*
 * Do the initial analysis. The do_analysis function may return
 * -EAGAIN in which case the device keeps the state DASD_STATE_BASIC
 * until the discipline decides to continue the startup sequence
 * by calling the function dasd_change_state. The eckd discipline
 * uses this to start a ccw that detects the format. The completion
 * interrupt for this detection ccw uses the kernel event daemon to
 * trigger the call to dasd_change_state. All this is done in the
 * discipline code, see dasd_eckd.c.
 * After the analysis ccw is done (do_analysis returned 0) the block
 * device is set up.
 * In case the analysis returns an error, the device setup is stopped
 * (a fake disk was already added to allow formatting).
 */
static int
dasd_state_basic_to_ready(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->discipline->do_analysis != NULL)
		rc = device->discipline->do_analysis(device);
	if (rc) {
		if (rc != -EAGAIN)
			device->state = DASD_STATE_UNFMT;
		return rc;
	}
	/* make disk known with correct capacity */
	dasd_setup_queue(device);
	set_capacity(device->gdp, device->blocks << device->s2b_shift);
	device->state = DASD_STATE_READY;
	rc = dasd_scan_partitions(device);
	if (rc)
		device->state = DASD_STATE_BASIC;
	return rc;
}

/*
 * Remove device from block device layer. Destroy dirty buffers.
 * Forget format information. Check if the target level is basic
 * and if it is create fake disk for formatting.
 */
static int
dasd_state_ready_to_basic(struct dasd_device *device)
{
	int rc;

	rc = dasd_flush_ccw_queue(device, 0);
	if (rc)
		return rc;
	dasd_destroy_partitions(device);
	dasd_flush_request_queue(device);
	device->blocks = 0;
	device->bp_block = 0;
	device->s2b_shift = 0;
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Back to basic.
 */
static int
dasd_state_unfmt_to_basic(struct dasd_device *device)
{
	device->state = DASD_STATE_BASIC;
	return 0;
}

/*
 * Make the device online and schedule the bottom half to start
 * the requeueing of requests from the linux request queue to the
 * ccw queue.
 */
static int
dasd_state_ready_to_online(struct dasd_device *device)
{
	device->state = DASD_STATE_ONLINE;
	dasd_schedule_bh(device);
	return 0;
}

/*
 * Stop the requeueing of requests again.
 */
static int
dasd_state_online_to_ready(struct dasd_device *device)
{
	device->state = DASD_STATE_READY;
	return 0;
}

/*
 * Device startup state changes.
 */
static int
dasd_increase_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_NEW &&
	    device->target >= DASD_STATE_KNOWN)
		rc = dasd_state_new_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target >= DASD_STATE_BASIC)
		rc = dasd_state_known_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target >= DASD_STATE_READY)
		rc = dasd_state_basic_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target > DASD_STATE_UNFMT)
		rc = -EPERM;

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target >= DASD_STATE_ONLINE)
		rc = dasd_state_ready_to_online(device);

	return rc;
}
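
/*
 * State ladder overview (derived from the transition helpers above,
 * not part of the original source):
 *
 *	NEW <-> KNOWN <-> BASIC <-> READY <-> ONLINE
 *	                    ^
 *	                    |
 *	                  UNFMT	(analysis failed; the only way back is
 *	                         BASIC, going up yields -EPERM)
 *
 * dasd_increase_state() above walks left to right towards
 * device->target, dasd_decrease_state() below walks right to left.
 */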

/*
 * Device shutdown state changes.
 */
static int
dasd_decrease_state(struct dasd_device *device)
{
	int rc;

	rc = 0;
	if (device->state == DASD_STATE_ONLINE &&
	    device->target <= DASD_STATE_READY)
		rc = dasd_state_online_to_ready(device);

	if (!rc &&
	    device->state == DASD_STATE_READY &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_ready_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
		rc = dasd_state_unfmt_to_basic(device);

	if (!rc &&
	    device->state == DASD_STATE_BASIC &&
	    device->target <= DASD_STATE_KNOWN)
		rc = dasd_state_basic_to_known(device);

	if (!rc &&
	    device->state == DASD_STATE_KNOWN &&
	    device->target <= DASD_STATE_NEW)
		rc = dasd_state_known_to_new(device);

	return rc;
}

/*
 * This is the main startup/shutdown routine.
 */
static void
dasd_change_state(struct dasd_device *device)
{
	int rc;

	if (device->state == device->target)
		/* Already where we want to go today... */
		return;
	if (device->state < device->target)
		rc = dasd_increase_state(device);
	else
		rc = dasd_decrease_state(device);
	if (rc && rc != -EAGAIN)
		device->target = device->state;

	if (device->state == device->target)
		wake_up(&dasd_init_waitq);

	/* let user-space know that the device status changed */
	kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE);
}

/*
 * Kick starter for devices that did not complete the startup/shutdown
 * procedure or were sleeping because of a pending state.
 * dasd_kick_device will schedule a call to do_kick_device on the kernel
 * event daemon.
 */
static void
do_kick_device(struct work_struct *work)
{
	struct dasd_device *device = container_of(work, struct dasd_device,
						  kick_work);
	dasd_change_state(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}

void
dasd_kick_device(struct dasd_device *device)
{
	dasd_get_device(device);
	/* queue call to do_kick_device to the kernel event daemon. */
	schedule_work(&device->kick_work);
}

/*
 * Set the target state for a device and start the state change.
 */
void
dasd_set_target_state(struct dasd_device *device, int target)
{
	/* If we are in probeonly mode stop at DASD_STATE_READY. */
	if (dasd_probeonly && target > DASD_STATE_READY)
		target = DASD_STATE_READY;
	if (device->target != target) {
		if (device->state == target)
			wake_up(&dasd_init_waitq);
		device->target = target;
	}
	if (device->state != device->target)
		dasd_change_state(device);
}

/*
 * Enable a device: set the target state and wait until it is reached.
 */
static inline int
_wait_for_device(struct dasd_device *device)
{
	return (device->state == device->target);
}

void
dasd_enable_device(struct dasd_device *device)
{
	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN)
		/* No discipline for device found. */
		dasd_set_target_state(device, DASD_STATE_NEW);
	/* Now wait for the devices to come up. */
	wait_event(dasd_init_waitq, _wait_for_device(device));
}
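
/*
 * Pattern sketch (hedged): the same target-state machinery is driven
 * from elsewhere in this file, e.g. dasd_generic_set_online() later
 * does in essence
 *
 *	dasd_set_target_state(device, DASD_STATE_ONLINE);
 *	wait_event(dasd_init_waitq, _wait_for_device(device));
 *
 * while offline processing reverses it with DASD_STATE_NEW as the
 * target.
 */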

/*
 * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
 */
#ifdef CONFIG_DASD_PROFILE

struct dasd_profile_info_t dasd_global_profile;
unsigned int dasd_profile_level = DASD_PROFILE_OFF;

/*
 * Increments counter in global and local profiling structures.
 */
#define dasd_profile_counter(value, counter, device) \
{ \
	int index; \
	for (index = 0; index < 31 && value >> (2+index); index++); \
	dasd_global_profile.counter[index]++; \
	device->profile.counter[index]++; \
}

/*
 * Add profiling information for cqr before execution.
 */
static void
dasd_profile_start(struct dasd_device *device, struct dasd_ccw_req *cqr,
		   struct request *req)
{
	struct list_head *l;
	unsigned int counter;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	/* count the length of the chanq for statistics */
	counter = 0;
	list_for_each(l, &device->ccw_queue)
		if (++counter >= 31)
			break;
	dasd_global_profile.dasd_io_nr_req[counter]++;
	device->profile.dasd_io_nr_req[counter]++;
}

/*
 * Add profiling information for cqr after execution.
 */
static void
dasd_profile_end(struct dasd_device *device, struct dasd_ccw_req *cqr,
		 struct request *req)
{
	long strtime, irqtime, endtime, tottime;	/* in microseconds */
	long tottimeps, sectors;

	if (dasd_profile_level != DASD_PROFILE_ON)
		return;

	sectors = req->nr_sectors;
	if (!cqr->buildclk || !cqr->startclk ||
	    !cqr->stopclk || !cqr->endclk ||
	    !sectors)
		return;

	strtime = ((cqr->startclk - cqr->buildclk) >> 12);
	irqtime = ((cqr->stopclk - cqr->startclk) >> 12);
	endtime = ((cqr->endclk - cqr->stopclk) >> 12);
	tottime = ((cqr->endclk - cqr->buildclk) >> 12);
	tottimeps = tottime / sectors;

	if (!dasd_global_profile.dasd_io_reqs)
		memset(&dasd_global_profile, 0,
		       sizeof(struct dasd_profile_info_t));
	dasd_global_profile.dasd_io_reqs++;
	dasd_global_profile.dasd_io_sects += sectors;

	if (!device->profile.dasd_io_reqs)
		memset(&device->profile, 0,
		       sizeof(struct dasd_profile_info_t));
	device->profile.dasd_io_reqs++;
	device->profile.dasd_io_sects += sectors;

	dasd_profile_counter(sectors, dasd_io_secs, device);
	dasd_profile_counter(tottime, dasd_io_times, device);
	dasd_profile_counter(tottimeps, dasd_io_timps, device);
	dasd_profile_counter(strtime, dasd_io_time1, device);
	dasd_profile_counter(irqtime, dasd_io_time2, device);
	dasd_profile_counter(irqtime / sectors, dasd_io_time2ps, device);
	dasd_profile_counter(endtime, dasd_io_time3, device);
}
#else
#define dasd_profile_start(device, cqr, req) do {} while (0)
#define dasd_profile_end(device, cqr, req) do {} while (0)
#endif				/* CONFIG_DASD_PROFILE */
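
/*
 * Worked example (illustrative, not part of the original source):
 * dasd_profile_counter() picks the histogram slot as the smallest
 * index where value >> (2+index) becomes zero, i.e. power-of-two
 * buckets starting at 4:
 *
 *	value 0..3      -> index 0
 *	value 4..7      -> index 1
 *	value 8..15     -> index 2
 *	...
 *	value >= 2^32   -> index 31	(loop is capped there)
 *
 * The clock deltas above are TOD clock units of 2^-12 microseconds;
 * shifting right by 12 converts them to microseconds.
 */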

/*
 * Allocate memory for a channel program with 'cplength' channel
 * command words and 'datasize' additional space. There are two
 * variants: 1) dasd_kmalloc_request uses kmalloc to get the needed
 * memory and 2) dasd_smalloc_request uses the static ccw memory
 * that gets allocated for each device.
 */
struct dasd_ccw_req *
dasd_kmalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
				      GFP_ATOMIC | GFP_DMA);
		if (cqr->cpaddr == NULL) {
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
		if (cqr->data == NULL) {
			kfree(cqr->cpaddr);
			kfree(cqr);
			return ERR_PTR(-ENOMEM);
		}
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

struct dasd_ccw_req *
dasd_smalloc_request(char *magic, int cplength, int datasize,
		     struct dasd_device *device)
{
	unsigned long flags;
	struct dasd_ccw_req *cqr;
	char *data;
	int size;

	/* Sanity checks */
	BUG_ON(magic == NULL || datasize > PAGE_SIZE ||
	       (cplength*sizeof(struct ccw1)) > PAGE_SIZE);

	size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
	if (cplength > 0)
		size += cplength * sizeof(struct ccw1);
	if (datasize > 0)
		size += datasize;
	spin_lock_irqsave(&device->mem_lock, flags);
	cqr = (struct dasd_ccw_req *)
		dasd_alloc_chunk(&device->ccw_chunks, size);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	if (cqr == NULL)
		return ERR_PTR(-ENOMEM);
	memset(cqr, 0, sizeof(struct dasd_ccw_req));
	data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
	cqr->cpaddr = NULL;
	if (cplength > 0) {
		cqr->cpaddr = (struct ccw1 *) data;
		data += cplength*sizeof(struct ccw1);
		memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
	}
	cqr->data = NULL;
	if (datasize > 0) {
		cqr->data = data;
		memset(cqr->data, 0, datasize);
	}
	strncpy((char *) &cqr->magic, magic, 4);
	ASCEBC((char *) &cqr->magic, 4);
	set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	dasd_get_device(device);
	return cqr;
}

/*
 * Free memory of a channel program. This function needs to free all the
 * idal lists that might have been created by dasd_set_cda and the
 * struct dasd_ccw_req itself.
 */
void
dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
#ifdef CONFIG_64BIT
	struct ccw1 *ccw;

	/* Clear any idals used for the request. */
	ccw = cqr->cpaddr;
	do {
		clear_normalized_cda(ccw);
	} while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
#endif
	kfree(cqr->cpaddr);
	kfree(cqr->data);
	kfree(cqr);
	dasd_put_device(device);
}

void
dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&device->mem_lock, flags);
	dasd_free_chunk(&device->ccw_chunks, cqr);
	spin_unlock_irqrestore(&device->mem_lock, flags);
	dasd_put_device(device);
}
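
/*
 * Layout sketch for dasd_smalloc_request() (hedged; the 0xA4 header
 * size is an assumed illustration, not taken from dasd_int.h): the
 * cqr header is rounded up to an 8-byte boundary, then the channel
 * program and the data area follow in the same chunk:
 *
 *	size = ((0xA4 + 7) & -8) + cplength * 8 + datasize
 *	[ dasd_ccw_req, padded ][ cplength ccw1s ][ datasize bytes ]
 *
 * (sizeof(struct ccw1) is 8.) The chunk comes from the per-device
 * ccw_chunks pool, two GFP_DMA pages reserved in dasd_alloc_device,
 * so it is 31-bit addressable for the channel subsystem and can be
 * allocated from interrupt context.
 */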

/*
 * Check discipline magic in cqr.
 */
static inline int
dasd_check_cqr(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;

	if (cqr == NULL)
		return -EINVAL;
	device = cqr->device;
	if (strncmp((char *) &cqr->magic, device->discipline->ebcname, 4)) {
		DEV_MESSAGE(KERN_WARNING, device,
			    " dasd_ccw_req 0x%08x magic doesn't match"
			    " discipline 0x%08x",
			    cqr->magic,
			    *(unsigned int *) device->discipline->name);
		return -EINVAL;
	}
	return 0;
}

/*
 * Terminate the current i/o and set the request to clear_pending.
 * Timer keeps device running.
 * ccw_device_clear can fail if the i/o subsystem
 * is in a bad mood.
 */
int
dasd_term_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int retries, rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	retries = 0;
	device = (struct dasd_device *) cqr->device;
	while ((retries < 5) && (cqr->status == DASD_CQR_IN_IO)) {
		rc = ccw_device_clear(device->cdev, (long) cqr);
		switch (rc) {
		case 0:	/* termination successful */
			cqr->retries--;
			cqr->status = DASD_CQR_CLEAR;
			cqr->stopclk = get_clock();
			cqr->starttime = 0;
			DBF_DEV_EVENT(DBF_DEBUG, device,
				      "terminate cqr %p successful",
				      cqr);
			break;
		case -ENODEV:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device gone, retry");
			break;
		case -EIO:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "I/O error, retry");
			break;
		case -EINVAL:
		case -EBUSY:
			DBF_DEV_EVENT(DBF_ERR, device, "%s",
				      "device busy, retry later");
			break;
		default:
			DEV_MESSAGE(KERN_ERR, device,
				    "line %d unknown RC=%d, please "
				    "report to linux390@de.ibm.com",
				    __LINE__, rc);
			BUG();
			break;
		}
		retries++;
	}
	dasd_schedule_bh(device);
	return rc;
}

/*
 * Start the i/o. This start_IO can fail if the channel is really busy.
 * In that case set up a timer to start the request later.
 */
int
dasd_start_IO(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	/* Check the cqr */
	rc = dasd_check_cqr(cqr);
	if (rc)
		return rc;
	device = (struct dasd_device *) cqr->device;
	if (cqr->retries < 0) {
		DEV_MESSAGE(KERN_DEBUG, device,
			    "start_IO: request %p (%02x/%i) - no retry left.",
			    cqr, cqr->status, cqr->retries);
		cqr->status = DASD_CQR_FAILED;
		return -EIO;
	}
	cqr->startclk = get_clock();
	cqr->starttime = jiffies;
	cqr->retries--;
	rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
			      cqr->lpm, 0);
	switch (rc) {
	case 0:
		cqr->status = DASD_CQR_IN_IO;
		DBF_DEV_EVENT(DBF_DEBUG, device,
			      "start_IO: request %p started successful",
			      cqr);
		break;
	case -EBUSY:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device busy, retry later");
		break;
	case -ETIMEDOUT:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: request timeout, retry later");
		break;
	case -EACCES:
		/* -EACCES indicates that the request used only a
		 * subset of the available paths and all these
		 * paths are gone.
		 * Do a retry with all available paths.
		 */
		cqr->lpm = LPM_ANYPATH;
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: selected paths gone,"
			      " retry on all paths");
		break;
	case -ENODEV:
	case -EIO:
		DBF_DEV_EVENT(DBF_ERR, device, "%s",
			      "start_IO: device gone, retry");
		break;
	default:
		DEV_MESSAGE(KERN_ERR, device,
			    "line %d unknown RC=%d, please report"
			    " to linux390@de.ibm.com", __LINE__, rc);
		BUG();
		break;
	}
	return rc;
}

/*
 * Timeout function for dasd devices. This is used for different purposes
 * 1) missing interrupt handler for normal operation
 * 2) delayed start of request where start_IO failed with -EBUSY
 * 3) timeout for missing state change interrupts
 * The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
 * DASD_CQR_QUEUED for 2) and 3).
 */
static void
dasd_timeout_device(unsigned long ptr)
{
	unsigned long flags;
	struct dasd_device *device;

	device = (struct dasd_device *) ptr;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	/* re-activate request queue */
	device->stopped &= ~DASD_STOPPED_PENDING;
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
}

/*
 * Setup timeout for a device in jiffies.
 */
void
dasd_set_timer(struct dasd_device *device, int expires)
{
	if (expires == 0) {
		if (timer_pending(&device->timer))
			del_timer(&device->timer);
		return;
	}
	if (timer_pending(&device->timer)) {
		if (mod_timer(&device->timer, jiffies + expires))
			return;
	}
	device->timer.function = dasd_timeout_device;
	device->timer.data = (unsigned long) device;
	device->timer.expires = jiffies + expires;
	add_timer(&device->timer);
}

/*
 * Clear timeout for a device.
 */
void
dasd_clear_timer(struct dasd_device *device)
{
	if (timer_pending(&device->timer))
		del_timer(&device->timer);
}

static void
dasd_handle_killed_request(struct ccw_device *cdev, unsigned long intparm)
{
	struct dasd_ccw_req *cqr;
	struct dasd_device *device;

	cqr = (struct dasd_ccw_req *) intparm;
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status in handle_killed_request: "
			"bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    device != dasd_device_from_cdev_locked(cdev) ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Schedule request to be retried. */
	cqr->status = DASD_CQR_QUEUED;

	dasd_clear_timer(device);
	dasd_schedule_bh(device);
	dasd_put_device(device);
}
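
/*
 * Usage sketch for the timer helpers above (hedged): expires is a
 * jiffies delta and 0 cancels, so callers in this file do e.g.
 *
 *	dasd_set_timer(device, 5*HZ);	(retry in 5 seconds)
 *	dasd_set_timer(device, HZ/2);	(retry in 500 ms)
 *	dasd_set_timer(device, 0);	(cancel, like dasd_clear_timer)
 *
 * When the timer fires, dasd_timeout_device() merely clears
 * DASD_STOPPED_PENDING and reschedules the bottom half; the actual
 * restart decision is left to __dasd_start_head().
 */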

static void
dasd_handle_state_change_pending(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	struct list_head *l, *n;

	/* First of all start sense subsystem status request. */
	dasd_eer_snss(device);

	device->stopped &= ~DASD_STOPPED_PENDING;

	/* restart all 'running' IO on queue */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		if (cqr->status == DASD_CQR_IN_IO)
			cqr->status = DASD_CQR_QUEUED;
	}
	dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * Interrupt handler for "normal" ssch-io based dasd devices.
 */
void
dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
		 struct irb *irb)
{
	struct dasd_ccw_req *cqr, *next;
	struct dasd_device *device;
	unsigned long long now;
	int expires;
	dasd_era_t era;
	char mask;

	if (IS_ERR(irb)) {
		switch (PTR_ERR(irb)) {
		case -EIO:
			dasd_handle_killed_request(cdev, intparm);
			break;
		case -ETIMEDOUT:
			printk(KERN_WARNING "%s(%s): request timed out\n",
			       __FUNCTION__, cdev->dev.bus_id);
			/* FIXME - dasd uses own timeout interface... */
			break;
		default:
			printk(KERN_WARNING "%s(%s): unknown error %ld\n",
			       __FUNCTION__, cdev->dev.bus_id, PTR_ERR(irb));
		}
		return;
	}

	now = get_clock();

	DBF_EVENT(DBF_ERR, "Interrupt: bus_id %s CS/DS %04x ip %08x",
		  cdev->dev.bus_id, ((irb->scsw.cstat << 8) | irb->scsw.dstat),
		  (unsigned int) intparm);

	/* first of all check for state change pending interrupt */
	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
	if ((irb->scsw.dstat & mask) == mask) {
		device = dasd_device_from_cdev_locked(cdev);
		if (!IS_ERR(device)) {
			dasd_handle_state_change_pending(device);
			dasd_put_device(device);
		}
		return;
	}

	cqr = (struct dasd_ccw_req *) intparm;

	/* check for unsolicited interrupts */
	if (cqr == NULL) {
		MESSAGE(KERN_DEBUG,
			"unsolicited interrupt received: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	device = (struct dasd_device *) cqr->device;
	if (device == NULL ||
	    strncmp(device->discipline->ebcname, (char *) &cqr->magic, 4)) {
		MESSAGE(KERN_DEBUG, "invalid device in request: bus_id %s",
			cdev->dev.bus_id);
		return;
	}

	/* Check for clear pending */
	if (cqr->status == DASD_CQR_CLEAR &&
	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
		cqr->status = DASD_CQR_QUEUED;
		dasd_clear_timer(device);
		wake_up(&dasd_flush_wq);
		dasd_schedule_bh(device);
		return;
	}

	/* check status - the request might have been killed by dyn detach */
	if (cqr->status != DASD_CQR_IN_IO) {
		MESSAGE(KERN_DEBUG,
			"invalid status: bus_id %s, status %02x",
			cdev->dev.bus_id, cqr->status);
		return;
	}
	DBF_DEV_EVENT(DBF_DEBUG, device, "Int: CS/DS 0x%04x for cqr %p",
		      ((irb->scsw.cstat << 8) | irb->scsw.dstat), cqr);

	/* Find out the appropriate era_action. */
	if (irb->scsw.fctl & SCSW_FCTL_HALT_FUNC)
		era = dasd_era_fatal;
	else if (irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END) &&
		 irb->scsw.cstat == 0 &&
		 !irb->esw.esw0.erw.cons)
		era = dasd_era_none;
	else if (irb->esw.esw0.erw.cons)
		era = device->discipline->examine_error(cqr, irb);
	else
		era = dasd_era_recover;

	DBF_DEV_EVENT(DBF_DEBUG, device, "era_code %d", era);
	expires = 0;
	if (era == dasd_era_none) {
		cqr->status = DASD_CQR_DONE;
		cqr->stopclk = now;
		/* Start first request on queue if possible -> fast_io. */
		if (cqr->list.next != &device->ccw_queue) {
			next = list_entry(cqr->list.next,
					  struct dasd_ccw_req, list);
			if ((next->status == DASD_CQR_QUEUED) &&
			    (!device->stopped)) {
				if (device->discipline->start_IO(next) == 0)
					expires = next->expires;
				else
					DEV_MESSAGE(KERN_DEBUG, device, "%s",
						    "Interrupt fastpath "
						    "failed!");
			}
		}
	} else {		/* error */
		memcpy(&cqr->irb, irb, sizeof(struct irb));
		if (device->features & DASD_FEATURE_ERPLOG) {
			/* dump sense data */
			dasd_log_sense(cqr, irb);
		}
		switch (era) {
		case dasd_era_fatal:
			cqr->status = DASD_CQR_FAILED;
			cqr->stopclk = now;
			break;
		case dasd_era_recover:
			cqr->status = DASD_CQR_ERROR;
			break;
		default:
			BUG();
		}
	}
	if (expires != 0)
		dasd_set_timer(device, expires);
	else
		dasd_clear_timer(device);
	dasd_schedule_bh(device);
}

/*
 * posts the buffer_cache about a finalized request
 */
static inline void
dasd_end_request(struct request *req, int uptodate)
{
	if (end_that_request_first(req, uptodate, req->hard_nr_sectors))
		BUG();
	add_disk_randomness(req->rq_disk);
	end_that_request_last(req, uptodate);
}

/*
 * Process finished error recovery ccw.
 */
static inline void
__dasd_process_erp(struct dasd_device *device, struct dasd_ccw_req *cqr)
{
	dasd_erp_fn_t erp_fn;

	if (cqr->status == DASD_CQR_DONE)
		DBF_DEV_EVENT(DBF_NOTICE, device, "%s", "ERP successful");
	else
		DEV_MESSAGE(KERN_ERR, device, "%s", "ERP unsuccessful");
	erp_fn = device->discipline->erp_postaction(cqr);
	erp_fn(cqr);
}

/*
 * Process ccw request queue.
 */
static void
__dasd_process_ccw_queue(struct dasd_device *device,
			 struct list_head *final_queue)
{
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;
	dasd_erp_fn_t erp_fn;

restart:
	/* Process request with final status. */
	list_for_each_safe(l, n, &device->ccw_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		/* Stop list processing at the first non-final request. */
		if (cqr->status != DASD_CQR_DONE &&
		    cqr->status != DASD_CQR_FAILED &&
		    cqr->status != DASD_CQR_ERROR)
			break;
		/* Process requests with DASD_CQR_ERROR */
		if (cqr->status == DASD_CQR_ERROR) {
			if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
				cqr->status = DASD_CQR_FAILED;
				cqr->stopclk = get_clock();
			} else {
				if (cqr->irb.esw.esw0.erw.cons &&
				    test_bit(DASD_CQR_FLAGS_USE_ERP,
					     &cqr->flags)) {
					erp_fn = device->discipline->
						erp_action(cqr);
					erp_fn(cqr);
				} else
					dasd_default_erp_action(cqr);
			}
			goto restart;
		}

		/* First of all call extended error reporting. */
		if (dasd_eer_enabled(device) &&
		    cqr->status == DASD_CQR_FAILED) {
			dasd_eer_write(device, cqr, DASD_EER_FATALERROR);

			/* restart request */
			cqr->status = DASD_CQR_QUEUED;
			cqr->retries = 255;
			device->stopped |= DASD_STOPPED_QUIESCE;
			goto restart;
		}

		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			goto restart;
		}

		/* Rechain finished requests to final queue */
		cqr->endclk = get_clock();
		list_move_tail(&cqr->list, final_queue);
	}
}

static void
dasd_end_request_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct request *req;
	struct dasd_device *device;
	int status;

	req = (struct request *) data;
	device = cqr->device;
	dasd_profile_end(device, cqr, req);
	status = cqr->device->discipline->free_cp(cqr, req);
	spin_lock_irq(&device->request_queue_lock);
	dasd_end_request(req, status);
	spin_unlock_irq(&device->request_queue_lock);
}

/*
 * Fetch requests from the block device queue.
 */
static void
__dasd_process_blk_queue(struct dasd_device *device)
{
	struct request_queue *queue;
	struct request *req;
	struct dasd_ccw_req *cqr;
	int nr_queued;

	queue = device->request_queue;
	/* No queue ? Then there is nothing to do. */
	if (queue == NULL)
		return;

	/*
	 * We requeue request from the block device queue to the ccw
	 * queue only in two states. In state DASD_STATE_READY the
	 * partition detection is done and we need to requeue requests
	 * for that. State DASD_STATE_ONLINE is normal block device
	 * operation.
	 */
	if (device->state != DASD_STATE_READY &&
	    device->state != DASD_STATE_ONLINE)
		return;
	nr_queued = 0;
	/* Now we try to fetch requests from the request queue */
	list_for_each_entry(cqr, &device->ccw_queue, list)
		if (cqr->status == DASD_CQR_QUEUED)
			nr_queued++;
	while (!blk_queue_plugged(queue) &&
	       elv_next_request(queue) &&
	       nr_queued < DASD_CHANQ_MAX_SIZE) {
		req = elv_next_request(queue);

		if (device->features & DASD_FEATURE_READONLY &&
		    rq_data_dir(req) == WRITE) {
			DBF_DEV_EVENT(DBF_ERR, device,
				      "Rejecting write request %p",
				      req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		if (device->stopped & DASD_STOPPED_DC_EIO) {
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr = device->discipline->build_cp(device, req);
		if (IS_ERR(cqr)) {
			if (PTR_ERR(cqr) == -ENOMEM)
				break;	/* terminate request queue loop */
			if (PTR_ERR(cqr) == -EAGAIN) {
				/*
				 * The current request cannot be built right
				 * now, we have to try later. If this request
				 * is the head-of-queue we stop the device
				 * for 1/2 second.
				 */
				if (!list_empty(&device->ccw_queue))
					break;
				device->stopped |= DASD_STOPPED_PENDING;
				dasd_set_timer(device, HZ/2);
				break;
			}
			DBF_DEV_EVENT(DBF_ERR, device,
				      "CCW creation failed (rc=%ld) "
				      "on request %p",
				      PTR_ERR(cqr), req);
			blkdev_dequeue_request(req);
			dasd_end_request(req, 0);
			continue;
		}
		cqr->callback = dasd_end_request_cb;
		cqr->callback_data = (void *) req;
		cqr->status = DASD_CQR_QUEUED;
		blkdev_dequeue_request(req);
		list_add_tail(&cqr->list, &device->ccw_queue);
		dasd_profile_start(device, cqr, req);
		nr_queued++;
	}
}
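
/*
 * Flow sketch (illustrative, not part of the original source): each
 * block layer request is converted into a discipline-built channel
 * program, with at most DASD_CHANQ_MAX_SIZE (4) queued cqrs at any
 * time:
 *
 *	req = elv_next_request(queue);
 *	cqr = device->discipline->build_cp(device, req);
 *	blkdev_dequeue_request(req);
 *	list_add_tail(&cqr->list, &device->ccw_queue);
 *
 * Completion runs the other way: dasd_end_request_cb() releases the
 * channel program through free_cp() and finishes the block request
 * under request_queue_lock.
 */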

/*
 * Take a look at the first request on the ccw queue and check
 * if it reached its expire time. If so, terminate the IO.
 */
static void
__dasd_check_expire(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if ((cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) &&
	    (time_after_eq(jiffies, cqr->expires + cqr->starttime))) {
		if (device->discipline->term_IO(cqr) != 0) {
			/* Hmpf, try again in 5 sec */
			dasd_set_timer(device, 5*HZ);
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p, termination failed, "
				    "retrying in 5s",
				    (cqr->expires/HZ), cqr);
		} else {
			DEV_MESSAGE(KERN_ERR, device,
				    "internal error - timeout (%is) expired "
				    "for cqr %p (%i retries left)",
				    (cqr->expires/HZ), cqr, cqr->retries);
		}
	}
}

/*
 * Take a look at the first request on the ccw queue and check
 * if it needs to be started.
 */
static void
__dasd_start_head(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	int rc;

	if (list_empty(&device->ccw_queue))
		return;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	if (cqr->status != DASD_CQR_QUEUED)
		return;
	/* Non-temporary stop condition will trigger fail fast */
	if (device->stopped & ~DASD_STOPPED_PENDING &&
	    test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) &&
	    (!dasd_eer_enabled(device))) {
		cqr->status = DASD_CQR_FAILED;
		dasd_schedule_bh(device);
		return;
	}
	/* Don't try to start requests if device is stopped */
	if (device->stopped)
		return;

	rc = device->discipline->start_IO(cqr);
	if (rc == 0)
		dasd_set_timer(device, cqr->expires);
	else if (rc == -EACCES) {
		dasd_schedule_bh(device);
	} else
		/* Hmpf, try again in 1/2 sec */
		dasd_set_timer(device, 50);
}

static inline int
_wait_for_clear(struct dasd_ccw_req *cqr)
{
	return (cqr->status == DASD_CQR_QUEUED);
}

/*
 * Remove all requests from the ccw queue (all = '1') or only block device
 * requests in case all = '0'.
 * Take care of the erp-chain (chained via cqr->refers) and remove either
 * the whole erp-chain or none of the erp-requests.
 * If a request is currently running, term_IO is called and the request
 * is re-queued. Prior to removing the terminated request we need to wait
 * for the clear-interrupt.
 * In case termination is not possible we stop processing and just finish
 * the already moved requests.
 */
static int
dasd_flush_ccw_queue(struct dasd_device *device, int all)
{
	struct dasd_ccw_req *cqr, *orig, *n;
	int rc, i;

	struct list_head flush_queue;

	INIT_LIST_HEAD(&flush_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = 0;
restart:
	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
		/* get original request of erp request-chain */
		for (orig = cqr; orig->refers != NULL; orig = orig->refers);

		/* Flush all requests or only block device requests? */
		if (all == 0 && cqr->callback != dasd_end_request_cb &&
		    orig->callback != dasd_end_request_cb) {
			continue;
		}
		/* Check status and move request to flush_queue */
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			rc = device->discipline->term_IO(cqr);
			if (rc) {
				/* unable to terminate request */
				DEV_MESSAGE(KERN_ERR, device,
					    "dasd flush ccw_queue is unable "
					    " to terminate request %p",
					    cqr);
				/* stop flush processing */
				goto finished;
			}
			break;
		case DASD_CQR_QUEUED:
		case DASD_CQR_ERROR:
			/* set request to FAILED */
			cqr->stopclk = get_clock();
			cqr->status = DASD_CQR_FAILED;
			break;
		default: /* do not touch the others */
			break;
		}
		/* Rechain request (including erp chain) */
		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
			cqr->endclk = get_clock();
			list_move_tail(&cqr->list, &flush_queue);
		}
		if (i > 1)
			/* moved more than one request - need to restart */
			goto restart;
	}

finished:
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of flushed requests */
restart_cb:
	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
		if (cqr->status == DASD_CQR_CLEAR) {
			/* wait for clear interrupt! */
			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
			cqr->status = DASD_CQR_FAILED;
		}
		/* Process finished ERP request. */
		if (cqr->refers) {
			__dasd_process_erp(device, cqr);
			/* restart list_for_xx loop since dasd_process_erp
			 * might remove multiple elements */
			goto restart_cb;
		}
		/* call the callback function */
		cqr->endclk = get_clock();
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	return rc;
}

/*
 * Acquire the device lock and process queues for the device.
 */
static void
dasd_tasklet(struct dasd_device *device)
{
	struct list_head final_queue;
	struct list_head *l, *n;
	struct dasd_ccw_req *cqr;

	atomic_set(&device->tasklet_scheduled, 0);
	INIT_LIST_HEAD(&final_queue);
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	/* Check expire time of first request on the ccw queue. */
	__dasd_check_expire(device);
	/* Finish off requests on ccw queue */
	__dasd_process_ccw_queue(device, &final_queue);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	/* Now call the callback function of requests with final status */
	list_for_each_safe(l, n, &final_queue) {
		cqr = list_entry(l, struct dasd_ccw_req, list);
		list_del_init(&cqr->list);
		if (cqr->callback != NULL)
			(cqr->callback)(cqr, cqr->callback_data);
	}
	spin_lock_irq(&device->request_queue_lock);
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
	spin_unlock_irq(&device->request_queue_lock);
	dasd_put_device(device);
}

/*
 * Schedules a call to dasd_tasklet over the device tasklet.
 */
void
dasd_schedule_bh(struct dasd_device *device)
{
	/* Protect against rescheduling. */
	if (atomic_cmpxchg(&device->tasklet_scheduled, 0, 1) != 0)
		return;
	dasd_get_device(device);
	tasklet_hi_schedule(&device->tasklet);
}

/*
 * Queue a request to the head of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_head(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Queue a request to the tail of the ccw_queue. Start the I/O if
 * possible.
 */
void
dasd_add_request_tail(struct dasd_ccw_req *req)
{
	struct dasd_device *device;
	unsigned long flags;

	device = req->device;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	req->status = DASD_CQR_QUEUED;
	req->device = device;
	list_add_tail(&req->list, &device->ccw_queue);
	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}

/*
 * Wakeup callback.
 */
static void
dasd_wakeup_cb(struct dasd_ccw_req *cqr, void *data)
{
	wake_up((wait_queue_head_t *) data);
}

static inline int
_wait_for_wakeup(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = ((cqr->status == DASD_CQR_DONE ||
	       cqr->status == DASD_CQR_FAILED) &&
	      list_empty(&cqr->list));
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	return rc;
}

/*
 * Attempts to start a special ccw queue and waits for its completion.
 */
int
dasd_sleep_on(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}
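
/*
 * Canonical synchronous I/O pattern (a sketch; it matches what
 * dasd_generic_read_dev_chars() does near the end of this file):
 *
 *	cqr = dasd_smalloc_request(magic, cplength, datasize, device);
 *	if (IS_ERR(cqr))
 *		return PTR_ERR(cqr);
 *	(fill in cqr->cpaddr channel program, device, expires, retries)
 *	cqr->buildclk = get_clock();
 *	cqr->status = DASD_CQR_FILLED;
 *	rc = dasd_sleep_on(cqr);	(queued, woken on DONE/FAILED)
 *	dasd_sfree_request(cqr, cqr->device);
 */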

/*
 * Attempts to start a special ccw queue and wait interruptible
 * for its completion.
 */
int
dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc, finished;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add_tail(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	finished = 0;
	while (!finished) {
		rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
		if (rc != -ERESTARTSYS) {
			/* Request is final (done or failed) */
			rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
			break;
		}
		spin_lock_irq(get_ccwdev_lock(device->cdev));
		switch (cqr->status) {
		case DASD_CQR_IN_IO:
			/* terminate running cqr */
			if (device->discipline->term_IO) {
				cqr->retries = -1;
				device->discipline->term_IO(cqr);
				/* wait (non-interruptible) for final status
				 * because signal is still pending */
				spin_unlock_irq(get_ccwdev_lock(device->cdev));
				wait_event(wait_q, _wait_for_wakeup(cqr));
				spin_lock_irq(get_ccwdev_lock(device->cdev));
				rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
				finished = 1;
			}
			break;
		case DASD_CQR_QUEUED:
			/* request not started - remove it from the queue */
			list_del_init(&cqr->list);
			rc = -EIO;
			finished = 1;
			break;
		default:
			/* cqr with 'non-interruptible' status - just wait */
			break;
		}
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
	}
	return rc;
}

/*
 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
 * for eckd devices) the currently running request has to be terminated
 * and be put back to status queued, before the special request is added
 * to the head of the queue. Then the special request is waited on normally.
 */
static inline int
_dasd_term_running_cqr(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	if (list_empty(&device->ccw_queue))
		return 0;
	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
	return device->discipline->term_IO(cqr);
}

int
dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
{
	wait_queue_head_t wait_q;
	struct dasd_device *device;
	int rc;

	device = cqr->device;
	spin_lock_irq(get_ccwdev_lock(device->cdev));
	rc = _dasd_term_running_cqr(device);
	if (rc) {
		spin_unlock_irq(get_ccwdev_lock(device->cdev));
		return rc;
	}

	init_waitqueue_head(&wait_q);
	cqr->callback = dasd_wakeup_cb;
	cqr->callback_data = (void *) &wait_q;
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->list, &device->ccw_queue);

	/* let the bh start the request to keep them in order */
	dasd_schedule_bh(device);

	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	wait_event(wait_q, _wait_for_wakeup(cqr));

	/* Request status is either done or failed. */
	rc = (cqr->status == DASD_CQR_FAILED) ? -EIO : 0;
	return rc;
}

/*
 * Cancels a request that was started with dasd_sleep_on_req.
 * This is useful to timeout requests. The request will be
 * terminated if it is currently in i/o.
 * Returns 1 if the request has been terminated.
 */
int
dasd_cancel_req(struct dasd_ccw_req *cqr)
{
	struct dasd_device *device = cqr->device;
	unsigned long flags;
	int rc;

	rc = 0;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	switch (cqr->status) {
	case DASD_CQR_QUEUED:
		/* request was not started - just set to failed */
		cqr->status = DASD_CQR_FAILED;
		break;
	case DASD_CQR_IN_IO:
		/* request in IO - terminate IO and release again */
		if (device->discipline->term_IO(cqr) != 0)
			/* what to do if unable to terminate ??????
			   e.g. not _IN_IO */
			cqr->status = DASD_CQR_FAILED;
		cqr->stopclk = get_clock();
		rc = 1;
		break;
	case DASD_CQR_DONE:
	case DASD_CQR_FAILED:
		/* already finished - do nothing */
		break;
	default:
		DEV_MESSAGE(KERN_ALERT, device,
			    "invalid status %02x in request",
			    cqr->status);
		BUG();

	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	dasd_schedule_bh(device);
	return rc;
}

/*
 * SECTION: Block device operations (request queue, partitions, open, release).
 */

/*
 * Dasd request queue function. Called from ll_rw_blk.c
 */
static void
do_dasd_request(struct request_queue *queue)
{
	struct dasd_device *device;

	device = (struct dasd_device *) queue->queuedata;
	spin_lock(get_ccwdev_lock(device->cdev));
	/* Get new request from the block device request queue */
	__dasd_process_blk_queue(device);
	/* Now check if the head of the ccw queue needs to be started. */
	__dasd_start_head(device);
	spin_unlock(get_ccwdev_lock(device->cdev));
}

/*
 * Allocate and initialize request queue and default I/O scheduler.
 */
static int
dasd_alloc_queue(struct dasd_device *device)
{
	int rc;

	device->request_queue = blk_init_queue(do_dasd_request,
					       &device->request_queue_lock);
	if (device->request_queue == NULL)
		return -ENOMEM;

	device->request_queue->queuedata = device;

	elevator_exit(device->request_queue->elevator);
	rc = elevator_init(device->request_queue, "deadline");
	if (rc) {
		blk_cleanup_queue(device->request_queue);
		return rc;
	}
	return 0;
}

/*
 * Set up request queue parameters (block size, I/O limits, barriers).
 */
static void
dasd_setup_queue(struct dasd_device *device)
{
	int max;

	blk_queue_hardsect_size(device->request_queue, device->bp_block);
	max = device->discipline->max_blocks << device->s2b_shift;
	blk_queue_max_sectors(device->request_queue, max);
	blk_queue_max_phys_segments(device->request_queue, -1L);
	blk_queue_max_hw_segments(device->request_queue, -1L);
	blk_queue_max_segment_size(device->request_queue, -1L);
	blk_queue_segment_boundary(device->request_queue, -1L);
	blk_queue_ordered(device->request_queue, QUEUE_ORDERED_TAG, NULL);
}

/*
 * Deactivate and free request queue.
 */
static void
dasd_free_queue(struct dasd_device *device)
{
	if (device->request_queue) {
		blk_cleanup_queue(device->request_queue);
		device->request_queue = NULL;
	}
}
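
/*
 * Worked example for dasd_setup_queue() above (hedged, illustrative
 * numbers): with a 4096 byte bp_block, s2b_shift is 3 (eight 512 byte
 * sectors per block), so a discipline max_blocks of 240 would allow
 * requests of up to
 *
 *	240 << 3 = 1920 sectors = 960 KiB
 *
 * The -1L segment limits are intended as "no practical restriction";
 * the channel subsystem handles scatter-gather via IDAL lists rather
 * than block layer segments.
 */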

/*
 * Flush request on the request queue.
 */
static void
dasd_flush_request_queue(struct dasd_device *device)
{
	struct request *req;

	if (!device->request_queue)
		return;

	spin_lock_irq(&device->request_queue_lock);
	while ((req = elv_next_request(device->request_queue))) {
		blkdev_dequeue_request(req);
		dasd_end_request(req, 0);
	}
	spin_unlock_irq(&device->request_queue_lock);
}

static int
dasd_open(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;
	int rc;

	atomic_inc(&device->open_count);
	if (test_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		rc = -ENODEV;
		goto unlock;
	}

	if (!try_module_get(device->discipline->owner)) {
		rc = -EINVAL;
		goto unlock;
	}

	if (dasd_probeonly) {
		DEV_MESSAGE(KERN_INFO, device, "%s",
			    "No access to device due to probeonly mode");
		rc = -EPERM;
		goto out;
	}

	if (device->state <= DASD_STATE_BASIC) {
		DBF_DEV_EVENT(DBF_ERR, device, " %s",
			      " Cannot open unrecognized device");
		rc = -ENODEV;
		goto out;
	}

	return 0;

out:
	module_put(device->discipline->owner);
unlock:
	atomic_dec(&device->open_count);
	return rc;
}

static int
dasd_release(struct inode *inp, struct file *filp)
{
	struct gendisk *disk = inp->i_bdev->bd_disk;
	struct dasd_device *device = disk->private_data;

	atomic_dec(&device->open_count);
	module_put(device->discipline->owner);
	return 0;
}

/*
 * Return disk geometry.
 */
static int
dasd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct dasd_device *device;

	device = bdev->bd_disk->private_data;
	if (!device)
		return -ENODEV;

	if (!device->discipline ||
	    !device->discipline->fill_geometry)
		return -EINVAL;

	device->discipline->fill_geometry(device, geo);
	geo->start = get_start_sect(bdev) >> device->s2b_shift;
	return 0;
}

struct block_device_operations
dasd_device_operations = {
	.owner		= THIS_MODULE,
	.open		= dasd_open,
	.release	= dasd_release,
	.ioctl		= dasd_ioctl,
	.compat_ioctl	= dasd_compat_ioctl,
	.getgeo		= dasd_getgeo,
};

static void
dasd_exit(void)
{
#ifdef CONFIG_PROC_FS
	dasd_proc_exit();
#endif
	dasd_eer_exit();
	if (dasd_page_cache != NULL) {
		kmem_cache_destroy(dasd_page_cache);
		dasd_page_cache = NULL;
	}
	dasd_gendisk_exit();
	dasd_devmap_exit();
	if (dasd_debug_area != NULL) {
		debug_unregister(dasd_debug_area);
		dasd_debug_area = NULL;
	}
}
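
/*
 * Userspace view (a hedged sketch, not part of the driver): the
 * .getgeo handler above backs the HDIO_GETGEO ioctl, so a tool can
 * query the translated geometry with something like
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/dasda", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *		printf("%u cyl, %u hd, %u sec, start %lu\n",
 *		       geo.cylinders, geo.heads, geo.sectors,
 *		       geo.start);
 *
 * Note that geo.start is returned in device blocks (sectors shifted
 * right by s2b_shift), not in 512 byte sectors.
 */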
1939 */ 1940 int 1941 dasd_generic_probe (struct ccw_device *cdev, 1942 struct dasd_discipline *discipline) 1943 { 1944 int ret; 1945 1946 ret = ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); 1947 if (ret) { 1948 printk(KERN_WARNING 1949 "dasd_generic_probe: could not set ccw-device options " 1950 "for %s\n", cdev->dev.bus_id); 1951 return ret; 1952 } 1953 ret = dasd_add_sysfs_files(cdev); 1954 if (ret) { 1955 printk(KERN_WARNING 1956 "dasd_generic_probe: could not add sysfs entries " 1957 "for %s\n", cdev->dev.bus_id); 1958 return ret; 1959 } 1960 cdev->handler = &dasd_int_handler; 1961 1962 /* 1963 * Automatically online either all dasd devices (dasd_autodetect) 1964 * or all devices specified with dasd= parameters during 1965 * initial probe. 1966 */ 1967 if ((dasd_get_feature(cdev, DASD_FEATURE_INITIAL_ONLINE) > 0 ) || 1968 (dasd_autodetect && dasd_busid_known(cdev->dev.bus_id) != 0)) 1969 ret = ccw_device_set_online(cdev); 1970 if (ret) 1971 printk(KERN_WARNING 1972 "dasd_generic_probe: could not initially " 1973 "online ccw-device %s; return code: %d\n", 1974 cdev->dev.bus_id, ret); 1975 return 0; 1976 } 1977 1978 /* 1979 * This will one day be called from a global not_oper handler. 1980 * It is also used by driver_unregister during module unload. 1981 */ 1982 void 1983 dasd_generic_remove (struct ccw_device *cdev) 1984 { 1985 struct dasd_device *device; 1986 1987 cdev->handler = NULL; 1988 1989 dasd_remove_sysfs_files(cdev); 1990 device = dasd_device_from_cdev(cdev); 1991 if (IS_ERR(device)) 1992 return; 1993 if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) { 1994 /* Already doing offline processing */ 1995 dasd_put_device(device); 1996 return; 1997 } 1998 /* 1999 * This device is removed unconditionally. Set offline 2000 * flag to prevent dasd_open from opening it while it is 2001 * no quite down yet. 2002 */ 2003 dasd_set_target_state(device, DASD_STATE_NEW); 2004 /* dasd_delete_device destroys the device reference. */ 2005 dasd_delete_device(device); 2006 } 2007 2008 /* 2009 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 2010 * the device is detected for the first time and is supposed to be used 2011 * or the user has started activation through sysfs. 
 */
int
dasd_generic_set_online (struct ccw_device *cdev,
			 struct dasd_discipline *base_discipline)
{
	struct dasd_discipline *discipline;
	struct dasd_device *device;
	int rc;

	/* first online clears initial online feature flag */
	dasd_set_feature(cdev, DASD_FEATURE_INITIAL_ONLINE, 0);
	device = dasd_create_device(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);

	discipline = base_discipline;
	if (device->features & DASD_FEATURE_USEDIAG) {
		if (!dasd_diag_discipline_pointer) {
			printk(KERN_WARNING
			       "dasd_generic couldn't online device %s "
			       "- discipline DIAG not available\n",
			       cdev->dev.bus_id);
			dasd_delete_device(device);
			return -ENODEV;
		}
		discipline = dasd_diag_discipline_pointer;
	}
	if (!try_module_get(base_discipline->owner)) {
		dasd_delete_device(device);
		return -EINVAL;
	}
	if (!try_module_get(discipline->owner)) {
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return -EINVAL;
	}
	device->base_discipline = base_discipline;
	device->discipline = discipline;

	rc = discipline->check_device(device);
	if (rc) {
		printk(KERN_WARNING
		       "dasd_generic couldn't online device %s "
		       "with discipline %s rc=%i\n",
		       cdev->dev.bus_id, discipline->name, rc);
		module_put(discipline->owner);
		module_put(base_discipline->owner);
		dasd_delete_device(device);
		return rc;
	}

	dasd_set_target_state(device, DASD_STATE_ONLINE);
	if (device->state <= DASD_STATE_KNOWN) {
		printk(KERN_WARNING
		       "dasd_generic discipline not found for %s\n",
		       cdev->dev.bus_id);
		rc = -ENODEV;
		dasd_set_target_state(device, DASD_STATE_NEW);
		dasd_delete_device(device);
	} else
		pr_debug("dasd_generic device %s found\n",
			 cdev->dev.bus_id);

	/* FIXME: we have to wait for the root device but we don't want
	 * to wait for each single device but for all at once. */
	wait_event(dasd_init_waitq, _wait_for_device(device));

	dasd_put_device(device);

	return rc;
}

int
dasd_generic_set_offline (struct ccw_device *cdev)
{
	struct dasd_device *device;
	int max_count, open_count;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return PTR_ERR(device);
	if (test_and_set_bit(DASD_FLAG_OFFLINE, &device->flags)) {
		/* Already doing offline processing */
		dasd_put_device(device);
		return 0;
	}
	/*
	 * We must make sure that this device is currently not in use.
	 * The open_count is increased for every opener; that includes
	 * the blkdev_get in dasd_scan_partitions. We are only interested
	 * in the other openers.
	 */
	max_count = device->bdev ? 0 : -1;
	open_count = (int) atomic_read(&device->open_count);
	if (open_count > max_count) {
		if (open_count > 0)
			printk(KERN_WARNING "Can't offline dasd device with "
			       "open count = %i.\n",
			       open_count);
		else
			printk(KERN_WARNING "%s",
			       "Can't offline dasd device due to internal "
			       "use\n");
		clear_bit(DASD_FLAG_OFFLINE, &device->flags);
		dasd_put_device(device);
		return -EBUSY;
	}
	dasd_set_target_state(device, DASD_STATE_NEW);
	/*
	 * dasd_delete_device destroys the device reference.
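	 * After it returns the struct dasd_device must not be
	 * touched any more.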
	 */
	dasd_delete_device(device);

	return 0;
}

int
dasd_generic_notify(struct ccw_device *cdev, int event)
{
	struct dasd_device *device;
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int ret;

	device = dasd_device_from_cdev(cdev);
	if (IS_ERR(device))
		return 0;
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	ret = 0;
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		/* First of all call extended error reporting. */
		dasd_eer_write(device, NULL, DASD_EER_NOPATH);

		if (device->state < DASD_STATE_BASIC)
			break;
		/* Device is active. We want to keep it. */
		if (test_bit(DASD_FLAG_DSC_ERROR, &device->flags)) {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO)
					cqr->status = DASD_CQR_FAILED;
			device->stopped |= DASD_STOPPED_DC_EIO;
		} else {
			list_for_each_entry(cqr, &device->ccw_queue, list)
				if (cqr->status == DASD_CQR_IN_IO) {
					cqr->status = DASD_CQR_QUEUED;
					cqr->retries++;
				}
			device->stopped |= DASD_STOPPED_DC_WAIT;
			dasd_set_timer(device, 0);
		}
		dasd_schedule_bh(device);
		ret = 1;
		break;
	case CIO_OPER:
		/* FIXME: add a sanity check. */
		device->stopped &= ~(DASD_STOPPED_DC_WAIT|DASD_STOPPED_DC_EIO);
		dasd_schedule_bh(device);
		ret = 1;
		break;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	dasd_put_device(device);
	return ret;
}

static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
						   void *rdc_buffer,
						   int rdc_buffer_size,
						   char *magic)
{
	struct dasd_ccw_req *cqr;
	struct ccw1 *ccw;

	cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);

	if (IS_ERR(cqr)) {
		DEV_MESSAGE(KERN_WARNING, device, "%s",
			    "Could not allocate RDC request");
		return cqr;
	}

	ccw = cqr->cpaddr;
	ccw->cmd_code = CCW_CMD_RDC;
	ccw->cda = (__u32)(addr_t)rdc_buffer;
	ccw->count = rdc_buffer_size;

	cqr->device = device;
	cqr->expires = 10*HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	cqr->retries = 2;
	cqr->buildclk = get_clock();
	cqr->status = DASD_CQR_FILLED;
	return cqr;
}

int dasd_generic_read_dev_chars(struct dasd_device *device, char *magic,
				void **rdc_buffer, int rdc_buffer_size)
{
	int ret;
	struct dasd_ccw_req *cqr;

	cqr = dasd_generic_build_rdc(device, *rdc_buffer, rdc_buffer_size,
				     magic);
	if (IS_ERR(cqr))
		return PTR_ERR(cqr);

	ret = dasd_sleep_on(cqr);
	dasd_sfree_request(cqr, cqr->device);
	return ret;
}
EXPORT_SYMBOL_GPL(dasd_generic_read_dev_chars);
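
/*
 * Illustrative use of dasd_generic_read_dev_chars, e.g. from a
 * discipline's check_device routine. This is only a sketch; the
 * structure, magic string and private pointer below are assumptions,
 * not definitions from this file:
 *
 *	struct dasd_xxx_characteristics *rdc = &private->rdc_data;
 *	rc = dasd_generic_read_dev_chars(device, "XXX", (void **) &rdc,
 *					 sizeof(*rdc));
 *	if (rc)
 *		DEV_MESSAGE(KERN_WARNING, device,
 *			    "Read device characteristics failed, rc=%d", rc);
 */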

static int __init
dasd_init(void)
{
	int rc;

	init_waitqueue_head(&dasd_init_waitq);
	init_waitqueue_head(&dasd_flush_wq);

	/* register 'common' DASD debug area, used for all DBF_XXX calls */
	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof(long));
	if (dasd_debug_area == NULL) {
		rc = -ENOMEM;
		goto failed;
	}
	debug_register_view(dasd_debug_area, &debug_sprintf_view);
	debug_set_level(dasd_debug_area, DBF_WARNING);

	DBF_EVENT(DBF_EMERG, "%s", "debug area created");

	dasd_diag_discipline_pointer = NULL;

	rc = dasd_devmap_init();
	if (rc)
		goto failed;
	rc = dasd_gendisk_init();
	if (rc)
		goto failed;
	rc = dasd_parse();
	if (rc)
		goto failed;
	rc = dasd_eer_init();
	if (rc)
		goto failed;
#ifdef CONFIG_PROC_FS
	rc = dasd_proc_init();
	if (rc)
		goto failed;
#endif

	return 0;
failed:
	MESSAGE(KERN_INFO, "%s", "initialization not performed due to errors");
	dasd_exit();
	return rc;
}

module_init(dasd_init);
module_exit(dasd_exit);

EXPORT_SYMBOL(dasd_debug_area);
EXPORT_SYMBOL(dasd_diag_discipline_pointer);

EXPORT_SYMBOL(dasd_add_request_head);
EXPORT_SYMBOL(dasd_add_request_tail);
EXPORT_SYMBOL(dasd_cancel_req);
EXPORT_SYMBOL(dasd_clear_timer);
EXPORT_SYMBOL(dasd_enable_device);
EXPORT_SYMBOL(dasd_int_handler);
EXPORT_SYMBOL(dasd_kfree_request);
EXPORT_SYMBOL(dasd_kick_device);
EXPORT_SYMBOL(dasd_kmalloc_request);
EXPORT_SYMBOL(dasd_schedule_bh);
EXPORT_SYMBOL(dasd_set_target_state);
EXPORT_SYMBOL(dasd_set_timer);
EXPORT_SYMBOL(dasd_sfree_request);
EXPORT_SYMBOL(dasd_sleep_on);
EXPORT_SYMBOL(dasd_sleep_on_immediatly);
EXPORT_SYMBOL(dasd_sleep_on_interruptible);
EXPORT_SYMBOL(dasd_smalloc_request);
EXPORT_SYMBOL(dasd_start_IO);
EXPORT_SYMBOL(dasd_term_IO);

EXPORT_SYMBOL_GPL(dasd_generic_probe);
EXPORT_SYMBOL_GPL(dasd_generic_remove);
EXPORT_SYMBOL_GPL(dasd_generic_notify);
EXPORT_SYMBOL_GPL(dasd_generic_set_online);
EXPORT_SYMBOL_GPL(dasd_generic_set_offline);