/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                    IBM Corporation
 *    Author(s): Cornelia Huck (cohuck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"

int
device_is_online(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return 0;
        cdev = sch->dev.driver_data;
        return (cdev->private->state == DEV_STATE_ONLINE);
}

int
device_is_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return 0;
        cdev = sch->dev.driver_data;
        return (cdev->private->state == DEV_STATE_DISCONNECTED ||
                cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

void
device_set_disconnected(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 0);
        cdev->private->flags.fake_irb = 0;
        cdev->private->state = DEV_STATE_DISCONNECTED;
}

void
device_set_waiting(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 10*HZ);
        cdev->private->state = DEV_STATE_WAIT4IO;
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
        struct ccw_device *cdev;

        cdev = (struct ccw_device *) data;
        spin_lock_irq(cdev->ccwlock);
        dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
        spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
        if (expires == 0) {
                del_timer(&cdev->private->timer);
                return;
        }
        if (timer_pending(&cdev->private->timer)) {
                if (mod_timer(&cdev->private->timer, jiffies + expires))
                        return;
        }
        cdev->private->timer.function = ccw_device_timeout;
        cdev->private->timer.data = (unsigned long) cdev;
        cdev->private->timer.expires = jiffies + expires;
        add_timer(&cdev->private->timer);
}

/* Kill any pending timers after machine check. */
void
device_kill_pending_timer(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        ccw_device_set_timeout(cdev, 0);
}
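/*
 * Illustrative sketch, not part of the driver logic: ccw_device_set_timeout()
 * takes a jiffies-relative value and arms, re-arms or cancels the per-device
 * timer accordingly. The state machine below uses it roughly like this (the
 * constants mirror the ones used later in this file):
 *
 *      ccw_device_set_timeout(cdev, 60*HZ);    // arm: give recognition 60s
 *      ccw_device_set_timeout(cdev, 3*HZ);     // re-arm: retry in 3s
 *      ccw_device_set_timeout(cdev, 0);        // cancel a pending timer
 *
 * When the timer fires, ccw_device_timeout() feeds DEV_EVENT_TIMEOUT into the
 * state machine under the ccw device lock.
 */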
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        sch = to_subchannel(cdev->dev.parent);
        ret = stsch(sch->schid, &sch->schib);
        if (ret || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
                /* Not operational or no activity -> done. */
                return 0;
        /* Stage 1: cancel io. */
        if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
            !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                ret = cio_cancel(sch);
                if (ret != -EINVAL)
                        return ret;
                /* cancel io unsuccessful. From now on it is asynchronous. */
                cdev->private->iretry = 3;      /* 3 halt retries. */
        }
        if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
                /* Stage 2: halt io. */
                if (cdev->private->iretry) {
                        cdev->private->iretry--;
                        ret = cio_halt(sch);
                        return (ret == 0) ? -EBUSY : ret;
                }
                /* halt io unsuccessful. */
                cdev->private->iretry = 255;    /* 255 clear retries. */
        }
        /* Stage 3: clear io. */
        if (cdev->private->iretry) {
                cdev->private->iretry--;
                ret = cio_clear (sch);
                return (ret == 0) ? -EBUSY : ret;
        }
        panic("Can't stop i/o on subchannel.\n");
}
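/*
 * Illustrative sketch (it mirrors how the timeout handlers below drive this
 * function; not a separate interface): since halt/clear complete
 * asynchronously, callers invoke ccw_device_cancel_halt_clear() once per
 * timeout event and re-arm a short timer for as long as it returns -EBUSY:
 *
 *      ret = ccw_device_cancel_halt_clear(cdev);
 *      switch (ret) {
 *      case 0:         // subchannel is idle now, proceed
 *              ...
 *              break;
 *      case -ENODEV:   // device not operational
 *              ...
 *              break;
 *      default:        // -EBUSY: interrupt still expected, try again later
 *              ccw_device_set_timeout(cdev, 3*HZ);
 *      }
 */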
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        cdev->private->flags.recog_done = 1;
        /*
         * Check if cu type and device type still match. If
         * not, it is certainly another device and we have to
         * de- and re-register. Also check here for non-matching devno.
         */
        if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
            cdev->id.cu_model != cdev->private->senseid.cu_model ||
            cdev->id.dev_type != cdev->private->senseid.dev_type ||
            cdev->id.dev_model != cdev->private->senseid.dev_model ||
            cdev->private->devno != sch->schib.pmcw.dev) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_do_unreg_rereg, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
                return 0;
        }
        cdev->private->flags.donotify = 1;
        return 1;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static inline void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
        int mask, i;

        for (i = 0; i < 8; i++) {
                mask = 0x80 >> i;
                if (!(sch->lpm & mask))
                        continue;
                if (old_lpm & mask)
                        continue;
                chpid_is_actually_online(sch->schib.pmcw.chpid[i]);
        }
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;
        int notify, old_lpm, same_dev;

        sch = to_subchannel(cdev->dev.parent);

        ccw_device_set_timeout(cdev, 0);
        cio_disable_subchannel(sch);
        /*
         * Now that we tried recognition, we have performed device selection
         * through ssch() and the path information is up to date.
         */
        old_lpm = sch->lpm;
        stsch(sch->schid, &sch->schib);
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Check since device may again have become not operational. */
        if (!sch->schib.pmcw.dnv)
                state = DEV_STATE_NOT_OPER;
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
                /* Force reprobe on all chpids. */
                old_lpm = 0;
        if (sch->lpm != old_lpm)
                __recover_lost_chpids(sch, old_lpm);
        if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                if (state == DEV_STATE_NOT_OPER) {
                        cdev->private->flags.recog_done = 1;
                        cdev->private->state = DEV_STATE_DISCONNECTED;
                        return;
                }
                /* Boxed devices don't need extra treatment. */
        }
        notify = 0;
        same_dev = 0; /* Keep the compiler quiet... */
        switch (state) {
        case DEV_STATE_NOT_OPER:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : unknown device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        case DEV_STATE_OFFLINE:
                if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
                        same_dev = ccw_device_handle_oper(cdev);
                        notify = 1;
                }
                /* fill out sense information */
                cdev->id = (struct ccw_device_id) {
                        .cu_type   = cdev->private->senseid.cu_type,
                        .cu_model  = cdev->private->senseid.cu_model,
                        .dev_type  = cdev->private->senseid.dev_type,
                        .dev_model = cdev->private->senseid.dev_model,
                };
                if (notify) {
                        cdev->private->state = DEV_STATE_OFFLINE;
                        if (same_dev) {
                                /* Get device online again. */
                                ccw_device_online(cdev);
                                wake_up(&cdev->private->wait_q);
                        }
                        return;
                }
                /* Issue device info message. */
                CIO_DEBUG(KERN_INFO, 2, "SenseID : device 0.%x.%04x reports: "
                          "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
                          "%04X/%02X\n",
                          cdev->private->ssid, cdev->private->devno,
                          cdev->id.cu_type, cdev->id.cu_model,
                          cdev->id.dev_type, cdev->id.dev_model);
                break;
        case DEV_STATE_BOXED:
                CIO_DEBUG(KERN_WARNING, 2,
                          "SenseID : boxed device %04x on subchannel "
                          "0.%x.%04x\n", cdev->private->devno,
                          sch->schid.ssid, sch->schid.sch_no);
                break;
        }
        cdev->private->state = state;
        io_subchannel_recog_done(cdev);
        if (state != DEV_STATE_NOT_OPER)
                wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:            /* Sense id stopped by timeout. */
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

static void
ccw_device_oper_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_OPER) : 0;
        if (!ret)
                /* Driver doesn't want device back. */
                ccw_device_do_unreg_rereg((void *)cdev);
        else
                wake_up(&cdev->private->wait_q);
}
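/*
 * Illustrative sketch: the notify callback consulted above is provided by the
 * subchannel driver (sch->driver->notify) and decides whether a device that
 * reappeared or lost its paths should be kept. A hypothetical implementation
 * (name invented for illustration) could look like this; returning non-zero
 * means "keep the device", returning 0 lets the common code unregister or
 * re-register it:
 *
 *      static int example_notify(struct device *dev, int event)
 *      {
 *              switch (event) {
 *              case CIO_OPER:          // device is operational again
 *                      return 1;       // take it back online
 *              case CIO_GONE:          // device vanished
 *              case CIO_NO_PATH:       // no usable path left
 *              default:
 *                      return 0;       // let it be unregistered
 *              }
 *      }
 */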
/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);

        if (state != DEV_STATE_ONLINE)
                cio_disable_subchannel(sch);

        /* Reset device status. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        cdev->private->state = state;


        if (state == DEV_STATE_BOXED)
                CIO_DEBUG(KERN_WARNING, 2,
                          "Boxed device %04x on subchannel %04x\n",
                          cdev->private->devno, sch->schid.sch_no);

        if (cdev->private->flags.donotify) {
                cdev->private->flags.donotify = 0;
                PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
                             (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);

        if (css_init_done && state != DEV_STATE_ONLINE)
                put_device (&cdev->dev);
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        switch (err) {
        case 0:
                /* Start Path Group verification. */
                sch->vpm = 0;   /* Start with no path groups set. */
                cdev->private->state = DEV_STATE_VERIFY;
                ccw_device_verify_start(cdev);
                break;
        case -ETIME:            /* Sense path group id stopped by timeout. */
        case -EUSERS:           /* device is reserved for someone else. */
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return ret;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        /*
         * We used to start here with a sense pgid to find out whether a device
         * is locked by someone else. Unfortunately, the sense pgid command
         * code has other meanings on devices predating the path grouping
         * algorithm, so we start with sense id and box the device after a
         * timeout (or if sense pgid during path verification detects the
         * device is locked, as may happen on newer devices).
         */
        cdev->private->flags.recog_done = 0;
        cdev->private->state = DEV_STATE_SENSE_ID;
        ccw_device_sense_id_start(cdev);
        return 0;
}
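/*
 * Illustrative summary (derived from the code above and below, not a
 * normative spec): a recognition attempt walks the device through
 *
 *      DEV_STATE_NOT_OPER / DEV_STATE_BOXED
 *        -> DEV_STATE_SENSE_ID     (sense id started, 60s timeout armed)
 *        -> ccw_device_sense_id_done()
 *        -> ccw_device_recog_done(OFFLINE | BOXED | NOT_OPER)
 *
 * with -ETIME from sense id mapping to DEV_STATE_BOXED and any other error
 * to DEV_STATE_NOT_OPER.
 */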
/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_recog_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}


static void
ccw_device_nopath_notify(void *data)
{
        struct ccw_device *cdev;
        struct subchannel *sch;
        int ret;

        cdev = (struct ccw_device *)data;
        sch = to_subchannel(cdev->dev.parent);
        /* Extra sanity. */
        if (sch->lpm)
                return;
        ret = (sch->driver && sch->driver->notify) ?
                sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
        if (!ret) {
                if (get_device(&sch->dev)) {
                        /* Driver doesn't want to keep device. */
                        cio_disable_subchannel(sch);
                        if (get_device(&cdev->dev)) {
                                PREPARE_WORK(&cdev->private->kick_work,
                                             ccw_device_call_sch_unregister,
                                             (void *)cdev);
                                queue_work(ccw_device_work,
                                           &cdev->private->kick_work);
                        } else
                                put_device(&sch->dev);
                }
        } else {
                cio_disable_subchannel(sch);
                ccw_device_set_timeout(cdev, 0);
                cdev->private->flags.fake_irb = 0;
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
        }
}

void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
        cdev->private->flags.doverify = 0;
        switch (err) {
        case -EOPNOTSUPP: /* path grouping not supported, just set online. */
                cdev->private->options.pgroup = 0;
                /* fall through */
        case 0:
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                /* Deliver fake irb to device driver, if needed. */
                if (cdev->private->flags.fake_irb) {
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                        cdev->private->irb.scsw = (struct scsw) {
                                .cc    = 1,
                                .fctl  = SCSW_FCTL_START_FUNC,
                                .actl  = SCSW_ACTL_START_PEND,
                                .stctl = SCSW_STCTL_STATUS_PEND,
                        };
                        cdev->private->flags.fake_irb = 0;
                        if (cdev->handler)
                                cdev->handler(cdev, cdev->private->intparm,
                                              &cdev->private->irb);
                        memset(&cdev->private->irb, 0, sizeof(struct irb));
                }
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
        struct subchannel *sch;
        int ret;

        if ((cdev->private->state != DEV_STATE_OFFLINE) &&
            (cdev->private->state != DEV_STATE_BOXED))
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (css_init_done && !get_device(&cdev->dev))
                return -ENODEV;
        ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
        if (ret != 0) {
                /* Couldn't enable the subchannel for i/o. Sick device. */
                if (ret == -ENODEV)
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return ret;
        }
        /* Do we want to do path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state online immediately. */
                ccw_device_done(cdev, DEV_STATE_ONLINE);
                return 0;
        }
        /* Do a SensePGID first. */
        cdev->private->state = DEV_STATE_SENSE_PGID;
        ccw_device_sense_pgid_start(cdev);
        return 0;
}
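/*
 * Illustrative summary (derived from ccw_device_online() and
 * ccw_device_verify_done() above): with path grouping enabled the online
 * sequence is
 *
 *      DEV_STATE_OFFLINE / DEV_STATE_BOXED
 *        -> DEV_STATE_SENSE_PGID -> DEV_STATE_VERIFY -> DEV_STATE_ONLINE
 *
 * If flags.fake_irb is set (a start request was accepted while verification
 * was still in progress, see ccw_device_start() in device_ops.c),
 * verify_done hands the driver a synthesized irb with cc = 1 and the start
 * function pending so that the deferred request is reported consistently.
 */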
void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
        switch (err) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                break;
        case -ETIME:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        default:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        }
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE) {
                if (sch->schib.scsw.actl != 0)
                        return -EBUSY;
                return -EINVAL;
        }
        if (sch->schib.scsw.actl != 0)
                return -EBUSY;
        /* Are we doing path grouping? */
        if (!cdev->private->options.pgroup) {
                /* No, set state offline immediately. */
                ccw_device_done(cdev, DEV_STATE_OFFLINE);
                return 0;
        }
        /* Start Set Path Group commands. */
        cdev->private->state = DEV_STATE_DISBAND_PGID;
        ccw_device_disband_start(cdev);
        return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                ccw_device_done(cdev, DEV_STATE_BOXED);
                break;
        case -ENODEV:
                ccw_device_done(cdev, DEV_STATE_NOT_OPER);
                break;
        default:
                ccw_device_set_timeout(cdev, 3*HZ);
        }
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event while offline.
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        cdev->private->state = DEV_STATE_NOT_OPER;
        sch = to_subchannel(cdev->dev.parent);
        if (get_device(&cdev->dev)) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_call_sch_unregister, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
}

/*
 * Handle not operational event while online.
 */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (sch->driver->notify &&
            sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
                ccw_device_set_timeout(cdev, 0);
                cdev->private->flags.fake_irb = 0;
                cdev->private->state = DEV_STATE_DISCONNECTED;
                wake_up(&cdev->private->wait_q);
                return;
        }
        cdev->private->state = DEV_STATE_NOT_OPER;
        cio_disable_subchannel(sch);
        if (sch->schib.scsw.actl != 0) {
                // FIXME: not-oper indication to device driver ?
                ccw_device_call_handler(cdev);
        }
        if (get_device(&cdev->dev)) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_call_sch_unregister, (void *)cdev);
                queue_work(ccw_device_work, &cdev->private->kick_work);
        }
        wake_up(&cdev->private->wait_q);
}
/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        if (!cdev->private->options.pgroup)
                return;
        if (cdev->private->state == DEV_STATE_W4SENSE) {
                cdev->private->flags.doverify = 1;
                return;
        }
        sch = to_subchannel(cdev->dev.parent);
        /*
         * Since we might not just be coming from an interrupt from the
         * subchannel we have to update the schib.
         */
        stsch(sch->schid, &sch->schib);

        if (sch->schib.scsw.actl != 0 ||
            (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
                /*
                 * No final status yet or final status not yet delivered
                 * to the device driver. Can't do path verification now,
                 * delay until final status was delivered.
                 */
                cdev->private->flags.doverify = 1;
                return;
        }
        /* Device is idle, we can do the path verification. */
        cdev->private->state = DEV_STATE_VERIFY;
        ccw_device_verify_start(cdev);
}

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if ((irb->scsw.stctl ==
             (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
            && (!irb->scsw.cc)) {
                if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                    !irb->esw.esw0.erw.cons) {
                        /* Unit check but no sense data. Need basic sense. */
                        if (ccw_device_do_sense(cdev, irb) != 0)
                                goto call_handler_unsol;
                        memcpy(irb, &cdev->private->irb, sizeof(struct irb));
                        cdev->private->state = DEV_STATE_W4SENSE;
                        cdev->private->intparm = 0;
                        return;
                }
call_handler_unsol:
                if (cdev->handler)
                        cdev->handler (cdev, 0, irb);
                return;
        }
        /* Accumulate status and find out if a basic sense is needed. */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}
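/*
 * Illustrative summary of the interrupt path above (not a normative spec):
 *
 *      unsolicited interrupt (alert status, cc == 0)
 *        - unit check without concurrent sense -> start basic sense,
 *          go to DEV_STATE_W4SENSE
 *        - otherwise -> hand the raw irb to the driver with intparm 0
 *      solicited interrupt
 *        - accumulate status; if basic sense is needed -> DEV_STATE_W4SENSE
 *        - else call the driver's handler and, if flagged, start the
 *          delayed path verification
 */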
/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
        } else if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Check for unsolicited interrupt. */
        if (irb->scsw.stctl ==
            (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
                if (irb->scsw.cc == 1)
                        /* Basic sense hasn't started. Try again. */
                        ccw_device_do_sense(cdev, irb);
                else {
                        printk("Huh? %s(%s): unsolicited interrupt...\n",
                               __FUNCTION__, cdev->dev.bus_id);
                        if (cdev->handler)
                                cdev->handler (cdev, 0, irb);
                }
                return;
        }
        /* Add basic sense info to irb. */
        ccw_device_accumulate_basic_sense(cdev, irb);
        if (cdev->private->flags.dosense) {
                /* Another basic sense is needed. */
                ccw_device_do_sense(cdev, irb);
                return;
        }
        cdev->private->state = DEV_STATE_ONLINE;
        /* Call the handler. */
        if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        irb = (struct irb *) __LC_IRB;
        /* Accumulate status. We don't do basic sense. */
        ccw_device_accumulate_irb(cdev, irb);
        /* Try to start delayed device verification. */
        ccw_device_online_verify(cdev, 0);
        /* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        /* OK, i/o is dead now. Call interrupt handler. */
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                return;
        }
        if (ret == -ENODEV) {
                struct subchannel *sch;

                sch = to_subchannel(cdev->dev.parent);
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        //FIXME: Can we get here?
        cdev->private->state = DEV_STATE_ONLINE;
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
}
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;
        struct subchannel *sch;

        irb = (struct irb *) __LC_IRB;
        /*
         * Accumulate status and find out if a basic sense is needed.
         * This is fine since we have already adapted the lpm.
         */
        ccw_device_accumulate_irb(cdev, irb);
        if (cdev->private->flags.dosense) {
                if (ccw_device_do_sense(cdev, irb) == 0) {
                        cdev->private->state = DEV_STATE_W4SENSE;
                }
                return;
        }

        /* Iff device is idle, reset timeout. */
        sch = to_subchannel(cdev->dev.parent);
        if (!stsch(sch->schid, &sch->schib))
                if (sch->schib.scsw.actl == 0)
                        ccw_device_set_timeout(cdev, 0);
        /* Call the handler. */
        ccw_device_call_handler(cdev);
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        ccw_device_set_timeout(cdev, 0);
        ret = ccw_device_cancel_halt_clear(cdev);
        if (ret == -EBUSY) {
                ccw_device_set_timeout(cdev, 3*HZ);
                cdev->private->state = DEV_STATE_TIMEOUT_KILL;
                return;
        }
        if (ret == -ENODEV) {
                if (!sch->lpm) {
                        PREPARE_WORK(&cdev->private->kick_work,
                                     ccw_device_nopath_notify, (void *)cdev);
                        queue_work(ccw_device_notify_work,
                                   &cdev->private->kick_work);
                } else
                        dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
                return;
        }
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              ERR_PTR(-ETIMEDOUT));
        if (!sch->lpm) {
                PREPARE_WORK(&cdev->private->kick_work,
                             ccw_device_nopath_notify, (void *)cdev);
                queue_work(ccw_device_notify_work, &cdev->private->kick_work);
        } else if (cdev->private->flags.doverify)
                /* Start delayed path verification. */
                ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
        /* When the I/O has terminated, we have to start verification. */
        if (cdev->private->options.pgroup)
                cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct irb *irb;

        switch (dev_event) {
        case DEV_EVENT_INTERRUPT:
                irb = (struct irb *) __LC_IRB;
                /* Check for unsolicited interrupt. */
                if ((irb->scsw.stctl ==
                     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
                    (!irb->scsw.cc))
                        /* FIXME: we should restart stlck here, but this
                         * is extremely unlikely ... */
                        goto out_wakeup;

                ccw_device_accumulate_irb(cdev, irb);
                /* We don't care about basic sense etc. */
                break;
        default: /* timeout */
                break;
        }
out_wakeup:
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
                /* Couldn't enable the subchannel for i/o. Sick device. */
                return;

        /* After 60s the device recognition is considered to have failed. */
        ccw_device_set_timeout(cdev, 60*HZ);

        cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
        ccw_device_sense_id_start(cdev);
}
void
device_trigger_reprobe(struct subchannel *sch)
{
        struct ccw_device *cdev;

        if (!sch->dev.driver_data)
                return;
        cdev = sch->dev.driver_data;
        if (cdev->private->state != DEV_STATE_DISCONNECTED)
                return;

        /* Update some values. */
        if (stsch(sch->schid, &sch->schib))
                return;

        /*
         * The pim, pam, pom values may not be accurate, but they are the best
         * we have before performing device selection :/
         */
        sch->lpm = sch->schib.pmcw.pim &
                sch->schib.pmcw.pam &
                sch->schib.pmcw.pom &
                sch->opm;
        /* Re-set some bits in the pmcw that were lost. */
        sch->schib.pmcw.isc = 3;
        sch->schib.pmcw.csense = 1;
        sch->schib.pmcw.ena = 0;
        if ((sch->lpm & (sch->lpm - 1)) != 0)
                sch->schib.pmcw.mp = 1;
        sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
        /* We should also update ssd info, but this has to wait. */
        ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        /*
         * An interrupt in state offline means a previous disable was not
         * successful. Try again.
         */
        cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
        retry_set_schib(cdev);
        cdev->private->state = DEV_STATE_ONLINE;
        dev_fsm_event(cdev, dev_event);
}


static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
        ccw_device_set_timeout(cdev, 0);
        if (dev_event == DEV_EVENT_NOTOPER)
                cdev->private->state = DEV_STATE_NOT_OPER;
        else
                cdev->private->state = DEV_STATE_OFFLINE;
        wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
        int ret;

        ret = ccw_device_cancel_halt_clear(cdev);
        switch (ret) {
        case 0:
                cdev->private->state = DEV_STATE_OFFLINE;
                wake_up(&cdev->private->wait_q);
                break;
        case -ENODEV:
                cdev->private->state = DEV_STATE_NOT_OPER;
                wake_up(&cdev->private->wait_q);
                break;
        default:
                ccw_device_set_timeout(cdev, HZ/10);
        }
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
        printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
               cdev->private->state, dev_event);
        BUG();
}

/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
        [DEV_STATE_NOT_OPER] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_bug,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_pgid_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_OFFLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_offline_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_verify_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_ONLINE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_online_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_W4SENSE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_w4sense,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_online_verify,
        },
        [DEV_STATE_DISBAND_PGID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_disband_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_onoff_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_BOXED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_offline_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_stlck_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_stlck_done,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* states to wait for i/o completion before doing something */
        [DEV_STATE_CLEAR_VERIFY] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_clear_verify,
                [DEV_EVENT_TIMEOUT]     = ccw_device_nop,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_TIMEOUT_KILL] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_killing_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_killing_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop, //FIXME
        },
        [DEV_STATE_WAIT4IO] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_online_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_wait4io_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_wait4io_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_wait4io_verify,
        },
        [DEV_STATE_QUIESCE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_quiesce_done,
                [DEV_EVENT_INTERRUPT]   = ccw_device_quiesce_done,
                [DEV_EVENT_TIMEOUT]     = ccw_device_quiesce_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        /* special states for devices gone not operational */
        [DEV_STATE_DISCONNECTED] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_nop,
                [DEV_EVENT_INTERRUPT]   = ccw_device_start_id,
                [DEV_EVENT_TIMEOUT]     = ccw_device_bug,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_DISCONNECTED_SENSE_ID] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_recog_notoper,
                [DEV_EVENT_INTERRUPT]   = ccw_device_sense_id_irq,
                [DEV_EVENT_TIMEOUT]     = ccw_device_recog_timeout,
                [DEV_EVENT_VERIFY]      = ccw_device_nop,
        },
        [DEV_STATE_CMFCHANGE] = {
                [DEV_EVENT_NOTOPER]     = ccw_device_change_cmfstate,
                [DEV_EVENT_INTERRUPT]   = ccw_device_change_cmfstate,
                [DEV_EVENT_TIMEOUT]     = ccw_device_change_cmfstate,
                [DEV_EVENT_VERIFY]      = ccw_device_change_cmfstate,
        },
};

/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch.
 */
void
io_subchannel_irq (struct device *pdev)
{
        struct ccw_device *cdev;

        cdev = to_subchannel(pdev)->dev.driver_data;

        CIO_TRACE_EVENT (3, "IRQ");
        CIO_TRACE_EVENT (3, pdev->bus_id);
        if (cdev)
                dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);
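/*
 * Illustrative sketch (assumed to match the dispatcher declared in device.h;
 * shown here only to document how the jump table above is consumed):
 *
 *      static inline void
 *      dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
 *      {
 *              dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 *      }
 *
 * Every state/event slot therefore has to be populated; ccw_device_bug() is
 * wired into slots that must never be reached.
 */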