/*
 * drivers/s390/cio/device_fsm.c
 * finite state machine for device handling
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/string.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/chpid.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "chsc.h"
#include "ioasm.h"
#include "chp.h"

static int timeout_log_enabled;

int
device_is_online(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_ONLINE);
}

int
device_is_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

void
device_set_disconnected(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void device_set_intretry(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	cdev->private->flags.intretry = 1;
}

int device_trigger_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev || !cdev->online)
		return -EINVAL;
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return 0;
}

static int __init ccw_timeout_log_setup(char *unused)
{
	timeout_log_enabled = 1;
	return 1;
}

__setup("ccw_timeout_log", ccw_timeout_log_setup);
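
/*
 * Dump diagnostic state (orb, last channel program, schib and device flags)
 * to the kernel log when a ccw request times out. Only active if the kernel
 * was booted with the "ccw_timeout_log" parameter (see the setup handler
 * above).
 */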

static void ccw_timeout_log(struct ccw_device *cdev)
{
	struct schib schib;
	struct subchannel *sch;
	struct io_subchannel_private *private;
	int cc;

	sch = to_subchannel(cdev->dev.parent);
	private = to_io_private(sch);
	cc = stsch(sch->schid, &schib);

	printk(KERN_WARNING "cio: ccw device timeout occurred at %llx, "
	       "device information:\n", get_clock());
	printk(KERN_WARNING "cio: orb:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &private->orb, sizeof(private->orb), 0);
	printk(KERN_WARNING "cio: ccw device bus id: %s\n", cdev->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel bus id: %s\n", sch->dev.bus_id);
	printk(KERN_WARNING "cio: subchannel lpm: %02x, opm: %02x, "
	       "vpm: %02x\n", sch->lpm, sch->opm, sch->vpm);

	if ((void *)(addr_t)private->orb.cpa == &private->sense_ccw ||
	    (void *)(addr_t)private->orb.cpa == cdev->private->iccws)
		printk(KERN_WARNING "cio: last channel program (intern):\n");
	else
		printk(KERN_WARNING "cio: last channel program:\n");

	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       (void *)(addr_t)private->orb.cpa,
		       sizeof(struct ccw1), 0);
	printk(KERN_WARNING "cio: ccw device state: %d\n",
	       cdev->private->state);
	printk(KERN_WARNING "cio: store subchannel returned: cc=%d\n", cc);
	printk(KERN_WARNING "cio: schib:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &schib, sizeof(schib), 0);
	printk(KERN_WARNING "cio: ccw device flags:\n");
	print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
		       &cdev->private->flags, sizeof(cdev->private->flags), 0);
}

/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	cdev = (struct ccw_device *) data;
	spin_lock_irq(cdev->ccwlock);
	if (timeout_log_enabled)
		ccw_timeout_log(cdev);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}

/*
 * Set timeout
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
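
/*
 * A zero timeout cancels any pending timer; a non-zero value (re-)arms it.
 * Hypothetical caller sketch, mirroring how the FSM code below uses it:
 *
 *	ccw_device_set_timeout(cdev, 3 * HZ);	- expect an interrupt soon
 *	...
 *	ccw_device_set_timeout(cdev, 0);	- done, disarm the timer
 */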

/* Kill any pending timers after machine check. */
void
device_kill_pending_timer(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
}

/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, three tries
 * with cio_halt, 255 tries with cio_clear. If everything fails, panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->schid, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (!sch->schib.pmcw.ena)
		/* Not operational -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			if (ret != -EBUSY)
				return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear(sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	panic("Can't stop i/o on subchannel.\n");
}
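
/*
 * The iretry counter is managed by the stages themselves: 3 attempts with
 * cio_halt(), then up to 255 with cio_clear(). Callers simply invoke
 * ccw_device_cancel_halt_clear() again as long as it returns -EBUSY,
 * typically from a timeout handler re-armed with 3*HZ (see e.g.
 * ccw_device_recog_timeout() below).
 */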

static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	cdev->private->flags.donotify = 1;
	return 1;
}

/*
 * The machine won't give us any notification by machine check if a chpid has
 * been varied online on the SE so we have to find out by magic (i.e. driving
 * the channel subsystem to device selection and updating our path masks).
 */
static void
__recover_lost_chpids(struct subchannel *sch, int old_lpm)
{
	int mask, i;
	struct chp_id chpid;

	chp_id_init(&chpid);
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (!(sch->lpm & mask))
			continue;
		if (old_lpm & mask)
			continue;
		chpid.id = sch->schib.pmcw.chpid[i];
		if (!chp_is_registered(chpid))
			css_schedule_eval_all();
	}
}

/*
 * Stop device recognition.
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->schid, &sch->schib);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Check since device may again have become not operational. */
	if (!sch->schib.pmcw.dnv)
		state = DEV_STATE_NOT_OPER;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		memset(&cdev->id, 0, sizeof(cdev->id));
		cdev->id.cu_type = cdev->private->senseid.cu_type;
		cdev->id.cu_model = cdev->private->senseid.cu_model;
		cdev->id.dev_type = cdev->private->senseid.dev_type;
		cdev->id.dev_model = cdev->private->senseid.dev_model;
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2,
			  "SenseID : device 0.%x.%04x reports: "
			  "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n",
			  cdev->private->dev_id.ssid,
			  cdev->private->dev_id.devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel "
			  "0.%x.%04x\n", cdev->private->dev_id.devno,
			  sch->schid.ssid, sch->schid.sch_no);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}

/*
 * Function called from device_id.c after sense id has completed.
 */
void
ccw_device_sense_id_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:		/* Sense id stopped by timeout. */
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

static void
ccw_device_oper_notify(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver && sch->driver->notify) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		ret = sch->driver->notify(sch, CIO_OPER);
		spin_lock_irqsave(cdev->ccwlock, flags);
	} else
		ret = 0;
	if (ret) {
		/* Reenable channel measurements, if needed. */
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		cmf_reenable(cdev);
		spin_lock_irqsave(cdev->ccwlock, flags);
		wake_up(&cdev->private->wait_q);
	}
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	if (!ret)
		/* Driver doesn't want device back. */
		ccw_device_do_unreg_rereg(work);
}

/*
 * Finished with online/offline processing.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;

	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->dev_id.devno, sch->schid.sch_no);

	if (cdev->private->flags.donotify) {
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device(&cdev->dev);
}
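
/*
 * Compare two path group ids, skipping the first byte (the fc/ps field),
 * which reflects per-path state rather than the group id itself - an
 * assumption based on the struct pgid layout in css.h.
 */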

static int cmp_pgid(struct pgid *p1, struct pgid *p2)
{
	char *c1;
	char *c2;

	c1 = (char *)p1;
	c2 = (char *)p2;

	return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1);
}

static void __ccw_device_get_common_pgid(struct ccw_device *cdev)
{
	int i;
	int last;

	last = 0;
	for (i = 0; i < 8; i++) {
		if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET)
			/* No PGID yet */
			continue;
		if (cdev->private->pgid[last].inf.ps.state1 ==
		    SNID_STATE1_RESET) {
			/* First non-zero PGID */
			last = i;
			continue;
		}
		if (cmp_pgid(&cdev->private->pgid[i],
			     &cdev->private->pgid[last]) == 0)
			/* Non-conflicting PGIDs */
			continue;

		/* PGID mismatch, can't pathgroup. */
		CIO_MSG_EVENT(0, "SNID - pgid mismatch for device "
			      "0.%x.%04x, can't pathgroup\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->private->options.pgroup = 0;
		return;
	}
	if (cdev->private->pgid[last].inf.ps.state1 ==
	    SNID_STATE1_RESET)
		/* No previous pgid found */
		memcpy(&cdev->private->pgid[0],
		       &channel_subsystems[0]->global_pgid,
		       sizeof(struct pgid));
	else
		/* Use existing pgid */
		memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last],
		       sizeof(struct pgid));
}

/*
 * Function called from device_pgid.c after sense path group id has completed.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */
		cdev->private->options.pgroup = 0;
		break;
	case 0: /* success */
	case -EACCES: /* partial success, some paths not operational */
		/* Check if all pgids are equal or 0. */
		__ccw_device_get_common_pgid(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		return;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		return;
	}
	/* Start Path Group verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}

/*
 * Start device recognition.
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				    (u32)(addr_t)sch);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after a
	 * timeout (or if sense pgid during path verification detects the
	 * device is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
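
/*
 * Recognition thus runs as: enable the subchannel, arm a 60 second timer,
 * start sense id. ccw_device_sense_id_done() maps the result to OFFLINE
 * (success), BOXED (timeout) or NOT_OPER (failure) via
 * ccw_device_recog_done().
 */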

/*
 * Handle timeout in device recognition.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Update schib - pom may have changed. */
	stsch(sch->schid, &sch->schib);
	/* Update lpm with verified path mask. */
	sch->lpm = sch->vpm;
	/* Repeat path verification? */
	if (cdev->private->flags.doverify) {
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return;
	}
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fall through */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw.cc = 1;
			cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
			cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
			cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* Reset oper notify indication after verify error. */
		cdev->private->flags.donotify = 0;
		if (cdev->online) {
			ccw_device_set_timeout(cdev, 0);
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Get device online.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				    (u32)(addr_t)sch);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* Start initial path verification. */
		cdev->private->state = DEV_STATE_VERIFY;
		cdev->private->flags.doverify = 0;
		ccw_device_verify_start(cdev);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
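
/*
 * Summary of the online path as implemented above: SENSE_PGID (skipped if
 * path grouping is disabled) -> VERIFY -> ONLINE, with the final transition
 * made in ccw_device_verify_done().
 */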

void
ccw_device_disband_done(struct ccw_device *cdev, int err)
{
	switch (err) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		break;
	case -ETIME:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		cdev->private->flags.donotify = 0;
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}

/*
 * Shutdown device.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev)) {
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	sch = to_subchannel(cdev->dev.parent);
	if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}

/*
 * Handle timeout in device online/offline process.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}

/*
 * Handle not oper event in device recognition.
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}

/*
 * Handle not operational event in non-special state.
 */
static void ccw_device_generic_notoper(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	css_schedule_eval(sch->schid);
}

/*
 * Handle path verification event.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (cdev->private->state == DEV_STATE_W4SENSE) {
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->schid, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	cdev->private->flags.doverify = 0;
	ccw_device_verify_start(cdev);
}
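
/*
 * The interrupt handlers below treat status with stctl == (status pending |
 * alert status) as unsolicited. In state online, a unit check without
 * concurrent sense data (erw.cons == 0) starts a basic sense channel
 * program and parks the device in DEV_STATE_W4SENSE until the sense data
 * has been collected.
 */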

/*
 * Got an interrupt for a normal io (state online).
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler(cdev, 0, irb);
		if (cdev->private->flags.doverify)
			ccw_device_online_verify(cdev, 0);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

/*
 * Got a timeout in online state.
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV)
		dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}

/*
 * Got an interrupt for a basic sense.
 */
static void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			CIO_MSG_EVENT(2, "Huh? 0.%x.%04x: unsolicited "
				      "interrupt during w4sense...\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
			if (cdev->handler)
				cdev->handler(cdev, 0, irb);
		}
		return;
	}
	/*
	 * Check if a halt or clear has been issued in the meanwhile. If yes,
	 * only deliver the halt/clear interrupt to the device driver as if it
	 * had killed the original request.
	 */
	if (irb->scsw.fctl & (SCSW_FCTL_CLEAR_FUNC | SCSW_FCTL_HALT_FUNC)) {
		/* Retry Basic Sense if requested. */
		if (cdev->private->flags.intretry) {
			cdev->private->flags.intretry = 0;
			ccw_device_do_sense(cdev, irb);
			return;
		}
		cdev->private->flags.dosense = 0;
		memset(&cdev->private->irb, 0, sizeof(struct irb));
		ccw_device_accumulate_irb(cdev, irb);
		goto call_handler;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
call_handler:
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}

static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Remember to clear irb to avoid residuals. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}

static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

void device_kill_io(struct subchannel *sch)
{
	int ret;
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	/* Start delayed path verification. */
	ccw_device_online_verify(cdev, 0);
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void
ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	/* Start verification after current task finished. */
	cdev->private->flags.doverify = 1;
}

static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		     (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc,
				  (u32)(addr_t)sch) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
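
/*
 * Called after a machine check that might indicate the device is back:
 * refresh the subchannel data as far as possible without doing I/O, restart
 * sense id, or hand the ccw_device over to the orphanage if a different
 * devno now answers on this subchannel.
 */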

void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->schid, &sch->schib))
		return;
	if (!sch->schib.pmcw.dnv)
		return;
	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (u32)(addr_t)sch;
	/* We should also update ssd info, but this has to wait. */
	/* Check if this is another device which appeared on the same sch. */
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_orphanage);
		queue_work(slow_path_wq, &cdev->private->kick_work);
	} else
		ccw_device_start_id(cdev, 0);
}

static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}

static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void ccw_device_update_cmfblock(struct ccw_device *cdev,
				       enum dev_event dev_event)
{
	cmf_retry_copy_block(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}

static void
ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_set_timeout(cdev, 0);
	if (dev_event == DEV_EVENT_NOTOPER)
		cdev->private->state = DEV_STATE_NOT_OPER;
	else
		cdev->private->state = DEV_STATE_OFFLINE;
	wake_up(&cdev->private->wait_q);
}

static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		ccw_device_set_timeout(cdev, HZ/10);
	}
}

/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}

/*
 * Bug operation action.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	CIO_MSG_EVENT(0, "dev_jumptable[%i][%i] == NULL\n",
		      cdev->private->state, dev_event);
	BUG();
}
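
/*
 * The two-dimensional jumptable below is indexed by device state and event.
 * dev_fsm_event() (a helper in device.h) dispatches through it, in essence:
 *
 *	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
 */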

/*
 * device statemachine
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_delay_verify,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_generic_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_start_id,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
	[DEV_STATE_CMFUPDATE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_update_cmfblock,
		[DEV_EVENT_INTERRUPT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_TIMEOUT]	= ccw_device_update_cmfblock,
		[DEV_EVENT_VERIFY]	= ccw_device_update_cmfblock,
	},
};

EXPORT_SYMBOL_GPL(ccw_device_set_timeout);