/*
 *  drivers/s390/cio/device.c
 *  bus driver for ccw devices
 *
 *    Copyright IBM Corp. 2002,2008
 *    Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}

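/*
 * Example: for illustration, a (hypothetical) device reporting control
 * unit type/model 0x3990/0xE1 and device type/model 0x3390/0x0A would
 * yield the alias "ccw:t3990mE1dt3390dm0A", while a device that reports
 * no device type would yield "ccw:t3990mE1dtdm". The same string is
 * exported as the MODALIAS uevent variable below, which is what module
 * autoloading matches against.
 */
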
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static struct css_driver io_subchannel_driver = {
	.owner = THIS_MODULE,
	.subchannel_type = io_subchannel_ids,
	.name = "io_subchannel",
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
};

struct workqueue_struct *ccw_device_work;
struct workqueue_struct *ccw_device_notify_work;
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static void recovery_func(unsigned long data);

static int __init
init_ccw_bus_type (void)
{
	int ret;

	init_waitqueue_head(&ccw_device_init_wq);
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ccw_device_work = create_singlethread_workqueue("cio");
	if (!ccw_device_work)
		return -ENOMEM;	/* FIXME: better errno ? */
	ccw_device_notify_work = create_singlethread_workqueue("cio_notify");
	if (!ccw_device_notify_work) {
		ret = -ENOMEM;	/* FIXME: better errno ? */
		goto out_err;
	}
	slow_path_wq = create_singlethread_workqueue("kslowcrw");
	if (!slow_path_wq) {
		ret = -ENOMEM;	/* FIXME: better errno ? */
		goto out_err;
	}
	if ((ret = bus_register (&ccw_bus_type)))
		goto out_err;

	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		goto out_err;

	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	flush_workqueue(ccw_device_work);
	return 0;
out_err:
	if (ccw_device_work)
		destroy_workqueue(ccw_device_work);
	if (ccw_device_notify_work)
		destroy_workqueue(ccw_device_notify_work);
	if (slow_path_wq)
		destroy_workqueue(slow_path_wq);
	return ret;
}

static void __exit
cleanup_ccw_bus_type (void)
{
	css_driver_unregister(&io_subchannel_driver);
	bus_unregister(&ccw_bus_type);
	destroy_workqueue(ccw_device_notify_work);
	destroy_workqueue(ccw_device_work);
}

subsys_initcall(init_ccw_bus_type);
module_exit(cleanup_ccw_bus_type);

/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}

static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (test_and_clear_bit(1, &cdev->private->registered))
		device_del(&cdev->dev);
}

static void ccw_device_remove_orphan_cb(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
}

static void ccw_device_remove_sch_cb(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&sch->dev);
}

static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	unsigned long flags;
	int rc;

	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	if (ccw_device_is_orphan(cdev)) {
		/*
		 * Deregister ccw device.
		 * Unfortunately, we cannot do this directly from the
		 * attribute method.
		 */
		spin_lock_irqsave(cdev->ccwlock, flags);
		cdev->private->state = DEV_STATE_NOT_OPER;
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		rc = device_schedule_callback(&cdev->dev,
					      ccw_device_remove_orphan_cb);
		if (rc)
			CIO_MSG_EVENT(0, "Couldn't unregister orphan "
				      "0.%x.%04x\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		return;
	}
	/* Deregister subchannel, which will kill the ccw device. */
	rc = device_schedule_callback(cdev->dev.parent,
				      ccw_device_remove_sch_cb);
	if (rc)
		CIO_MSG_EVENT(0, "Couldn't unregister disconnected device "
			      "0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
}

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	if (ret == -ENODEV) {
		if (cdev->private->state != DEV_STATE_NOT_OPER) {
			cdev->private->state = DEV_STATE_OFFLINE;
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		}
		spin_unlock_irq(cdev->ccwlock);
		return ret;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		cdev->online = 1;
	}
	return ret;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		return ret;
	}
	if (cdev->private->state != DEV_STATE_ONLINE)
		return -ENODEV;
	if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) {
		cdev->online = 1;
		return 0;
	}
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
	return (ret == 0) ? -ENODEV : ret;
}

static void online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED)
		ccw_device_remove_disconnected(cdev);
	else if (cdev->drv && cdev->drv->set_offline)
		ccw_device_set_offline(cdev);
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	int ret;

	/* Do device recognition, if needed. */
	if (cdev->id.cu_type == 0) {
		ret = ccw_device_recognition(cdev);
		if (ret) {
			CIO_MSG_EVENT(0, "Couldn't start recognition "
				      "for device 0.%x.%04x (ret=%d)\n",
				      cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			return ret;
		}
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
	}
	if (cdev->drv && cdev->drv->set_online)
		ccw_device_set_online(cdev);
	return 0;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		online_store_recog_and_online(cdev);
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;

	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		online_store_handle_offline(cdev);
		ret = count;
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		if (!ret)
			ret = count;
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return ret;
}

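/*
 * For illustration, the online attribute defined above is typically
 * driven from user space roughly like this (the bus id 0.0.1234 is an
 * arbitrary placeholder):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" is routed through online_store_handle_online() with force set,
 * which additionally attempts to steal the lock of a boxed device via
 * ccw_device_stlck() before trying to bring it online.
 */
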
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;

	if ((ret = device_add(dev)))
		return ret;

	set_bit(1, &cdev->private->registered);
	return ret;
}

struct match_data {
	struct ccw_dev_id dev_id;
	struct ccw_device * sibling;
};

static int
match_devno(struct device * dev, void * data)
{
	struct match_data * d = data;
	struct ccw_device * cdev;

	cdev = to_ccwdev(dev);
	if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
	    !ccw_device_is_orphan(cdev) &&
	    ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
	    (cdev != d->sibling))
		return 1;
	return 0;
}

static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
						     struct ccw_device *sibling)
{
	struct device *dev;
	struct match_data data;

	data.dev_id = *dev_id;
	data.sibling = sibling;
	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);

	return dev ? to_ccwdev(dev) : NULL;
}

static int match_orphan(struct device *dev, void *data)
{
	struct ccw_dev_id *dev_id;
	struct ccw_device *cdev;

	dev_id = data;
	cdev = to_ccwdev(dev);
	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

static struct ccw_device *
get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
			      struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
				match_orphan);

	return dev ? to_ccwdev(dev) : NULL;
}

static void
ccw_device_add_changed(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	if (device_add(&cdev->dev)) {
		put_device(&cdev->dev);
		return;
	}
	set_bit(1, &cdev->private->registered);
}

void ccw_device_do_unreg_rereg(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);

	ccw_device_unregister(cdev);
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_add_changed);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

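/*
 * Rough lifecycle sketch (derived from the helpers around here): a ccw
 * device is first allocated (io_subchannel_allocate_dev), then
 * initialized and attached to its subchannel without being registered
 * (io_subchannel_initialize_dev/device_initialize), then run through
 * sense-id recognition, and only registered with the driver core
 * (device_add via ccw_device_register) once recognition has finished
 * in io_subchannel_register().
 */
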
static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->kick_work, NULL);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		return -ENODEV;
	}
	return 0;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret) {
			kfree(cdev);
			cdev = ERR_PTR(ret);
		}
	}
	return cdev;
}

static int io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_attach_device(struct subchannel *sch,
			      struct ccw_device *cdev)
{
	css_update_ssd_info(sch);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	ccw_device_trigger_reprobe(cdev);
	spin_unlock_irq(sch->lock);
}

static void sch_attach_disconnected_device(struct subchannel *sch,
					   struct ccw_device *cdev)
{
	struct subchannel *other_sch;
	int ret;

	other_sch = to_subchannel(get_device(cdev->dev.parent));
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&other_sch->dev);
		return;
	}
	sch_set_cdev(other_sch, NULL);
	/* No need to keep a subchannel without ccw device around. */
	css_sch_device_unregister(other_sch);
	put_device(&other_sch->dev);
	sch_attach_device(sch, cdev);
}

static void sch_attach_orphaned_device(struct subchannel *sch,
				       struct ccw_device *cdev)
{
	int ret;

	/* Try to move the ccw device to its new subchannel. */
	ret = device_move(&cdev->dev, &sch->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
			      "failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	sch_attach_device(sch, cdev);
}

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	/* Start recognition for the new ccw device. */
	if (io_subchannel_recog(cdev, sch)) {
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		css_sch_device_unregister(sch);
	}
}


void ccw_device_move_to_orphanage(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct ccw_device *replacing_cdev;
	struct subchannel *sch;
	int ret;
	struct channel_subsystem *css;
	struct ccw_dev_id dev_id;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css = to_css(sch->dev.parent);
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;

	/*
	 * Move the orphaned ccw device to the orphanage so the replacing
	 * ccw device can take its place on the subchannel.
	 */
	ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
			      "(ret=%d)!\n", cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		return;
	}
	cdev->ccwlock = css->pseudo_subchannel->lock;
	/*
	 * Search for the replacing ccw device
	 * - among the disconnected devices
	 * - in the orphanage
	 */
	replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
	if (replacing_cdev) {
		sch_attach_disconnected_device(sch, replacing_cdev);
		return;
	}
	replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
	if (replacing_cdev) {
		sch_attach_orphaned_device(sch, replacing_cdev);
		return;
	}
	sch_create_and_recog_new_device(sch);
}

/*
 * Register recognized device.
 */
static void
io_subchannel_register(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (klist_node_attached(&cdev->dev.knode_parent)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	sch->dev.uevent_suppress = 0;
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		put_device(&cdev->dev);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		kfree (cdev->private);
		kfree (cdev);
		put_device(&sch->dev);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return;
	}
	put_device(&cdev->dev);
out:
	cdev->private->flags.recog_done = 1;
	put_device(&sch->dev);
	wake_up(&cdev->private->wait_q);
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	priv = container_of(work, struct ccw_device_private, kick_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&cdev->dev);
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		if (!get_device(&cdev->dev))
			break;
		sch = to_subchannel(cdev->dev.parent);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		if (!get_device(&cdev->dev))
			break;
		PREPARE_WORK(&cdev->private->kick_work,
			     io_subchannel_register);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		break;
	}
}

static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	sch_set_cdev(sch, cdev);
	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Set an initial name for the device. */
	snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
		  sch->schid.ssid, sch->schib.pmcw.dev);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	if (rc) {
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}

static void ccw_device_move_to_sch(struct work_struct *work)
{
	struct ccw_device_private *priv;
	int rc;
	struct subchannel *sch;
	struct ccw_device *cdev;
	struct subchannel *former_parent;

	priv = container_of(work, struct ccw_device_private, kick_work);
	sch = priv->sch;
	cdev = priv->cdev;
	former_parent = ccw_device_is_orphan(cdev) ?
		NULL : to_subchannel(get_device(cdev->dev.parent));
	mutex_lock(&sch->reg_mutex);
	/* Try to move the ccw device to its new subchannel. */
	rc = device_move(&cdev->dev, &sch->dev);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to subchannel "
			      "0.%x.%04x failed (ret=%d)!\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schid.sch_no, rc);
		css_sch_device_unregister(sch);
		goto out;
	}
	if (former_parent) {
		spin_lock_irq(former_parent->lock);
		sch_set_cdev(former_parent, NULL);
		spin_unlock_irq(former_parent->lock);
		css_sch_device_unregister(former_parent);
		/* Reset intparm to zeroes. */
		former_parent->schib.pmcw.intparm = 0;
		cio_modify(former_parent);
	}
	sch_attach_device(sch, cdev);
out:
	if (former_parent)
		put_device(&former_parent->dev);
	put_device(&cdev->dev);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(3, "IRQ");
	CIO_TRACE_EVENT(3, sch->dev.bus_id);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);
	/* Initially set up some fields in the pmcw. */
	sch->schib.pmcw.ena = 0;
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	/* clean up possible residual cmf stuff */
	sch->schib.pmcw.mme = 0;
	sch->schib.pmcw.mbfc = 0;
	sch->schib.pmcw.mbi = 0;
	sch->schib.mba = 0;
}

static int io_subchannel_probe(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int rc;
	unsigned long flags;
	struct ccw_dev_id dev_id;

	cdev = sch_get_cdev(sch);
	if (cdev) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * This subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit. This happens for all early
		 * devices, e.g. the console.
		 */
		sch->dev.uevent_suppress = 0;
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected
		 * (see ccw_device_online and css_init_done for the
		 * ugly details).
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	/*
	 * First check if a fitting device may be found amongst the
	 * disconnected devices or in the orphanage.
	 */
	dev_id.devno = sch->schib.pmcw.dev;
	dev_id.ssid = sch->schid.ssid;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		return rc;
	/* Allocate I/O subchannel private data. */
	sch->private = kzalloc(sizeof(struct io_subchannel_private),
			       GFP_KERNEL | GFP_DMA);
	if (!sch->private) {
		rc = -ENOMEM;
		goto out_err;
	}
	cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
	if (!cdev)
		cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
						     &dev_id);
	if (cdev) {
		/*
		 * Schedule moving the device until we have a registered
		 * subchannel to move to and the probe can succeed. We can
		 * unregister it again later, once the probe is through.
		 */
		cdev->private->sch = sch;
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_move_to_sch);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		return 0;
	}
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		rc = PTR_ERR(cdev);
		goto out_err;
	}
	rc = io_subchannel_recog(cdev, sch);
	if (rc) {
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		goto out_err;
	}
	return 0;
out_err:
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return rc;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct ccw_device *cdev;
	unsigned long flags;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	sch_set_cdev(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	ccw_device_unregister(cdev);
	put_device(&cdev->dev);
	kfree(sch->private);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static int io_subchannel_notify(struct subchannel *sch, int event)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return 0;
	return ccw_device_notify(cdev, event);
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static int check_for_io_on_path(struct subchannel *sch, int mask)
{
	int cc;

	cc = stsch(sch->schid, &sch->schib);
	if (cc)
		return 0;
	if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask)
		return 1;
	return 0;
}

static void terminate_internal_io(struct subchannel *sch,
				  struct ccw_device *cdev)
{
	if (cio_clear(sch)) {
		/* Recheck device in case clear failed. */
		sch->lpm = 0;
		if (cdev->online)
			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		else
			css_schedule_eval(sch->schid);
		return;
	}
	cdev->private->state = DEV_STATE_CLEAR_VERIFY;
	/* Request retry of internal operation. */
	cdev->private->flags.intretry = 1;
	/* Call handler. */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (check_for_io_on_path(sch, mask)) {
		if (cdev->private->state == DEV_STATE_ONLINE)
			ccw_device_kill_io(cdev);
		else {
			terminate_internal_io(sch, cdev);
			/* Re-start path verification. */
			dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		}
	} else
		/* trigger path verification. */
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);

}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (stsch(sch->schid, &sch->schib))
			return -ENXIO;
		if (!css_sch_is_valid(&sch->schib))
			return -ENODEV;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (stsch(sch->schid, &sch->schib))
			return -ENXIO;
		sch->lpm |= mask & sch->opm;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

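/*
 * Note on the masks used above: chp_ssd_get_mask() yields the subset of
 * the subchannel's 8-bit path mask that runs over the channel path
 * described by the chp_link argument (the same 0x80 >> position
 * convention used in chpids_show()), so the vary/online/offline cases
 * simply set or clear those paths in the operational (opm) and logical
 * (lpm) path masks.
 */
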
static void
io_subchannel_shutdown(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = sch_get_cdev(sch);

	if (cio_is_console(sch->schid))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}

static int io_subchannel_get_status(struct subchannel *sch)
{
	struct schib schib;

	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
		return CIO_GONE;
	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
		return CIO_REVALIDATE;
	if (!sch->lpm)
		return CIO_NO_PATH;
	return CIO_OPER;
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

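/*
 * Taken together, recovery_func(), recovery_work_func() and the
 * recovery_delay[] table near the top of this file implement a simple
 * escalating retry scheme: the first recovery attempt is scheduled 3
 * seconds after a device became disconnected, and as long as
 * disconnected devices remain, further attempts follow after 30 and
 * then 300 seconds (recovery_phase sticks at the last entry once it is
 * reached).
 */
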
static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static void device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

static int io_subchannel_sch_event(struct subchannel *sch, int slow)
{
	int event, ret, disc;
	unsigned long flags;
	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
	struct ccw_device *cdev;

	spin_lock_irqsave(sch->lock, flags);
	cdev = sch_get_cdev(sch);
	disc = device_is_disconnected(cdev);
	if (disc && slow) {
		/* Disconnected devices are evaluated directly only. */
		spin_unlock_irqrestore(sch->lock, flags);
		return 0;
	}
	/* No interrupt after machine check - kill pending timers. */
	if (cdev)
		ccw_device_set_timeout(cdev, 0);
	if (!disc && !slow) {
		/* Non-disconnected devices are evaluated on the slow path. */
		spin_unlock_irqrestore(sch->lock, flags);
		return -EAGAIN;
	}
	event = io_subchannel_get_status(sch);
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
		      sch->schid.ssid, sch->schid.sch_no, event,
		      disc ? "disconnected" : "normal",
		      slow ? "slow" : "fast");
	/* Analyze subchannel status. */
	action = NONE;
	switch (event) {
	case CIO_NO_PATH:
		if (disc) {
			/* Check if paths have become available. */
			action = REPROBE;
			break;
		}
		/* fall through */
	case CIO_GONE:
		/* Prevent unwanted effects when opening lock. */
		cio_disable_subchannel(sch);
		device_set_disconnected(cdev);
		/* Ask driver what to do with device. */
		action = UNREGISTER;
		spin_unlock_irqrestore(sch->lock, flags);
		ret = io_subchannel_notify(sch, event);
		spin_lock_irqsave(sch->lock, flags);
		if (ret)
			action = NONE;
		break;
	case CIO_REVALIDATE:
		/* Device will be removed, so no notify necessary. */
		if (disc)
			/* Reprobe because immediate unregister might block. */
			action = REPROBE;
		else
			action = UNREGISTER_PROBE;
		break;
	case CIO_OPER:
		if (disc)
			/* Get device operational again. */
			action = REPROBE;
		break;
	}
	/* Perform action. */
	ret = 0;
	switch (action) {
	case UNREGISTER:
	case UNREGISTER_PROBE:
		/* Unregister device (will use subchannel lock). */
		spin_unlock_irqrestore(sch->lock, flags);
		css_sch_device_unregister(sch);
		spin_lock_irqsave(sch->lock, flags);

		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		break;
	case REPROBE:
		ccw_device_trigger_reprobe(cdev);
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* Probe if necessary. */
	if (action == UNREGISTER_PROBE)
		ret = css_probe_device(sch->schid);

	return ret;
}

#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}

static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	int rc;

	/* Attach subchannel private data. */
	sch->private = cio_get_console_priv();
	memset(sch->private, 0, sizeof(struct io_subchannel_private));
	io_subchannel_init_fields(sch);
	sch->driver = &io_subchannel_driver;
	/* Initialize the ccw_device structure. */
	cdev->dev.parent= &sch->dev;
	rc = io_subchannel_recog(cdev, sch);
	if (rc)
		return rc;

	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}

struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0);
}


/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;
	struct device_driver *drv;

	drv = get_driver(&cdrv->driver);
	if (!drv)
		return NULL;

	dev = driver_find_device(drv, NULL, (void *)bus_id,
				 __ccwdev_check_busid);
	put_driver(drv);

	return dev ? to_ccwdev(dev) : NULL;
}

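/*
 * Usage sketch (hypothetical driver code): look up one of our own
 * devices by bus id and drop the reference again when done. The driver
 * name and bus id below are placeholders:
 *
 *	struct ccw_device *cdev;
 *
 *	cdev = get_ccwdev_by_busid(&my_ccw_driver, "0.0.1234");
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 *
 * The put_device() matches the reference taken by the lookup above.
 */
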
/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		return ret;
	}

	return 0;
}

static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;
	drv->name = cdriver->name;
	drv->owner = cdriver->owner;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

/* Helper func for qdio. */
struct subchannel_id
ccw_device_get_subchannel_id(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return sch->schid;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
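
/*
 * For reference, a minimal user of this bus could look roughly like the
 * sketch below. All names are placeholders, the callbacks other than
 * my_probe are assumed to be defined elsewhere, and error handling is
 * abbreviated; it matches on a control unit type/model and registers
 * with ccw_driver_register() from its module init:
 *
 *	static struct ccw_device_id my_ids[] = {
 *		{ CCW_DEVICE(0x3088, 0x60) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, my_ids);
 *
 *	static int my_probe(struct ccw_device *cdev)
 *	{
 *		cdev->handler = my_irq_handler;
 *		return 0;
 *	}
 *
 *	static struct ccw_driver my_driver = {
 *		.owner       = THIS_MODULE,
 *		.name        = "my_driver",
 *		.ids         = my_ids,
 *		.probe       = my_probe,
 *		.remove      = my_remove,
 *		.set_online  = my_set_online,
 *		.set_offline = my_set_offline,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return ccw_driver_register(&my_driver);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		ccw_driver_unregister(&my_driver);
 *	}
 */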