1 /* 2 * drivers/s390/cio/device.c 3 * bus driver for ccw devices 4 * 5 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, 6 * IBM Corporation 7 * Author(s): Arnd Bergmann (arndb@de.ibm.com) 8 * Cornelia Huck (cornelia.huck@de.ibm.com) 9 * Martin Schwidefsky (schwidefsky@de.ibm.com) 10 */ 11 #include <linux/module.h> 12 #include <linux/init.h> 13 #include <linux/spinlock.h> 14 #include <linux/errno.h> 15 #include <linux/err.h> 16 #include <linux/slab.h> 17 #include <linux/list.h> 18 #include <linux/device.h> 19 #include <linux/workqueue.h> 20 #include <linux/timer.h> 21 22 #include <asm/ccwdev.h> 23 #include <asm/cio.h> 24 #include <asm/param.h> /* HZ */ 25 #include <asm/cmb.h> 26 27 #include "cio.h" 28 #include "cio_debug.h" 29 #include "css.h" 30 #include "device.h" 31 #include "ioasm.h" 32 #include "io_sch.h" 33 34 static struct timer_list recovery_timer; 35 static DEFINE_SPINLOCK(recovery_lock); 36 static int recovery_phase; 37 static const unsigned long recovery_delay[] = { 3, 30, 300 }; 38 39 /******************* bus type handling ***********************/ 40 41 /* The Linux driver model distinguishes between a bus type and 42 * the bus itself. Of course we only have one channel 43 * subsystem driver and one channel system per machine, but 44 * we still use the abstraction. T.R. says it's a good idea. */ 45 static int 46 ccw_bus_match (struct device * dev, struct device_driver * drv) 47 { 48 struct ccw_device *cdev = to_ccwdev(dev); 49 struct ccw_driver *cdrv = to_ccwdrv(drv); 50 const struct ccw_device_id *ids = cdrv->ids, *found; 51 52 if (!ids) 53 return 0; 54 55 found = ccw_device_id_match(ids, &cdev->id); 56 if (!found) 57 return 0; 58 59 cdev->id.driver_info = found->driver_info; 60 61 return 1; 62 } 63 64 /* Store modalias string delimited by prefix/suffix string into buffer with 65 * specified size. Return length of resulting string (excluding trailing '\0') 66 * even if string doesn't fit buffer (snprintf semantics). */ 67 static int snprint_alias(char *buf, size_t size, 68 struct ccw_device_id *id, const char *suffix) 69 { 70 int len; 71 72 len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model); 73 if (len > size) 74 return len; 75 buf += len; 76 size -= len; 77 78 if (id->dev_type != 0) 79 len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type, 80 id->dev_model, suffix); 81 else 82 len += snprintf(buf, size, "dtdm%s", suffix); 83 84 return len; 85 } 86 87 /* Set up environment variables for ccw device uevent. Return 0 on success, 88 * non-zero otherwise. */ 89 static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) 90 { 91 struct ccw_device *cdev = to_ccwdev(dev); 92 struct ccw_device_id *id = &(cdev->id); 93 int ret; 94 char modalias_buf[30]; 95 96 /* CU_TYPE= */ 97 ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type); 98 if (ret) 99 return ret; 100 101 /* CU_MODEL= */ 102 ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model); 103 if (ret) 104 return ret; 105 106 /* The next two can be zero, that's ok for us */ 107 /* DEV_TYPE= */ 108 ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type); 109 if (ret) 110 return ret; 111 112 /* DEV_MODEL= */ 113 ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model); 114 if (ret) 115 return ret; 116 117 /* MODALIAS= */ 118 snprint_alias(modalias_buf, sizeof(modalias_buf), id, ""); 119 ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf); 120 return ret; 121 } 122 123 struct bus_type ccw_bus_type; 124 125 static void io_subchannel_irq(struct subchannel *); 126 static int io_subchannel_probe(struct subchannel *); 127 static int io_subchannel_remove(struct subchannel *); 128 static int io_subchannel_notify(struct subchannel *, int); 129 static void io_subchannel_verify(struct subchannel *); 130 static void io_subchannel_ioterm(struct subchannel *); 131 static void io_subchannel_shutdown(struct subchannel *); 132 133 static struct css_driver io_subchannel_driver = { 134 .owner = THIS_MODULE, 135 .subchannel_type = SUBCHANNEL_TYPE_IO, 136 .name = "io_subchannel", 137 .irq = io_subchannel_irq, 138 .notify = io_subchannel_notify, 139 .verify = io_subchannel_verify, 140 .termination = io_subchannel_ioterm, 141 .probe = io_subchannel_probe, 142 .remove = io_subchannel_remove, 143 .shutdown = io_subchannel_shutdown, 144 }; 145 146 struct workqueue_struct *ccw_device_work; 147 struct workqueue_struct *ccw_device_notify_work; 148 wait_queue_head_t ccw_device_init_wq; 149 atomic_t ccw_device_init_count; 150 151 static void recovery_func(unsigned long data); 152 153 static int __init 154 init_ccw_bus_type (void) 155 { 156 int ret; 157 158 init_waitqueue_head(&ccw_device_init_wq); 159 atomic_set(&ccw_device_init_count, 0); 160 setup_timer(&recovery_timer, recovery_func, 0); 161 162 ccw_device_work = create_singlethread_workqueue("cio"); 163 if (!ccw_device_work) 164 return -ENOMEM; /* FIXME: better errno ? */ 165 ccw_device_notify_work = create_singlethread_workqueue("cio_notify"); 166 if (!ccw_device_notify_work) { 167 ret = -ENOMEM; /* FIXME: better errno ? */ 168 goto out_err; 169 } 170 slow_path_wq = create_singlethread_workqueue("kslowcrw"); 171 if (!slow_path_wq) { 172 ret = -ENOMEM; /* FIXME: better errno ? */ 173 goto out_err; 174 } 175 if ((ret = bus_register (&ccw_bus_type))) 176 goto out_err; 177 178 ret = css_driver_register(&io_subchannel_driver); 179 if (ret) 180 goto out_err; 181 182 wait_event(ccw_device_init_wq, 183 atomic_read(&ccw_device_init_count) == 0); 184 flush_workqueue(ccw_device_work); 185 return 0; 186 out_err: 187 if (ccw_device_work) 188 destroy_workqueue(ccw_device_work); 189 if (ccw_device_notify_work) 190 destroy_workqueue(ccw_device_notify_work); 191 if (slow_path_wq) 192 destroy_workqueue(slow_path_wq); 193 return ret; 194 } 195 196 static void __exit 197 cleanup_ccw_bus_type (void) 198 { 199 css_driver_unregister(&io_subchannel_driver); 200 bus_unregister(&ccw_bus_type); 201 destroy_workqueue(ccw_device_notify_work); 202 destroy_workqueue(ccw_device_work); 203 } 204 205 subsys_initcall(init_ccw_bus_type); 206 module_exit(cleanup_ccw_bus_type); 207 208 /************************ device handling **************************/ 209 210 /* 211 * A ccw_device has some interfaces in sysfs in addition to the 212 * standard ones. 213 * The following entries are designed to export the information which 214 * resided in 2.4 in /proc/subchannels. Subchannel and device number 215 * are obvious, so they don't have an entry :) 216 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree? 217 */ 218 static ssize_t 219 chpids_show (struct device * dev, struct device_attribute *attr, char * buf) 220 { 221 struct subchannel *sch = to_subchannel(dev); 222 struct chsc_ssd_info *ssd = &sch->ssd_info; 223 ssize_t ret = 0; 224 int chp; 225 int mask; 226 227 for (chp = 0; chp < 8; chp++) { 228 mask = 0x80 >> chp; 229 if (ssd->path_mask & mask) 230 ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id); 231 else 232 ret += sprintf(buf + ret, "00 "); 233 } 234 ret += sprintf (buf+ret, "\n"); 235 return min((ssize_t)PAGE_SIZE, ret); 236 } 237 238 static ssize_t 239 pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf) 240 { 241 struct subchannel *sch = to_subchannel(dev); 242 struct pmcw *pmcw = &sch->schib.pmcw; 243 244 return sprintf (buf, "%02x %02x %02x\n", 245 pmcw->pim, pmcw->pam, pmcw->pom); 246 } 247 248 static ssize_t 249 devtype_show (struct device *dev, struct device_attribute *attr, char *buf) 250 { 251 struct ccw_device *cdev = to_ccwdev(dev); 252 struct ccw_device_id *id = &(cdev->id); 253 254 if (id->dev_type != 0) 255 return sprintf(buf, "%04x/%02x\n", 256 id->dev_type, id->dev_model); 257 else 258 return sprintf(buf, "n/a\n"); 259 } 260 261 static ssize_t 262 cutype_show (struct device *dev, struct device_attribute *attr, char *buf) 263 { 264 struct ccw_device *cdev = to_ccwdev(dev); 265 struct ccw_device_id *id = &(cdev->id); 266 267 return sprintf(buf, "%04x/%02x\n", 268 id->cu_type, id->cu_model); 269 } 270 271 static ssize_t 272 modalias_show (struct device *dev, struct device_attribute *attr, char *buf) 273 { 274 struct ccw_device *cdev = to_ccwdev(dev); 275 struct ccw_device_id *id = &(cdev->id); 276 int len; 277 278 len = snprint_alias(buf, PAGE_SIZE, id, "\n"); 279 280 return len > PAGE_SIZE ? PAGE_SIZE : len; 281 } 282 283 static ssize_t 284 online_show (struct device *dev, struct device_attribute *attr, char *buf) 285 { 286 struct ccw_device *cdev = to_ccwdev(dev); 287 288 return sprintf(buf, cdev->online ? "1\n" : "0\n"); 289 } 290 291 int ccw_device_is_orphan(struct ccw_device *cdev) 292 { 293 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent)); 294 } 295 296 static void ccw_device_unregister(struct ccw_device *cdev) 297 { 298 if (test_and_clear_bit(1, &cdev->private->registered)) 299 device_del(&cdev->dev); 300 } 301 302 static void ccw_device_remove_orphan_cb(struct device *dev) 303 { 304 struct ccw_device *cdev = to_ccwdev(dev); 305 306 ccw_device_unregister(cdev); 307 put_device(&cdev->dev); 308 } 309 310 static void ccw_device_remove_sch_cb(struct device *dev) 311 { 312 struct subchannel *sch; 313 314 sch = to_subchannel(dev); 315 css_sch_device_unregister(sch); 316 /* Reset intparm to zeroes. */ 317 sch->schib.pmcw.intparm = 0; 318 cio_modify(sch); 319 put_device(&sch->dev); 320 } 321 322 static void 323 ccw_device_remove_disconnected(struct ccw_device *cdev) 324 { 325 unsigned long flags; 326 int rc; 327 328 /* 329 * Forced offline in disconnected state means 330 * 'throw away device'. 331 */ 332 if (ccw_device_is_orphan(cdev)) { 333 /* 334 * Deregister ccw device. 335 * Unfortunately, we cannot do this directly from the 336 * attribute method. 337 */ 338 spin_lock_irqsave(cdev->ccwlock, flags); 339 cdev->private->state = DEV_STATE_NOT_OPER; 340 spin_unlock_irqrestore(cdev->ccwlock, flags); 341 rc = device_schedule_callback(&cdev->dev, 342 ccw_device_remove_orphan_cb); 343 if (rc) 344 CIO_MSG_EVENT(2, "Couldn't unregister orphan " 345 "0.%x.%04x\n", 346 cdev->private->dev_id.ssid, 347 cdev->private->dev_id.devno); 348 return; 349 } 350 /* Deregister subchannel, which will kill the ccw device. */ 351 rc = device_schedule_callback(cdev->dev.parent, 352 ccw_device_remove_sch_cb); 353 if (rc) 354 CIO_MSG_EVENT(2, "Couldn't unregister disconnected device " 355 "0.%x.%04x\n", 356 cdev->private->dev_id.ssid, 357 cdev->private->dev_id.devno); 358 } 359 360 /** 361 * ccw_device_set_offline() - disable a ccw device for I/O 362 * @cdev: target ccw device 363 * 364 * This function calls the driver's set_offline() function for @cdev, if 365 * given, and then disables @cdev. 366 * Returns: 367 * %0 on success and a negative error value on failure. 368 * Context: 369 * enabled, ccw device lock not held 370 */ 371 int ccw_device_set_offline(struct ccw_device *cdev) 372 { 373 int ret; 374 375 if (!cdev) 376 return -ENODEV; 377 if (!cdev->online || !cdev->drv) 378 return -EINVAL; 379 380 if (cdev->drv->set_offline) { 381 ret = cdev->drv->set_offline(cdev); 382 if (ret != 0) 383 return ret; 384 } 385 cdev->online = 0; 386 spin_lock_irq(cdev->ccwlock); 387 ret = ccw_device_offline(cdev); 388 if (ret == -ENODEV) { 389 if (cdev->private->state != DEV_STATE_NOT_OPER) { 390 cdev->private->state = DEV_STATE_OFFLINE; 391 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 392 } 393 spin_unlock_irq(cdev->ccwlock); 394 return ret; 395 } 396 spin_unlock_irq(cdev->ccwlock); 397 if (ret == 0) 398 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 399 else { 400 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 401 "device 0.%x.%04x\n", 402 ret, cdev->private->dev_id.ssid, 403 cdev->private->dev_id.devno); 404 cdev->online = 1; 405 } 406 return ret; 407 } 408 409 /** 410 * ccw_device_set_online() - enable a ccw device for I/O 411 * @cdev: target ccw device 412 * 413 * This function first enables @cdev and then calls the driver's set_online() 414 * function for @cdev, if given. If set_online() returns an error, @cdev is 415 * disabled again. 416 * Returns: 417 * %0 on success and a negative error value on failure. 418 * Context: 419 * enabled, ccw device lock not held 420 */ 421 int ccw_device_set_online(struct ccw_device *cdev) 422 { 423 int ret; 424 425 if (!cdev) 426 return -ENODEV; 427 if (cdev->online || !cdev->drv) 428 return -EINVAL; 429 430 spin_lock_irq(cdev->ccwlock); 431 ret = ccw_device_online(cdev); 432 spin_unlock_irq(cdev->ccwlock); 433 if (ret == 0) 434 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 435 else { 436 CIO_MSG_EVENT(2, "ccw_device_online returned %d, " 437 "device 0.%x.%04x\n", 438 ret, cdev->private->dev_id.ssid, 439 cdev->private->dev_id.devno); 440 return ret; 441 } 442 if (cdev->private->state != DEV_STATE_ONLINE) 443 return -ENODEV; 444 if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { 445 cdev->online = 1; 446 return 0; 447 } 448 spin_lock_irq(cdev->ccwlock); 449 ret = ccw_device_offline(cdev); 450 spin_unlock_irq(cdev->ccwlock); 451 if (ret == 0) 452 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 453 else 454 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 455 "device 0.%x.%04x\n", 456 ret, cdev->private->dev_id.ssid, 457 cdev->private->dev_id.devno); 458 return (ret == 0) ? -ENODEV : ret; 459 } 460 461 static void online_store_handle_offline(struct ccw_device *cdev) 462 { 463 if (cdev->private->state == DEV_STATE_DISCONNECTED) 464 ccw_device_remove_disconnected(cdev); 465 else if (cdev->drv && cdev->drv->set_offline) 466 ccw_device_set_offline(cdev); 467 } 468 469 static int online_store_recog_and_online(struct ccw_device *cdev) 470 { 471 int ret; 472 473 /* Do device recognition, if needed. */ 474 if (cdev->id.cu_type == 0) { 475 ret = ccw_device_recognition(cdev); 476 if (ret) { 477 CIO_MSG_EVENT(0, "Couldn't start recognition " 478 "for device 0.%x.%04x (ret=%d)\n", 479 cdev->private->dev_id.ssid, 480 cdev->private->dev_id.devno, ret); 481 return ret; 482 } 483 wait_event(cdev->private->wait_q, 484 cdev->private->flags.recog_done); 485 } 486 if (cdev->drv && cdev->drv->set_online) 487 ccw_device_set_online(cdev); 488 return 0; 489 } 490 static void online_store_handle_online(struct ccw_device *cdev, int force) 491 { 492 int ret; 493 494 ret = online_store_recog_and_online(cdev); 495 if (ret) 496 return; 497 if (force && cdev->private->state == DEV_STATE_BOXED) { 498 ret = ccw_device_stlck(cdev); 499 if (ret) { 500 dev_warn(&cdev->dev, 501 "ccw_device_stlck returned %d!\n", ret); 502 return; 503 } 504 if (cdev->id.cu_type == 0) 505 cdev->private->state = DEV_STATE_NOT_OPER; 506 online_store_recog_and_online(cdev); 507 } 508 509 } 510 511 static ssize_t online_store (struct device *dev, struct device_attribute *attr, 512 const char *buf, size_t count) 513 { 514 struct ccw_device *cdev = to_ccwdev(dev); 515 int i, force; 516 char *tmp; 517 518 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 519 return -EAGAIN; 520 521 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 522 atomic_set(&cdev->private->onoff, 0); 523 return -EINVAL; 524 } 525 if (!strncmp(buf, "force\n", count)) { 526 force = 1; 527 i = 1; 528 } else { 529 force = 0; 530 i = simple_strtoul(buf, &tmp, 16); 531 } 532 533 switch (i) { 534 case 0: 535 online_store_handle_offline(cdev); 536 break; 537 case 1: 538 online_store_handle_online(cdev, force); 539 break; 540 default: 541 count = -EINVAL; 542 } 543 if (cdev->drv) 544 module_put(cdev->drv->owner); 545 atomic_set(&cdev->private->onoff, 0); 546 return count; 547 } 548 549 static ssize_t 550 available_show (struct device *dev, struct device_attribute *attr, char *buf) 551 { 552 struct ccw_device *cdev = to_ccwdev(dev); 553 struct subchannel *sch; 554 555 if (ccw_device_is_orphan(cdev)) 556 return sprintf(buf, "no device\n"); 557 switch (cdev->private->state) { 558 case DEV_STATE_BOXED: 559 return sprintf(buf, "boxed\n"); 560 case DEV_STATE_DISCONNECTED: 561 case DEV_STATE_DISCONNECTED_SENSE_ID: 562 case DEV_STATE_NOT_OPER: 563 sch = to_subchannel(dev->parent); 564 if (!sch->lpm) 565 return sprintf(buf, "no path\n"); 566 else 567 return sprintf(buf, "no device\n"); 568 default: 569 /* All other states considered fine. */ 570 return sprintf(buf, "good\n"); 571 } 572 } 573 574 static DEVICE_ATTR(chpids, 0444, chpids_show, NULL); 575 static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL); 576 static DEVICE_ATTR(devtype, 0444, devtype_show, NULL); 577 static DEVICE_ATTR(cutype, 0444, cutype_show, NULL); 578 static DEVICE_ATTR(modalias, 0444, modalias_show, NULL); 579 static DEVICE_ATTR(online, 0644, online_show, online_store); 580 extern struct device_attribute dev_attr_cmb_enable; 581 static DEVICE_ATTR(availability, 0444, available_show, NULL); 582 583 static struct attribute * subch_attrs[] = { 584 &dev_attr_chpids.attr, 585 &dev_attr_pimpampom.attr, 586 NULL, 587 }; 588 589 static struct attribute_group subch_attr_group = { 590 .attrs = subch_attrs, 591 }; 592 593 struct attribute_group *subch_attr_groups[] = { 594 &subch_attr_group, 595 NULL, 596 }; 597 598 static struct attribute * ccwdev_attrs[] = { 599 &dev_attr_devtype.attr, 600 &dev_attr_cutype.attr, 601 &dev_attr_modalias.attr, 602 &dev_attr_online.attr, 603 &dev_attr_cmb_enable.attr, 604 &dev_attr_availability.attr, 605 NULL, 606 }; 607 608 static struct attribute_group ccwdev_attr_group = { 609 .attrs = ccwdev_attrs, 610 }; 611 612 static struct attribute_group *ccwdev_attr_groups[] = { 613 &ccwdev_attr_group, 614 NULL, 615 }; 616 617 /* this is a simple abstraction for device_register that sets the 618 * correct bus type and adds the bus specific files */ 619 static int ccw_device_register(struct ccw_device *cdev) 620 { 621 struct device *dev = &cdev->dev; 622 int ret; 623 624 dev->bus = &ccw_bus_type; 625 626 if ((ret = device_add(dev))) 627 return ret; 628 629 set_bit(1, &cdev->private->registered); 630 return ret; 631 } 632 633 struct match_data { 634 struct ccw_dev_id dev_id; 635 struct ccw_device * sibling; 636 }; 637 638 static int 639 match_devno(struct device * dev, void * data) 640 { 641 struct match_data * d = data; 642 struct ccw_device * cdev; 643 644 cdev = to_ccwdev(dev); 645 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 646 !ccw_device_is_orphan(cdev) && 647 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) && 648 (cdev != d->sibling)) 649 return 1; 650 return 0; 651 } 652 653 static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id, 654 struct ccw_device *sibling) 655 { 656 struct device *dev; 657 struct match_data data; 658 659 data.dev_id = *dev_id; 660 data.sibling = sibling; 661 dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); 662 663 return dev ? to_ccwdev(dev) : NULL; 664 } 665 666 static int match_orphan(struct device *dev, void *data) 667 { 668 struct ccw_dev_id *dev_id; 669 struct ccw_device *cdev; 670 671 dev_id = data; 672 cdev = to_ccwdev(dev); 673 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id); 674 } 675 676 static struct ccw_device * 677 get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css, 678 struct ccw_dev_id *dev_id) 679 { 680 struct device *dev; 681 682 dev = device_find_child(&css->pseudo_subchannel->dev, dev_id, 683 match_orphan); 684 685 return dev ? to_ccwdev(dev) : NULL; 686 } 687 688 static void 689 ccw_device_add_changed(struct work_struct *work) 690 { 691 struct ccw_device_private *priv; 692 struct ccw_device *cdev; 693 694 priv = container_of(work, struct ccw_device_private, kick_work); 695 cdev = priv->cdev; 696 if (device_add(&cdev->dev)) { 697 put_device(&cdev->dev); 698 return; 699 } 700 set_bit(1, &cdev->private->registered); 701 } 702 703 void ccw_device_do_unreg_rereg(struct work_struct *work) 704 { 705 struct ccw_device_private *priv; 706 struct ccw_device *cdev; 707 struct subchannel *sch; 708 709 priv = container_of(work, struct ccw_device_private, kick_work); 710 cdev = priv->cdev; 711 sch = to_subchannel(cdev->dev.parent); 712 713 ccw_device_unregister(cdev); 714 PREPARE_WORK(&cdev->private->kick_work, 715 ccw_device_add_changed); 716 queue_work(ccw_device_work, &cdev->private->kick_work); 717 } 718 719 static void 720 ccw_device_release(struct device *dev) 721 { 722 struct ccw_device *cdev; 723 724 cdev = to_ccwdev(dev); 725 kfree(cdev->private); 726 kfree(cdev); 727 } 728 729 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch) 730 { 731 struct ccw_device *cdev; 732 733 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); 734 if (cdev) { 735 cdev->private = kzalloc(sizeof(struct ccw_device_private), 736 GFP_KERNEL | GFP_DMA); 737 if (cdev->private) 738 return cdev; 739 } 740 kfree(cdev); 741 return ERR_PTR(-ENOMEM); 742 } 743 744 static int io_subchannel_initialize_dev(struct subchannel *sch, 745 struct ccw_device *cdev) 746 { 747 cdev->private->cdev = cdev; 748 atomic_set(&cdev->private->onoff, 0); 749 cdev->dev.parent = &sch->dev; 750 cdev->dev.release = ccw_device_release; 751 INIT_WORK(&cdev->private->kick_work, NULL); 752 cdev->dev.groups = ccwdev_attr_groups; 753 /* Do first half of device_register. */ 754 device_initialize(&cdev->dev); 755 if (!get_device(&sch->dev)) { 756 if (cdev->dev.release) 757 cdev->dev.release(&cdev->dev); 758 return -ENODEV; 759 } 760 return 0; 761 } 762 763 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch) 764 { 765 struct ccw_device *cdev; 766 int ret; 767 768 cdev = io_subchannel_allocate_dev(sch); 769 if (!IS_ERR(cdev)) { 770 ret = io_subchannel_initialize_dev(sch, cdev); 771 if (ret) { 772 kfree(cdev); 773 cdev = ERR_PTR(ret); 774 } 775 } 776 return cdev; 777 } 778 779 static int io_subchannel_recog(struct ccw_device *, struct subchannel *); 780 781 static void sch_attach_device(struct subchannel *sch, 782 struct ccw_device *cdev) 783 { 784 css_update_ssd_info(sch); 785 spin_lock_irq(sch->lock); 786 sch_set_cdev(sch, cdev); 787 cdev->private->schid = sch->schid; 788 cdev->ccwlock = sch->lock; 789 device_trigger_reprobe(sch); 790 spin_unlock_irq(sch->lock); 791 } 792 793 static void sch_attach_disconnected_device(struct subchannel *sch, 794 struct ccw_device *cdev) 795 { 796 struct subchannel *other_sch; 797 int ret; 798 799 other_sch = to_subchannel(get_device(cdev->dev.parent)); 800 ret = device_move(&cdev->dev, &sch->dev); 801 if (ret) { 802 CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed " 803 "(ret=%d)!\n", cdev->private->dev_id.ssid, 804 cdev->private->dev_id.devno, ret); 805 put_device(&other_sch->dev); 806 return; 807 } 808 sch_set_cdev(other_sch, NULL); 809 /* No need to keep a subchannel without ccw device around. */ 810 css_sch_device_unregister(other_sch); 811 put_device(&other_sch->dev); 812 sch_attach_device(sch, cdev); 813 } 814 815 static void sch_attach_orphaned_device(struct subchannel *sch, 816 struct ccw_device *cdev) 817 { 818 int ret; 819 820 /* Try to move the ccw device to its new subchannel. */ 821 ret = device_move(&cdev->dev, &sch->dev); 822 if (ret) { 823 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " 824 "failed (ret=%d)!\n", 825 cdev->private->dev_id.ssid, 826 cdev->private->dev_id.devno, ret); 827 return; 828 } 829 sch_attach_device(sch, cdev); 830 } 831 832 static void sch_create_and_recog_new_device(struct subchannel *sch) 833 { 834 struct ccw_device *cdev; 835 836 /* Need to allocate a new ccw device. */ 837 cdev = io_subchannel_create_ccwdev(sch); 838 if (IS_ERR(cdev)) { 839 /* OK, we did everything we could... */ 840 css_sch_device_unregister(sch); 841 return; 842 } 843 spin_lock_irq(sch->lock); 844 sch_set_cdev(sch, cdev); 845 spin_unlock_irq(sch->lock); 846 /* Start recognition for the new ccw device. */ 847 if (io_subchannel_recog(cdev, sch)) { 848 spin_lock_irq(sch->lock); 849 sch_set_cdev(sch, NULL); 850 spin_unlock_irq(sch->lock); 851 if (cdev->dev.release) 852 cdev->dev.release(&cdev->dev); 853 css_sch_device_unregister(sch); 854 } 855 } 856 857 858 void ccw_device_move_to_orphanage(struct work_struct *work) 859 { 860 struct ccw_device_private *priv; 861 struct ccw_device *cdev; 862 struct ccw_device *replacing_cdev; 863 struct subchannel *sch; 864 int ret; 865 struct channel_subsystem *css; 866 struct ccw_dev_id dev_id; 867 868 priv = container_of(work, struct ccw_device_private, kick_work); 869 cdev = priv->cdev; 870 sch = to_subchannel(cdev->dev.parent); 871 css = to_css(sch->dev.parent); 872 dev_id.devno = sch->schib.pmcw.dev; 873 dev_id.ssid = sch->schid.ssid; 874 875 /* 876 * Move the orphaned ccw device to the orphanage so the replacing 877 * ccw device can take its place on the subchannel. 878 */ 879 ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); 880 if (ret) { 881 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " 882 "(ret=%d)!\n", cdev->private->dev_id.ssid, 883 cdev->private->dev_id.devno, ret); 884 return; 885 } 886 cdev->ccwlock = css->pseudo_subchannel->lock; 887 /* 888 * Search for the replacing ccw device 889 * - among the disconnected devices 890 * - in the orphanage 891 */ 892 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev); 893 if (replacing_cdev) { 894 sch_attach_disconnected_device(sch, replacing_cdev); 895 return; 896 } 897 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); 898 if (replacing_cdev) { 899 sch_attach_orphaned_device(sch, replacing_cdev); 900 return; 901 } 902 sch_create_and_recog_new_device(sch); 903 } 904 905 /* 906 * Register recognized device. 907 */ 908 static void 909 io_subchannel_register(struct work_struct *work) 910 { 911 struct ccw_device_private *priv; 912 struct ccw_device *cdev; 913 struct subchannel *sch; 914 int ret; 915 unsigned long flags; 916 917 priv = container_of(work, struct ccw_device_private, kick_work); 918 cdev = priv->cdev; 919 sch = to_subchannel(cdev->dev.parent); 920 css_update_ssd_info(sch); 921 /* 922 * io_subchannel_register() will also be called after device 923 * recognition has been done for a boxed device (which will already 924 * be registered). We need to reprobe since we may now have sense id 925 * information. 926 */ 927 if (klist_node_attached(&cdev->dev.knode_parent)) { 928 if (!cdev->drv) { 929 ret = device_reprobe(&cdev->dev); 930 if (ret) 931 /* We can't do much here. */ 932 CIO_MSG_EVENT(2, "device_reprobe() returned" 933 " %d for 0.%x.%04x\n", ret, 934 cdev->private->dev_id.ssid, 935 cdev->private->dev_id.devno); 936 } 937 goto out; 938 } 939 /* 940 * Now we know this subchannel will stay, we can throw 941 * our delayed uevent. 942 */ 943 sch->dev.uevent_suppress = 0; 944 kobject_uevent(&sch->dev.kobj, KOBJ_ADD); 945 /* make it known to the system */ 946 ret = ccw_device_register(cdev); 947 if (ret) { 948 CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", 949 cdev->private->dev_id.ssid, 950 cdev->private->dev_id.devno, ret); 951 put_device(&cdev->dev); 952 spin_lock_irqsave(sch->lock, flags); 953 sch_set_cdev(sch, NULL); 954 spin_unlock_irqrestore(sch->lock, flags); 955 kfree (cdev->private); 956 kfree (cdev); 957 put_device(&sch->dev); 958 if (atomic_dec_and_test(&ccw_device_init_count)) 959 wake_up(&ccw_device_init_wq); 960 return; 961 } 962 put_device(&cdev->dev); 963 out: 964 cdev->private->flags.recog_done = 1; 965 put_device(&sch->dev); 966 wake_up(&cdev->private->wait_q); 967 if (atomic_dec_and_test(&ccw_device_init_count)) 968 wake_up(&ccw_device_init_wq); 969 } 970 971 static void ccw_device_call_sch_unregister(struct work_struct *work) 972 { 973 struct ccw_device_private *priv; 974 struct ccw_device *cdev; 975 struct subchannel *sch; 976 977 priv = container_of(work, struct ccw_device_private, kick_work); 978 cdev = priv->cdev; 979 sch = to_subchannel(cdev->dev.parent); 980 css_sch_device_unregister(sch); 981 /* Reset intparm to zeroes. */ 982 sch->schib.pmcw.intparm = 0; 983 cio_modify(sch); 984 put_device(&cdev->dev); 985 put_device(&sch->dev); 986 } 987 988 /* 989 * subchannel recognition done. Called from the state machine. 990 */ 991 void 992 io_subchannel_recog_done(struct ccw_device *cdev) 993 { 994 struct subchannel *sch; 995 996 if (css_init_done == 0) { 997 cdev->private->flags.recog_done = 1; 998 return; 999 } 1000 switch (cdev->private->state) { 1001 case DEV_STATE_NOT_OPER: 1002 cdev->private->flags.recog_done = 1; 1003 /* Remove device found not operational. */ 1004 if (!get_device(&cdev->dev)) 1005 break; 1006 sch = to_subchannel(cdev->dev.parent); 1007 PREPARE_WORK(&cdev->private->kick_work, 1008 ccw_device_call_sch_unregister); 1009 queue_work(slow_path_wq, &cdev->private->kick_work); 1010 if (atomic_dec_and_test(&ccw_device_init_count)) 1011 wake_up(&ccw_device_init_wq); 1012 break; 1013 case DEV_STATE_BOXED: 1014 /* Device did not respond in time. */ 1015 case DEV_STATE_OFFLINE: 1016 /* 1017 * We can't register the device in interrupt context so 1018 * we schedule a work item. 1019 */ 1020 if (!get_device(&cdev->dev)) 1021 break; 1022 PREPARE_WORK(&cdev->private->kick_work, 1023 io_subchannel_register); 1024 queue_work(slow_path_wq, &cdev->private->kick_work); 1025 break; 1026 } 1027 } 1028 1029 static int 1030 io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch) 1031 { 1032 int rc; 1033 struct ccw_device_private *priv; 1034 1035 sch_set_cdev(sch, cdev); 1036 sch->driver = &io_subchannel_driver; 1037 cdev->ccwlock = sch->lock; 1038 1039 /* Init private data. */ 1040 priv = cdev->private; 1041 priv->dev_id.devno = sch->schib.pmcw.dev; 1042 priv->dev_id.ssid = sch->schid.ssid; 1043 priv->schid = sch->schid; 1044 priv->state = DEV_STATE_NOT_OPER; 1045 INIT_LIST_HEAD(&priv->cmb_list); 1046 init_waitqueue_head(&priv->wait_q); 1047 init_timer(&priv->timer); 1048 1049 /* Set an initial name for the device. */ 1050 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", 1051 sch->schid.ssid, sch->schib.pmcw.dev); 1052 1053 /* Increase counter of devices currently in recognition. */ 1054 atomic_inc(&ccw_device_init_count); 1055 1056 /* Start async. device sensing. */ 1057 spin_lock_irq(sch->lock); 1058 rc = ccw_device_recognition(cdev); 1059 spin_unlock_irq(sch->lock); 1060 if (rc) { 1061 if (atomic_dec_and_test(&ccw_device_init_count)) 1062 wake_up(&ccw_device_init_wq); 1063 } 1064 return rc; 1065 } 1066 1067 static void ccw_device_move_to_sch(struct work_struct *work) 1068 { 1069 struct ccw_device_private *priv; 1070 int rc; 1071 struct subchannel *sch; 1072 struct ccw_device *cdev; 1073 struct subchannel *former_parent; 1074 1075 priv = container_of(work, struct ccw_device_private, kick_work); 1076 sch = priv->sch; 1077 cdev = priv->cdev; 1078 former_parent = ccw_device_is_orphan(cdev) ? 1079 NULL : to_subchannel(get_device(cdev->dev.parent)); 1080 mutex_lock(&sch->reg_mutex); 1081 /* Try to move the ccw device to its new subchannel. */ 1082 rc = device_move(&cdev->dev, &sch->dev); 1083 mutex_unlock(&sch->reg_mutex); 1084 if (rc) { 1085 CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel " 1086 "0.%x.%04x failed (ret=%d)!\n", 1087 cdev->private->dev_id.ssid, 1088 cdev->private->dev_id.devno, sch->schid.ssid, 1089 sch->schid.sch_no, rc); 1090 css_sch_device_unregister(sch); 1091 goto out; 1092 } 1093 if (former_parent) { 1094 spin_lock_irq(former_parent->lock); 1095 sch_set_cdev(former_parent, NULL); 1096 spin_unlock_irq(former_parent->lock); 1097 css_sch_device_unregister(former_parent); 1098 /* Reset intparm to zeroes. */ 1099 former_parent->schib.pmcw.intparm = 0; 1100 cio_modify(former_parent); 1101 } 1102 sch_attach_device(sch, cdev); 1103 out: 1104 if (former_parent) 1105 put_device(&former_parent->dev); 1106 put_device(&cdev->dev); 1107 } 1108 1109 static void io_subchannel_irq(struct subchannel *sch) 1110 { 1111 struct ccw_device *cdev; 1112 1113 cdev = sch_get_cdev(sch); 1114 1115 CIO_TRACE_EVENT(3, "IRQ"); 1116 CIO_TRACE_EVENT(3, sch->dev.bus_id); 1117 if (cdev) 1118 dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); 1119 } 1120 1121 static int 1122 io_subchannel_probe (struct subchannel *sch) 1123 { 1124 struct ccw_device *cdev; 1125 int rc; 1126 unsigned long flags; 1127 struct ccw_dev_id dev_id; 1128 1129 cdev = sch_get_cdev(sch); 1130 if (cdev) { 1131 /* 1132 * This subchannel already has an associated ccw_device. 1133 * Register it and exit. This happens for all early 1134 * device, e.g. the console. 1135 */ 1136 cdev->dev.groups = ccwdev_attr_groups; 1137 device_initialize(&cdev->dev); 1138 ccw_device_register(cdev); 1139 /* 1140 * Check if the device is already online. If it is 1141 * the reference count needs to be corrected 1142 * (see ccw_device_online and css_init_done for the 1143 * ugly details). 1144 */ 1145 if (cdev->private->state != DEV_STATE_NOT_OPER && 1146 cdev->private->state != DEV_STATE_OFFLINE && 1147 cdev->private->state != DEV_STATE_BOXED) 1148 get_device(&cdev->dev); 1149 return 0; 1150 } 1151 /* 1152 * First check if a fitting device may be found amongst the 1153 * disconnected devices or in the orphanage. 1154 */ 1155 dev_id.devno = sch->schib.pmcw.dev; 1156 dev_id.ssid = sch->schid.ssid; 1157 /* Allocate I/O subchannel private data. */ 1158 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1159 GFP_KERNEL | GFP_DMA); 1160 if (!sch->private) 1161 return -ENOMEM; 1162 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); 1163 if (!cdev) 1164 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), 1165 &dev_id); 1166 if (cdev) { 1167 /* 1168 * Schedule moving the device until when we have a registered 1169 * subchannel to move to and succeed the probe. We can 1170 * unregister later again, when the probe is through. 1171 */ 1172 cdev->private->sch = sch; 1173 PREPARE_WORK(&cdev->private->kick_work, 1174 ccw_device_move_to_sch); 1175 queue_work(slow_path_wq, &cdev->private->kick_work); 1176 return 0; 1177 } 1178 cdev = io_subchannel_create_ccwdev(sch); 1179 if (IS_ERR(cdev)) { 1180 kfree(sch->private); 1181 return PTR_ERR(cdev); 1182 } 1183 rc = io_subchannel_recog(cdev, sch); 1184 if (rc) { 1185 spin_lock_irqsave(sch->lock, flags); 1186 sch_set_cdev(sch, NULL); 1187 spin_unlock_irqrestore(sch->lock, flags); 1188 if (cdev->dev.release) 1189 cdev->dev.release(&cdev->dev); 1190 kfree(sch->private); 1191 } 1192 1193 return rc; 1194 } 1195 1196 static int 1197 io_subchannel_remove (struct subchannel *sch) 1198 { 1199 struct ccw_device *cdev; 1200 unsigned long flags; 1201 1202 cdev = sch_get_cdev(sch); 1203 if (!cdev) 1204 return 0; 1205 /* Set ccw device to not operational and drop reference. */ 1206 spin_lock_irqsave(cdev->ccwlock, flags); 1207 sch_set_cdev(sch, NULL); 1208 cdev->private->state = DEV_STATE_NOT_OPER; 1209 spin_unlock_irqrestore(cdev->ccwlock, flags); 1210 ccw_device_unregister(cdev); 1211 put_device(&cdev->dev); 1212 kfree(sch->private); 1213 return 0; 1214 } 1215 1216 static int io_subchannel_notify(struct subchannel *sch, int event) 1217 { 1218 struct ccw_device *cdev; 1219 1220 cdev = sch_get_cdev(sch); 1221 if (!cdev) 1222 return 0; 1223 if (!cdev->drv) 1224 return 0; 1225 if (!cdev->online) 1226 return 0; 1227 return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; 1228 } 1229 1230 static void io_subchannel_verify(struct subchannel *sch) 1231 { 1232 struct ccw_device *cdev; 1233 1234 cdev = sch_get_cdev(sch); 1235 if (cdev) 1236 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1237 } 1238 1239 static void io_subchannel_ioterm(struct subchannel *sch) 1240 { 1241 struct ccw_device *cdev; 1242 1243 cdev = sch_get_cdev(sch); 1244 if (!cdev) 1245 return; 1246 /* Internal I/O will be retried by the interrupt handler. */ 1247 if (cdev->private->flags.intretry) 1248 return; 1249 cdev->private->state = DEV_STATE_CLEAR_VERIFY; 1250 if (cdev->handler) 1251 cdev->handler(cdev, cdev->private->intparm, 1252 ERR_PTR(-EIO)); 1253 } 1254 1255 static void 1256 io_subchannel_shutdown(struct subchannel *sch) 1257 { 1258 struct ccw_device *cdev; 1259 int ret; 1260 1261 cdev = sch_get_cdev(sch); 1262 1263 if (cio_is_console(sch->schid)) 1264 return; 1265 if (!sch->schib.pmcw.ena) 1266 /* Nothing to do. */ 1267 return; 1268 ret = cio_disable_subchannel(sch); 1269 if (ret != -EBUSY) 1270 /* Subchannel is disabled, we're done. */ 1271 return; 1272 cdev->private->state = DEV_STATE_QUIESCE; 1273 if (cdev->handler) 1274 cdev->handler(cdev, cdev->private->intparm, 1275 ERR_PTR(-EIO)); 1276 ret = ccw_device_cancel_halt_clear(cdev); 1277 if (ret == -EBUSY) { 1278 ccw_device_set_timeout(cdev, HZ/10); 1279 wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); 1280 } 1281 cio_disable_subchannel(sch); 1282 } 1283 1284 #ifdef CONFIG_CCW_CONSOLE 1285 static struct ccw_device console_cdev; 1286 static struct ccw_device_private console_private; 1287 static int console_cdev_in_use; 1288 1289 static DEFINE_SPINLOCK(ccw_console_lock); 1290 1291 spinlock_t * cio_get_console_lock(void) 1292 { 1293 return &ccw_console_lock; 1294 } 1295 1296 static int 1297 ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1298 { 1299 int rc; 1300 1301 /* Attach subchannel private data. */ 1302 sch->private = cio_get_console_priv(); 1303 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1304 /* Initialize the ccw_device structure. */ 1305 cdev->dev.parent= &sch->dev; 1306 rc = io_subchannel_recog(cdev, sch); 1307 if (rc) 1308 return rc; 1309 1310 /* Now wait for the async. recognition to come to an end. */ 1311 spin_lock_irq(cdev->ccwlock); 1312 while (!dev_fsm_final_state(cdev)) 1313 wait_cons_dev(); 1314 rc = -EIO; 1315 if (cdev->private->state != DEV_STATE_OFFLINE) 1316 goto out_unlock; 1317 ccw_device_online(cdev); 1318 while (!dev_fsm_final_state(cdev)) 1319 wait_cons_dev(); 1320 if (cdev->private->state != DEV_STATE_ONLINE) 1321 goto out_unlock; 1322 rc = 0; 1323 out_unlock: 1324 spin_unlock_irq(cdev->ccwlock); 1325 return 0; 1326 } 1327 1328 struct ccw_device * 1329 ccw_device_probe_console(void) 1330 { 1331 struct subchannel *sch; 1332 int ret; 1333 1334 if (xchg(&console_cdev_in_use, 1) != 0) 1335 return ERR_PTR(-EBUSY); 1336 sch = cio_probe_console(); 1337 if (IS_ERR(sch)) { 1338 console_cdev_in_use = 0; 1339 return (void *) sch; 1340 } 1341 memset(&console_cdev, 0, sizeof(struct ccw_device)); 1342 memset(&console_private, 0, sizeof(struct ccw_device_private)); 1343 console_cdev.private = &console_private; 1344 console_private.cdev = &console_cdev; 1345 ret = ccw_device_console_enable(&console_cdev, sch); 1346 if (ret) { 1347 cio_release_console(); 1348 console_cdev_in_use = 0; 1349 return ERR_PTR(ret); 1350 } 1351 console_cdev.online = 1; 1352 return &console_cdev; 1353 } 1354 #endif 1355 1356 /* 1357 * get ccw_device matching the busid, but only if owned by cdrv 1358 */ 1359 static int 1360 __ccwdev_check_busid(struct device *dev, void *id) 1361 { 1362 char *bus_id; 1363 1364 bus_id = id; 1365 1366 return (strncmp(bus_id, dev->bus_id, BUS_ID_SIZE) == 0); 1367 } 1368 1369 1370 /** 1371 * get_ccwdev_by_busid() - obtain device from a bus id 1372 * @cdrv: driver the device is owned by 1373 * @bus_id: bus id of the device to be searched 1374 * 1375 * This function searches all devices owned by @cdrv for a device with a bus 1376 * id matching @bus_id. 1377 * Returns: 1378 * If a match is found, its reference count of the found device is increased 1379 * and it is returned; else %NULL is returned. 1380 */ 1381 struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv, 1382 const char *bus_id) 1383 { 1384 struct device *dev; 1385 struct device_driver *drv; 1386 1387 drv = get_driver(&cdrv->driver); 1388 if (!drv) 1389 return NULL; 1390 1391 dev = driver_find_device(drv, NULL, (void *)bus_id, 1392 __ccwdev_check_busid); 1393 put_driver(drv); 1394 1395 return dev ? to_ccwdev(dev) : NULL; 1396 } 1397 1398 /************************** device driver handling ************************/ 1399 1400 /* This is the implementation of the ccw_driver class. The probe, remove 1401 * and release methods are initially very similar to the device_driver 1402 * implementations, with the difference that they have ccw_device 1403 * arguments. 1404 * 1405 * A ccw driver also contains the information that is needed for 1406 * device matching. 1407 */ 1408 static int 1409 ccw_device_probe (struct device *dev) 1410 { 1411 struct ccw_device *cdev = to_ccwdev(dev); 1412 struct ccw_driver *cdrv = to_ccwdrv(dev->driver); 1413 int ret; 1414 1415 cdev->drv = cdrv; /* to let the driver call _set_online */ 1416 1417 ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; 1418 1419 if (ret) { 1420 cdev->drv = NULL; 1421 return ret; 1422 } 1423 1424 return 0; 1425 } 1426 1427 static int 1428 ccw_device_remove (struct device *dev) 1429 { 1430 struct ccw_device *cdev = to_ccwdev(dev); 1431 struct ccw_driver *cdrv = cdev->drv; 1432 int ret; 1433 1434 if (cdrv->remove) 1435 cdrv->remove(cdev); 1436 if (cdev->online) { 1437 cdev->online = 0; 1438 spin_lock_irq(cdev->ccwlock); 1439 ret = ccw_device_offline(cdev); 1440 spin_unlock_irq(cdev->ccwlock); 1441 if (ret == 0) 1442 wait_event(cdev->private->wait_q, 1443 dev_fsm_final_state(cdev)); 1444 else 1445 //FIXME: we can't fail! 1446 CIO_MSG_EVENT(2, "ccw_device_offline returned %d, " 1447 "device 0.%x.%04x\n", 1448 ret, cdev->private->dev_id.ssid, 1449 cdev->private->dev_id.devno); 1450 } 1451 ccw_device_set_timeout(cdev, 0); 1452 cdev->drv = NULL; 1453 return 0; 1454 } 1455 1456 static void ccw_device_shutdown(struct device *dev) 1457 { 1458 struct ccw_device *cdev; 1459 1460 cdev = to_ccwdev(dev); 1461 if (cdev->drv && cdev->drv->shutdown) 1462 cdev->drv->shutdown(cdev); 1463 disable_cmf(cdev); 1464 } 1465 1466 struct bus_type ccw_bus_type = { 1467 .name = "ccw", 1468 .match = ccw_bus_match, 1469 .uevent = ccw_uevent, 1470 .probe = ccw_device_probe, 1471 .remove = ccw_device_remove, 1472 .shutdown = ccw_device_shutdown, 1473 }; 1474 1475 /** 1476 * ccw_driver_register() - register a ccw driver 1477 * @cdriver: driver to be registered 1478 * 1479 * This function is mainly a wrapper around driver_register(). 1480 * Returns: 1481 * %0 on success and a negative error value on failure. 1482 */ 1483 int ccw_driver_register(struct ccw_driver *cdriver) 1484 { 1485 struct device_driver *drv = &cdriver->driver; 1486 1487 drv->bus = &ccw_bus_type; 1488 drv->name = cdriver->name; 1489 drv->owner = cdriver->owner; 1490 1491 return driver_register(drv); 1492 } 1493 1494 /** 1495 * ccw_driver_unregister() - deregister a ccw driver 1496 * @cdriver: driver to be deregistered 1497 * 1498 * This function is mainly a wrapper around driver_unregister(). 1499 */ 1500 void ccw_driver_unregister(struct ccw_driver *cdriver) 1501 { 1502 driver_unregister(&cdriver->driver); 1503 } 1504 1505 /* Helper func for qdio. */ 1506 struct subchannel_id 1507 ccw_device_get_subchannel_id(struct ccw_device *cdev) 1508 { 1509 struct subchannel *sch; 1510 1511 sch = to_subchannel(cdev->dev.parent); 1512 return sch->schid; 1513 } 1514 1515 static int recovery_check(struct device *dev, void *data) 1516 { 1517 struct ccw_device *cdev = to_ccwdev(dev); 1518 int *redo = data; 1519 1520 spin_lock_irq(cdev->ccwlock); 1521 switch (cdev->private->state) { 1522 case DEV_STATE_DISCONNECTED: 1523 CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n", 1524 cdev->private->dev_id.ssid, 1525 cdev->private->dev_id.devno); 1526 dev_fsm_event(cdev, DEV_EVENT_VERIFY); 1527 *redo = 1; 1528 break; 1529 case DEV_STATE_DISCONNECTED_SENSE_ID: 1530 *redo = 1; 1531 break; 1532 } 1533 spin_unlock_irq(cdev->ccwlock); 1534 1535 return 0; 1536 } 1537 1538 static void recovery_work_func(struct work_struct *unused) 1539 { 1540 int redo = 0; 1541 1542 bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check); 1543 if (redo) { 1544 spin_lock_irq(&recovery_lock); 1545 if (!timer_pending(&recovery_timer)) { 1546 if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1) 1547 recovery_phase++; 1548 mod_timer(&recovery_timer, jiffies + 1549 recovery_delay[recovery_phase] * HZ); 1550 } 1551 spin_unlock_irq(&recovery_lock); 1552 } else 1553 CIO_MSG_EVENT(2, "recovery: end\n"); 1554 } 1555 1556 static DECLARE_WORK(recovery_work, recovery_work_func); 1557 1558 static void recovery_func(unsigned long data) 1559 { 1560 /* 1561 * We can't do our recovery in softirq context and it's not 1562 * performance critical, so we schedule it. 1563 */ 1564 schedule_work(&recovery_work); 1565 } 1566 1567 void ccw_device_schedule_recovery(void) 1568 { 1569 unsigned long flags; 1570 1571 CIO_MSG_EVENT(2, "recovery: schedule\n"); 1572 spin_lock_irqsave(&recovery_lock, flags); 1573 if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) { 1574 recovery_phase = 0; 1575 mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ); 1576 } 1577 spin_unlock_irqrestore(&recovery_lock, flags); 1578 } 1579 1580 MODULE_LICENSE("GPL"); 1581 EXPORT_SYMBOL(ccw_device_set_online); 1582 EXPORT_SYMBOL(ccw_device_set_offline); 1583 EXPORT_SYMBOL(ccw_driver_register); 1584 EXPORT_SYMBOL(ccw_driver_unregister); 1585 EXPORT_SYMBOL(get_ccwdev_by_busid); 1586 EXPORT_SYMBOL(ccw_bus_type); 1587 EXPORT_SYMBOL(ccw_device_work); 1588 EXPORT_SYMBOL(ccw_device_notify_work); 1589 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 1590