/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match (struct device * dev, struct device_driver * drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
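/*
 * Illustrative result (values made up): for cu_type 0x3990, cu_model 0xE9,
 * dev_type 0x3390 and dev_model 0x0C, snprint_alias() produces the alias
 * "ccw:t3990mE9dt3390dm0C"; with dev_type 0 it produces "ccw:t3990mE9dtdm".
 */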
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static struct bus_type ccw_bus_type;

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);
wait_queue_head_t ccw_device_init_wq;
atomic_t ccw_device_init_count;

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	init_waitqueue_head(&ccw_device_init_wq);
	atomic_set(&ccw_device_init_count, 0);
	setup_timer(&recovery_timer, recovery_func, 0);

	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
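/*
 * Illustrative output (hypothetical subchannel with two installed paths):
 *
 *	$ cat /sys/devices/css0/0.0.0001/chpids
 *	e0 e1 00 00 00 00 00 00
 *	$ cat /sys/devices/css0/0.0.0001/pimpampom
 *	c0 c0 c0
 */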
static ssize_t
chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf (buf+ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}

static ssize_t
pimpampom_show (struct device * dev, struct device_attribute *attr, char * buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf (buf, "%02x %02x %02x\n",
			pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);
/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;
	cdev->online = 1;
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
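/*
 * Illustrative sketch (not part of the driver API proper): a ccw driver's
 * setup code would typically toggle a device like this, with error handling
 * left to the caller:
 *
 *	if (ccw_device_set_online(cdev))
 *		return -ENODEV;
 *	...
 *	ccw_device_set_offline(cdev);
 *
 * Note that both functions sleep and must not be called while holding the
 * ccw device lock (see the Context: notes above).
 */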
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store (struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out_onoff;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out_onoff;
	}

	if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
		ret = -EINVAL;
		goto out_onoff;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = strict_strtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->driver.owner);
out_onoff:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
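/*
 * Illustrative usage from user space (assuming a device at 0.0.1234):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo 0 > /sys/bus/ccw/devices/0.0.1234/online
 *	echo force > /sys/bus/ccw/devices/0.0.1234/online
 *
 * "force" additionally tries to break the lock of a boxed device via
 * ccw_device_stlck() before onlining it.
 */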
static ssize_t
available_show (struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warning("Logging for subchannel 0.%x.%04x failed with "
			   "errno=%d\n",
			   sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		return ret;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
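/*
 * Illustrative lookup (hypothetical caller): the returned device holds an
 * extra reference that must be dropped with put_device() when done:
 *
 *	struct ccw_dev_id id = { .ssid = 0, .devno = 0x1234 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&id);
 *
 *	if (cdev) {
 *		...
 *		put_device(&cdev->dev);
 *	}
 */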
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	cdev->private->int_class = IRQIO_CIO;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	cdev->private->flags.initialized = 1;
	return 0;
}

static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
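/*
 * Worked example (illustrative values): with an available-path mask
 * pam = 0xc0 and an operational-path mask opm = 0x80, the logical-path
 * mask above becomes lpm = pam & opm = 0x80, i.e. only the first channel
 * path is used for I/O.
 */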
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		cdev->dev.groups = ccwdev_attr_groups;
		device_initialize(&cdev->dev);
		cdev->private->flags.initialized = 1;
		ccw_device_register(cdev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected since we
		 * didn't obtain a reference in ccw_device_set_online.
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove (struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}
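/*
 * With the recovery_delay table above ({ 3, 30, 300 }), a triggered
 * recovery thus runs 3 seconds after the initiating event, then again
 * after 30 seconds, and from then on every 300 seconds until no
 * disconnected devices are left.
 */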
static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};
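/*
 * Rough meaning of the actions above (see sch_get_action() and
 * io_subchannel_sch_event() below): UNREG unregisters the subchannel,
 * ATTACH attaches a ccw device to it, ORPH_* first moves the current
 * device to the orphanage, UNREG_ATTACH replaces the current device,
 * REPROBE triggers device recognition, VERIFY triggers path verification,
 * DISC marks the device disconnected and NOP does nothing.
 */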
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

#ifdef CONFIG_CCW_CONSOLE
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
static int console_cdev_in_use;

static DEFINE_SPINLOCK(ccw_console_lock);

spinlock_t * cio_get_console_lock(void)
{
	return &ccw_console_lock;
}

static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = cio_get_console_priv();
	int rc;

	/* Attach subchannel private data. */
	memset(io_priv, 0, sizeof(*io_priv));
	set_io_private(sch, io_priv);
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	/* Initialize the ccw_device structure. */
	cdev->dev.parent = &sch->dev;
	sch_set_cdev(sch, cdev);
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	rc = -EIO;
	if (cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;
	ccw_device_online(cdev);
	while (!dev_fsm_final_state(cdev))
		wait_cons_dev();
	if (cdev->private->state != DEV_STATE_ONLINE)
		goto out_unlock;
	rc = 0;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	return rc;
}

struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	console_private.cdev = &console_cdev;
	console_private.int_class = IRQIO_CIO;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(void)
{
	if (!console_cdev_in_use)
		return -ENODEV;
	return ccw_device_pm_restore(&console_cdev.dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}


/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
				 __ccwdev_check_busid);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}
static int
ccw_device_remove (struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);
	if (cdev->online) {
		cdev->online = 0;
		spin_lock_irq(cdev->ccwlock);
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
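/*
 * Minimal sketch of a ccw driver registration (hypothetical "foo" driver,
 * not part of this file). The CCW_DEVICE() id macro and the ccw_driver
 * fields used here are declared in <asm/ccwdev.h>:
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0xe9) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name  = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids         = foo_ids,
 *		.probe       = foo_probe,
 *		.remove      = foo_remove,
 *		.set_online  = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 *	ret = ccw_driver_register(&foo_driver);
 */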
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);