/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
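
/*
 * Illustration (hypothetical values, not part of the driver): for a
 * control unit of type 0x3990, model 0xe9, attached to a device of type
 * 0x3390, model 0x0a, snprint_alias() yields "ccw:t3990mE9dt3390dm0A";
 * with dev_type == 0 the alias degenerates to "ccw:t3990mE9dtdm".
 */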
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}
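
/*
 * For the hypothetical device used in the example above, the resulting
 * uevent environment would contain CU_TYPE=3990, CU_MODEL=E9,
 * DEV_TYPE=3390, DEV_MODEL=0A and MODALIAS=ccw:t3990mE9dt3390dm0A,
 * which is what udev and modprobe match against.
 */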
static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(unsigned long data);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, io_subchannel_ids);

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	setup_timer(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/

/*
 * A ccw_device has some interfaces in sysfs in addition to the
 * standard ones.
 * The following entries are designed to export the information which
 * resided in 2.4 in /proc/subchannels. Subchannel and device number
 * are obvious, so they don't have an entry :)
 * TODO: Split chpids and pimpampom up? Where is "in use" in the tree?
 */
static ssize_t
chpids_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int chp;
	int mask;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return min((ssize_t)PAGE_SIZE, ret);
}

static ssize_t
pimpampom_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warning("%s: The device entered boxed state while "
			   "being set offline\n", dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warning("%s: The device stopped operating while "
			   "being set offline\n", dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warning("%s: Setting the device online failed "
				   "because it is boxed\n",
				   dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warning("%s: Setting the device online failed "
				   "because it is not operational\n",
				   dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
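
/*
 * Minimal usage sketch (hypothetical driver code, not compiled here):
 * a client driver that holds a ccw_device reference can toggle the
 * device like this, relying on the reference handling described above:
 *
 *	if (ccw_device_set_online(cdev))	// takes the online reference
 *		return -EIO;
 *	...
 *	ccw_device_set_offline(cdev);		// drops it again
 *
 * Both calls must be made with the ccw device lock not held.
 */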
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out_onoff;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out_onoff;
	}

	if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
		ret = -EINVAL;
		goto out_onoff;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->driver.owner);
out_onoff:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
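
/*
 * From user space the online attribute accepts "0", "1" (parsed as hex
 * via kstrtoul) or the literal string "force", e.g. (illustration only,
 * with a hypothetical bus id):
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.4711/online
 *	echo force > /sys/bus/ccw/devices/0.0.4711/online
 *
 * where "force" additionally tries to break the lock of a boxed device
 * via ccw_device_stlck() before onlining it.
 */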
static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warning("Logging for subchannel 0.%x.%04x failed with "
			   "errno=%d\n",
			   sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR(vpm, 0444, vpm_show, NULL);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

/* this is a simple abstraction for device_register that sets the
 * correct bus type and adds the bus specific files */
static int ccw_device_register(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;
	int ret;

	dev->bus = &ccw_bus_type;
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		return ret;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found, its reference count is increased and the device
 *  is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);
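
/*
 * Usage sketch (hypothetical caller, device number chosen for
 * illustration): the returned reference must be dropped again once the
 * caller is done with the device:
 *
 *	struct ccw_dev_id dev_id = { .ssid = 0, .devno = 0x4711 };
 *	struct ccw_device *cdev = get_ccwdev_by_dev_id(&dev_id);
 *
 *	if (cdev) {
 *		...
 *		put_device(&cdev->dev);
 *	}
 */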
static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	cdev->private->cdev = cdev;
	cdev->private->int_class = IRQIO_CIO;
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	if (!get_device(&sch->dev)) {
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	cdev->private->flags.initialized = 1;
	return 0;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	struct ccw_device_private *priv;

	cdev->ccwlock = sch->lock;

	/* Init private data. */
	priv = cdev->private;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;
	priv->schid = sch->schid;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->private->schid = sch->schid;
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		rc = ccw_device_register(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(4, "recovery: end\n");
}
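
/*
 * The net effect of recovery_check()/recovery_work_func() is a simple
 * back-off scheme: as long as disconnected devices remain, the timer is
 * re-armed with the next entry of recovery_delay[], i.e. retries happen
 * after 3, then 30, then (repeatedly) 300 seconds.
 */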
static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(unsigned long data)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

static void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(4, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

#ifdef CONFIG_CCW_CONSOLE
static int ccw_device_console_enable(struct ccw_device *cdev,
				     struct subchannel *sch)
{
	int rc;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	sch_set_cdev(sch, cdev);
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device *ccw_device_probe_console(void)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv) {
		put_device(&sch->dev);
		return ERR_PTR(-ENOMEM);
	}
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	set_io_private(sch, io_priv);
	ret = ccw_device_console_enable(cdev, sch);
	if (ret) {
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		put_device(&cdev->dev);
		kfree(io_priv);
		return ERR_PTR(ret);
	}
	return cdev;
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a match is found, the reference count of the found device is increased
 *  and the device is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
				 __ccwdev_check_busid);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
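
/*
 * Sketch of a minimal client driver (all foo_* names are hypothetical,
 * shown only to illustrate how the pieces below fit together):
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ },	// end of list
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.set_online = foo_set_online,
 *		.set_offline = foo_set_offline,
 *	};
 *
 * The ids table feeds ccw_bus_match() above, probe/remove are invoked
 * through ccw_device_probe()/ccw_device_remove() below, and the driver
 * is activated with ccw_driver_register(&foo_driver).
 */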
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;

	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;

	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	spin_unlock_irq(cdev->ccwlock);
	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}

static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name = "ccw",
	.match = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}
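
/*
 * Example of the priority rule (relying on the enum ordering defined in
 * io_sch.h): once CDEV_TODO_UNREG has been scheduled for a device, a
 * later CDEV_TODO_REGISTER request is silently dropped, because the
 * check against cdev->private->todo above only allows escalation.
 */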
/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);