// SPDX-License-Identifier: GPL-1.0+
/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
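/*
 * For illustration: a device with control unit type 0x3990, control unit
 * model 0xe9, device type 0x3390 and device model 0x0c yields the modalias
 * "ccw:t3990mE9dt3390dm0C"; a device that reported no device type yields
 * "ccw:t3990mE9dtdm".
 */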
/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static int io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_prepare(struct subchannel *sch)
{
	struct ccw_device *cdev;
	/*
	 * Don't allow suspend while a ccw device registration
	 * is still outstanding.
	 */
	cdev = sch_get_cdev(sch);
	if (cdev && !device_is_registered(&cdev->dev))
		return -EAGAIN;
	return 0;
}

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				       atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.prepare = io_subchannel_prepare,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}


/************************ device handling **************************/
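/*
 * The attributes below are exported for each ccw device in sysfs, e.g.
 * under /sys/bus/ccw/devices/0.0.4711/ (illustrative bus id): devtype
 * ("3390/0c", or "n/a" if the device reported no device type), cutype
 * ("3990/e9"), modalias, online ("0"/"1") and availability.
 */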
static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}
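/*
 * Note on reference counting: ccw_device_set_online() obtains an extra
 * device reference that is held while the device is online;
 * ccw_device_set_offline() and the error paths above give it up again, so
 * an online device cannot be freed under a driver's feet.
 */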
/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *   %0 on success and a negative error value on failure.
 * Context:
 *  enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
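/*
 * The "online" sysfs attribute implemented below accepts "0" (set the
 * device offline), "1" (set it online) and "force" (steal the device lock
 * and retry for boxed devices); numeric input is parsed as hexadecimal.
 * Illustrative use: echo 1 > /sys/bus/ccw/devices/0.0.4711/online
 */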
static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}
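/*
 * The "availability" attribute implemented below reports "good" for a
 * usable device, "boxed" if access to the device is currently denied
 * (typically because it is reserved by another operating system instance),
 * "no path" if no usable channel path is left, and "no device" otherwise.
 */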
static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int ccw_device_add(struct ccw_device *cdev)
{
	struct device *dev = &cdev->dev;

	dev->bus = &ccw_bus_type;
	return device_add(dev);
}

static int match_dev_id(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *  If a device is found its reference count is increased and returned;
 *  else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}
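/*
 * Note: cdev->private is allocated with GFP_DMA since it contains control
 * blocks that are handed to the channel subsystem, which on s390 requires
 * them to reside in 31-bit addressable (below 2 GB) storage.
 */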
static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (cdev) {
		cdev->private = kzalloc(sizeof(struct ccw_device_private),
					GFP_KERNEL | GFP_DMA);
		if (cdev->private)
			return cdev;
	}
	kfree(cdev);
	return ERR_PTR(-ENOMEM);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}
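/*
 * Device recognition (sense ID) runs asynchronously. Once it finishes,
 * io_subchannel_recog_done() is called from the state machine and, for an
 * operational device, schedules CDEV_TODO_REGISTER so that registration
 * takes place in process context via io_subchannel_register() below.
 */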
/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/*
	 * Now we know this subchannel will stay, we can throw
	 * our delayed uevent.
	 */
	dev_set_uevent_suppress(&sch->dev, 0);
	kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	/* make it known to the system */
	ret = ccw_device_add(cdev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

static void ccw_device_call_sch_unregister(struct ccw_device *cdev)
{
	struct subchannel *sch;

	/* Get subchannel reference for local processing. */
	if (!get_device(cdev->dev.parent))
		return;
	sch = to_subchannel(cdev->dev.parent);
	css_sch_device_unregister(sch);
	/* Release subchannel reference for local processing. */
	put_device(&sch->dev);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}
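/*
 * If a subchannel becomes inoperative or changes its device number while
 * the attached ccw device's driver wants to keep the device
 * (ccw_device_notify() returns NOTIFY_OK), the device is moved to the
 * channel subsystem's pseudo subchannel, the "orphanage", instead of being
 * unregistered; ccw_device_is_orphan() above tests for this state.
 */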
static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)(addr_t)old_sch);
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
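/*
 * opm and lpm are 8-bit masks with one bit per channel path: opm is
 * derived from the channel-path state (console subchannels assume all
 * paths usable), and lpm = pam & opm describes the paths currently usable
 * for I/O. Varying a channel path off/on clears/sets the corresponding
 * bits, see io_subchannel_chp_event().
 */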
/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Throw the delayed uevent for the subchannel, register
		 * the ccw_device and exit.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
		cdev = sch_get_cdev(sch);
		rc = ccw_device_add(cdev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static int
io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;
	io_subchannel_quiesce(sch);
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irq(cdev->ccwlock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irq(cdev->ccwlock);
	ccw_device_unregister(cdev);
out_free:
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
	return 0;
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
}
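/*
 * pmcw.lpum identifies the path on which the last I/O was started.
 * io_subchannel_terminate_path() therefore only needs to kill or clear
 * ongoing I/O if it actually runs on the path that went away; otherwise
 * triggering path verification is sufficient.
 */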
static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask;

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		/* fall through */
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}
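/*
 * Device recovery is retried with escalating delays of 3, 30 and 300
 * seconds (see recovery_delay[] above). As long as recovery_check() finds
 * devices that still need path verification, the timer is re-armed and the
 * phase advances towards the maximum delay.
 */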
static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};
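/*
 * In short: UNREG unregisters the subchannel, ATTACH attaches or creates a
 * ccw device for it, UNREG_ATTACH replaces the attached device, ORPH_*
 * first moves the existing device to the orphanage, REPROBE triggers
 * device recognition, VERIFY triggers path verification, DISC marks the
 * device disconnected and NOP does nothing.
 */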
static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online && !cdev->private->flags.resuming)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		if (cdev->private->flags.resuming) {
			/* Device will be handled later. */
			rc = 0;
			goto out_unlock;
		}
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		if (!cdev || !cdev->private->flags.resuming)
			css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}
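/*
 * I/O interrupts are accounted per interruption class; IRQIO_CIO is the
 * default class for common-I/O interrupts. A ccw driver may provide its
 * own class in ccw_driver->int_class so that interrupts for its devices
 * are accounted separately.
 */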
static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}
#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv) {
		put_device(&sch->dev);
		return ERR_PTR(-ENOMEM);
	}
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay_simple(100);
	}
}

static int ccw_device_pm_restore(struct device *dev);

int ccw_device_force_console(struct ccw_device *cdev)
{
	return ccw_device_pm_restore(&cdev->dev);
}
EXPORT_SYMBOL_GPL(ccw_device_force_console);
#endif

/*
 * get ccw_device matching the busid, but only if owned by cdrv
 */
static int
__ccwdev_check_busid(struct device *dev, void *id)
{
	char *bus_id;

	bus_id = id;

	return (strcmp(bus_id, dev_name(dev)) == 0);
}


/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *  If a device is found, its reference count is increased and it is
 *  returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device(&cdrv->driver, NULL, (void *)bus_id,
				 __ccwdev_check_busid);

	return dev ? to_ccwdev(dev) : NULL;
}

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
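/*
 * A minimal sketch of a driver using this interface (hypothetical names,
 * for illustration only):
 *
 *	static struct ccw_device_id mydrv_ids[] = {
 *		{ CCW_DEVICE_DEVTYPE(0x3990, 0, 0x3390, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, mydrv_ids);
 *
 *	static struct ccw_driver mydrv_driver = {
 *		.driver = {
 *			.name  = "mydrv",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids         = mydrv_ids,
 *		.probe       = mydrv_probe,
 *		.remove      = mydrv_remove,
 *		.set_online  = mydrv_set_online,
 *		.set_offline = mydrv_set_offline,
 *	};
 *
 * registered with ccw_driver_register(&mydrv_driver) from the module init
 * function and removed again with ccw_driver_unregister(&mydrv_driver).
 */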
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static int ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	spin_unlock_irq(cdev->ccwlock);
	__disable_cmf(cdev);

	return 0;
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static int ccw_device_pm_prepare(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (work_pending(&cdev->private->todo_work))
		return -EAGAIN;
	/* Fail while device is being set online/offline. */
	if (atomic_read(&cdev->private->onoff))
		return -EAGAIN;

	if (cdev->online && cdev->drv && cdev->drv->prepare)
		return cdev->drv->prepare(cdev);

	return 0;
}

static void ccw_device_pm_complete(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	if (cdev->online && cdev->drv && cdev->drv->complete)
		cdev->drv->complete(cdev);
}
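/*
 * Hibernation support: freeze() quiesces the device (optionally through
 * the driver's freeze callback), stops channel measurement and disables
 * the subchannel; thaw() reverses this; restore() additionally re-runs
 * device recognition, since the device may have changed while the system
 * was suspended.
 */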
static int ccw_device_pm_freeze(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	/* Fail suspend while device is in transitional state. */
	if (!dev_fsm_final_state(cdev))
		return -EAGAIN;
	if (!cdev->online)
		return 0;
	if (cdev->drv && cdev->drv->freeze) {
		ret = cdev->drv->freeze(cdev);
		if (ret)
			return ret;
	}

	spin_lock_irq(sch->lock);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (cm_enabled) {
		/* Don't have the css write on memory. */
		ret = ccw_set_cmf(cdev, 0);
		if (ret)
			return ret;
	}
	/* From here on, disallow device driver I/O. */
	spin_lock_irq(sch->lock);
	ret = cio_disable_subchannel(sch);
	spin_unlock_irq(sch->lock);

	return ret;
}

static int ccw_device_pm_thaw(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int ret, cm_enabled;

	if (!cdev->online)
		return 0;

	spin_lock_irq(sch->lock);
	/* Allow device driver I/O again. */
	ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
	cm_enabled = cdev->private->cmb != NULL;
	spin_unlock_irq(sch->lock);
	if (ret)
		return ret;

	if (cm_enabled) {
		ret = ccw_set_cmf(cdev, 1);
		if (ret)
			return ret;
	}

	if (cdev->drv && cdev->drv->thaw)
		ret = cdev->drv->thaw(cdev);

	return ret;
}

static void __ccw_device_pm_restore(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid)) {
		cio_enable_subchannel(sch, (u32)(addr_t)sch);
		goto out_unlock;
	}
	/*
	 * While we were sleeping, devices may have gone or become
	 * available again. Kick re-detection.
	 */
	cdev->private->flags.resuming = 1;
	cdev->private->path_new_mask = LPM_ANYPATH;
	css_sched_sch_todo(sch, SCH_TODO_EVAL);
	spin_unlock_irq(sch->lock);
	css_wait_for_slow_path();

	/* cdev may have been moved to a different subchannel. */
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_OFFLINE)
		goto out_unlock;

	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED);
	spin_lock_irq(sch->lock);

out_unlock:
	cdev->private->flags.resuming = 0;
	spin_unlock_irq(sch->lock);
}

static int resume_handle_boxed(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_BOXED;
	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}

static int resume_handle_disc(struct ccw_device *cdev)
{
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
		return 0;
	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
	return -ENODEV;
}
static int ccw_device_pm_restore(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int ret = 0;

	__ccw_device_pm_restore(cdev);
	sch = to_subchannel(cdev->dev.parent);
	spin_lock_irq(sch->lock);
	if (cio_is_console(sch->schid))
		goto out_restore;

	/* check recognition results */
	switch (cdev->private->state) {
	case DEV_STATE_OFFLINE:
	case DEV_STATE_ONLINE:
		cdev->private->flags.donotify = 0;
		break;
	case DEV_STATE_BOXED:
		ret = resume_handle_boxed(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	default:
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	/* check if the device type has changed */
	if (!ccw_device_test_sense_data(cdev)) {
		ccw_device_update_sense_data(cdev);
		ccw_device_sched_todo(cdev, CDEV_TODO_REBIND);
		ret = -ENODEV;
		goto out_unlock;
	}
	if (!cdev->online)
		goto out_unlock;

	if (ccw_device_online(cdev)) {
		ret = resume_handle_disc(cdev);
		if (ret)
			goto out_unlock;
		goto out_restore;
	}
	spin_unlock_irq(sch->lock);
	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	spin_lock_irq(sch->lock);

	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		ret = -ENODEV;
		goto out_unlock;
	}

	/* reenable cmf, if needed */
	if (cdev->private->cmb) {
		spin_unlock_irq(sch->lock);
		ret = ccw_set_cmf(cdev, 1);
		spin_lock_irq(sch->lock);
		if (ret) {
			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
				      "(rc=%d)\n", cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno, ret);
			ret = 0;
		}
	}

out_restore:
	spin_unlock_irq(sch->lock);
	if (cdev->online && cdev->drv && cdev->drv->restore)
		ret = cdev->drv->restore(cdev);
	return ret;

out_unlock:
	spin_unlock_irq(sch->lock);
	return ret;
}

static const struct dev_pm_ops ccw_pm_ops = {
	.prepare = ccw_device_pm_prepare,
	.complete = ccw_device_pm_complete,
	.freeze = ccw_device_pm_freeze,
	.thaw = ccw_device_pm_thaw,
	.restore = ccw_device_pm_restore,
};

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
	.pm = &ccw_pm_ops,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *   %0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
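/*
 * Asynchronous device work is funneled through a single per-device todo
 * item. The enum cdev_todo values are ordered by ascending priority;
 * ccw_device_sched_todo() below silently drops requests that do not
 * exceed the priority of an already pending one.
 */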
static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		/* fall-through */
	case CDEV_TODO_UNREG:
		if (sch_is_pseudo_sch(sch))
			ccw_device_unregister(cdev);
		else
			ccw_device_call_sch_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);