// SPDX-License-Identifier: GPL-1.0+
/*
 * bus driver for ccw devices
 *
 * Copyright IBM Corp. 2002, 2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 *	      Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/kernel_stat.h>
#include <linux/sched/signal.h>
#include <linux/dma-mapping.h>

#include <asm/ccwdev.h>
#include <asm/cio.h>
#include <asm/param.h>		/* HZ */
#include <asm/cmb.h>
#include <asm/isc.h>

#include "chp.h"
#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "io_sch.h"
#include "blacklist.h"
#include "chsc.h"

static struct timer_list recovery_timer;
static DEFINE_SPINLOCK(recovery_lock);
static int recovery_phase;
static const unsigned long recovery_delay[] = { 3, 30, 300 };

static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
static struct bus_type ccw_bus_type;

/******************* bus type handling ***********************/

/* The Linux driver model distinguishes between a bus type and
 * the bus itself. Of course we only have one channel
 * subsystem driver and one channel system per machine, but
 * we still use the abstraction. T.R. says it's a good idea. */
static int
ccw_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(drv);
	const struct ccw_device_id *ids = cdrv->ids, *found;

	if (!ids)
		return 0;

	found = ccw_device_id_match(ids, &cdev->id);
	if (!found)
		return 0;

	cdev->id.driver_info = found->driver_info;

	return 1;
}

/* Store modalias string delimited by prefix/suffix string into buffer with
 * specified size. Return length of resulting string (excluding trailing '\0')
 * even if string doesn't fit buffer (snprintf semantics). */
static int snprint_alias(char *buf, size_t size,
			 const struct ccw_device_id *id, const char *suffix)
{
	int len;

	len = snprintf(buf, size, "ccw:t%04Xm%02X", id->cu_type, id->cu_model);
	if (len > size)
		return len;
	buf += len;
	size -= len;

	if (id->dev_type != 0)
		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
				id->dev_model, suffix);
	else
		len += snprintf(buf, size, "dtdm%s", suffix);

	return len;
}
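
/*
 * Illustrative sketch (not part of the original driver): for a control
 * unit of type 0x3990, model 0xe9, attached to a device of type 0x3390,
 * model 0x0c, snprint_alias() would compose the string
 *
 *	ccw:t3990mE9dt3390dm0C
 *
 * which is the alias format used for module autoloading of ccw drivers.
 * The type/model values here are hypothetical examples, chosen to
 * resemble a classic DASD setup.
 */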

/* Set up environment variables for ccw device uevent. Return 0 on success,
 * non-zero otherwise. */
static int ccw_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct ccw_device *cdev = to_ccwdev(dev);
	const struct ccw_device_id *id = &(cdev->id);
	int ret;
	char modalias_buf[30];

	/* CU_TYPE= */
	ret = add_uevent_var(env, "CU_TYPE=%04X", id->cu_type);
	if (ret)
		return ret;

	/* CU_MODEL= */
	ret = add_uevent_var(env, "CU_MODEL=%02X", id->cu_model);
	if (ret)
		return ret;

	/* The next two can be zero, that's ok for us */
	/* DEV_TYPE= */
	ret = add_uevent_var(env, "DEV_TYPE=%04X", id->dev_type);
	if (ret)
		return ret;

	/* DEV_MODEL= */
	ret = add_uevent_var(env, "DEV_MODEL=%02X", id->dev_model);
	if (ret)
		return ret;

	/* MODALIAS= */
	snprint_alias(modalias_buf, sizeof(modalias_buf), id, "");
	ret = add_uevent_var(env, "MODALIAS=%s", modalias_buf);
	return ret;
}

static void io_subchannel_irq(struct subchannel *);
static int io_subchannel_probe(struct subchannel *);
static void io_subchannel_remove(struct subchannel *);
static void io_subchannel_shutdown(struct subchannel *);
static int io_subchannel_sch_event(struct subchannel *, int);
static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
				   int);
static void recovery_func(struct timer_list *unused);

static struct css_device_id io_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int io_subchannel_settle(void)
{
	int ret;

	ret = wait_event_interruptible(ccw_device_init_wq,
				       atomic_read(&ccw_device_init_count) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	return 0;
}

static struct css_driver io_subchannel_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "io_subchannel",
	},
	.subchannel_type = io_subchannel_ids,
	.irq = io_subchannel_irq,
	.sch_event = io_subchannel_sch_event,
	.chp_event = io_subchannel_chp_event,
	.probe = io_subchannel_probe,
	.remove = io_subchannel_remove,
	.shutdown = io_subchannel_shutdown,
	.settle = io_subchannel_settle,
};

int __init io_subchannel_init(void)
{
	int ret;

	timer_setup(&recovery_timer, recovery_func, 0);
	ret = bus_register(&ccw_bus_type);
	if (ret)
		return ret;
	ret = css_driver_register(&io_subchannel_driver);
	if (ret)
		bus_unregister(&ccw_bus_type);

	return ret;
}

/************************ device handling **************************/

static ssize_t
devtype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	if (id->dev_type != 0)
		return sprintf(buf, "%04x/%02x\n",
			       id->dev_type, id->dev_model);
	else
		return sprintf(buf, "n/a\n");
}

static ssize_t
cutype_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);

	return sprintf(buf, "%04x/%02x\n",
		       id->cu_type, id->cu_model);
}

static ssize_t
modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_device_id *id = &(cdev->id);
	int len;

	len = snprint_alias(buf, PAGE_SIZE, id, "\n");

	return len > PAGE_SIZE ? PAGE_SIZE : len;
}
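
/*
 * Illustrative note (not part of the original driver): these show routines
 * back the per-device sysfs attributes, so the identification data can be
 * read from userspace, e.g. for a hypothetical device 0.0.1234:
 *
 *	cat /sys/bus/ccw/devices/0.0.1234/devtype
 *	cat /sys/bus/ccw/devices/0.0.1234/modalias
 */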

static ssize_t
online_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);

	return sprintf(buf, cdev->online ? "1\n" : "0\n");
}

int ccw_device_is_orphan(struct ccw_device *cdev)
{
	return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
}

static void ccw_device_unregister(struct ccw_device *cdev)
{
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		/* Undo device_add(). */
		device_del(&cdev->dev);
	}
	mutex_unlock(&cdev->reg_mutex);

	if (cdev->private->flags.initialized) {
		cdev->private->flags.initialized = 0;
		/* Release reference from device_initialize(). */
		put_device(&cdev->dev);
	}
}

static void io_subchannel_quiesce(struct subchannel *);

/**
 * ccw_device_set_offline() - disable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function calls the driver's set_offline() function for @cdev, if
 * given, and then disables @cdev.
 * Returns:
 *	%0 on success and a negative error value on failure.
 * Context:
 *	enabled, ccw device lock not held
 */
int ccw_device_set_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, state;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	sch = to_subchannel(cdev->dev.parent);
	cdev->online = 0;
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	do {
		ret = ccw_device_offline(cdev);
		if (!ret)
			break;
		CIO_MSG_EVENT(0, "ccw_device_offline returned %d, device "
			      "0.%x.%04x\n", ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		if (ret != -EBUSY)
			goto error;
		state = cdev->private->state;
		spin_unlock_irq(cdev->ccwlock);
		io_subchannel_quiesce(sch);
		spin_lock_irq(cdev->ccwlock);
		cdev->private->state = state;
	} while (ret == -EBUSY);
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Inform the user if set offline failed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		pr_warn("%s: The device entered boxed state while being set offline\n",
			dev_name(&cdev->dev));
	} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
		pr_warn("%s: The device stopped operating while being set offline\n",
			dev_name(&cdev->dev));
	}
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return 0;

error:
	cdev->private->state = DEV_STATE_OFFLINE;
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	spin_unlock_irq(cdev->ccwlock);
	/* Give up reference from ccw_device_set_online(). */
	put_device(&cdev->dev);
	return -ENODEV;
}

/**
 * ccw_device_set_online() - enable a ccw device for I/O
 * @cdev: target ccw device
 *
 * This function first enables @cdev and then calls the driver's set_online()
 * function for @cdev, if given. If set_online() returns an error, @cdev is
 * disabled again.
 * Returns:
 *	%0 on success and a negative error value on failure.
 * Context:
 *	enabled, ccw device lock not held
 */
int ccw_device_set_online(struct ccw_device *cdev)
{
	int ret;
	int ret2;

	if (!cdev)
		return -ENODEV;
	if (cdev->online || !cdev->drv)
		return -EINVAL;
	/* Hold on to an extra reference while device is online. */
	if (!get_device(&cdev->dev))
		return -ENODEV;

	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_online(cdev);
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		CIO_MSG_EVENT(0, "ccw_device_online returned %d, "
			      "device 0.%x.%04x\n",
			      ret, cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return ret;
	}
	spin_lock_irq(cdev->ccwlock);
	/* Check if online processing was successful */
	if ((cdev->private->state != DEV_STATE_ONLINE) &&
	    (cdev->private->state != DEV_STATE_W4SENSE)) {
		spin_unlock_irq(cdev->ccwlock);
		/* Inform the user that set online failed. */
		if (cdev->private->state == DEV_STATE_BOXED) {
			pr_warn("%s: Setting the device online failed because it is boxed\n",
				dev_name(&cdev->dev));
		} else if (cdev->private->state == DEV_STATE_NOT_OPER) {
			pr_warn("%s: Setting the device online failed because it is not operational\n",
				dev_name(&cdev->dev));
		}
		/* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
		return -ENODEV;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (cdev->drv->set_online)
		ret = cdev->drv->set_online(cdev);
	if (ret)
		goto rollback;

	spin_lock_irq(cdev->ccwlock);
	cdev->online = 1;
	spin_unlock_irq(cdev->ccwlock);
	return 0;

rollback:
	spin_lock_irq(cdev->ccwlock);
	/* Wait until a final state or DISCONNECTED is reached */
	while (!dev_fsm_final_state(cdev) &&
	       cdev->private->state != DEV_STATE_DISCONNECTED) {
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
			   cdev->private->state == DEV_STATE_DISCONNECTED));
		spin_lock_irq(cdev->ccwlock);
	}
	ret2 = ccw_device_offline(cdev);
	if (ret2)
		goto error;
	spin_unlock_irq(cdev->ccwlock);
	wait_event(cdev->private->wait_q, (dev_fsm_final_state(cdev) ||
		   cdev->private->state == DEV_STATE_DISCONNECTED));
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;

error:
	CIO_MSG_EVENT(0, "rollback ccw_device_offline returned %d, "
		      "device 0.%x.%04x\n",
		      ret2, cdev->private->dev_id.ssid,
		      cdev->private->dev_id.devno);
	cdev->private->state = DEV_STATE_OFFLINE;
	spin_unlock_irq(cdev->ccwlock);
	/* Give up online reference since onlining failed. */
	put_device(&cdev->dev);
	return ret;
}
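
/*
 * Illustrative note (not part of the original driver): a typical ccw
 * driver wires set_online()/set_offline() callbacks into its struct
 * ccw_driver, and userspace drives them through the "online" sysfs
 * attribute handled by online_store() below, e.g. for a hypothetical
 * device:
 *
 *	echo 1 > /sys/bus/ccw/devices/0.0.1234/online
 *
 * which ends up in ccw_device_set_online(), which in turn invokes the
 * driver's set_online() callback once the device FSM has settled.
 */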

static int online_store_handle_offline(struct ccw_device *cdev)
{
	if (cdev->private->state == DEV_STATE_DISCONNECTED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
		spin_unlock_irq(cdev->ccwlock);
		return 0;
	}
	if (cdev->drv && cdev->drv->set_offline)
		return ccw_device_set_offline(cdev);
	return -EINVAL;
}

static int online_store_recog_and_online(struct ccw_device *cdev)
{
	/* Do device recognition, if needed. */
	if (cdev->private->state == DEV_STATE_BOXED) {
		spin_lock_irq(cdev->ccwlock);
		ccw_device_recognition(cdev);
		spin_unlock_irq(cdev->ccwlock);
		wait_event(cdev->private->wait_q,
			   cdev->private->flags.recog_done);
		if (cdev->private->state != DEV_STATE_OFFLINE)
			/* recognition failed */
			return -EAGAIN;
	}
	if (cdev->drv && cdev->drv->set_online)
		return ccw_device_set_online(cdev);
	return -EINVAL;
}

static int online_store_handle_online(struct ccw_device *cdev, int force)
{
	int ret;

	ret = online_store_recog_and_online(cdev);
	if (ret && !force)
		return ret;
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		ret = ccw_device_stlck(cdev);
		if (ret)
			return ret;
		if (cdev->id.cu_type == 0)
			cdev->private->state = DEV_STATE_NOT_OPER;
		ret = online_store_recog_and_online(cdev);
		if (ret)
			return ret;
	}
	return 0;
}

static ssize_t online_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int force, ret;
	unsigned long i;

	/* Prevent conflict between multiple on-/offline processing requests. */
	if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
		return -EAGAIN;
	/* Prevent conflict between internal I/Os and on-/offline processing. */
	if (!dev_fsm_final_state(cdev) &&
	    cdev->private->state != DEV_STATE_DISCONNECTED) {
		ret = -EAGAIN;
		goto out;
	}
	/* Prevent conflict between pending work and on-/offline processing. */
	if (work_pending(&cdev->private->todo_work)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
		ret = 0;
	} else {
		force = 0;
		ret = kstrtoul(buf, 16, &i);
	}
	if (ret)
		goto out;

	device_lock(dev);
	switch (i) {
	case 0:
		ret = online_store_handle_offline(cdev);
		break;
	case 1:
		ret = online_store_handle_online(cdev, force);
		break;
	default:
		ret = -EINVAL;
	}
	device_unlock(dev);

out:
	atomic_set(&cdev->private->onoff, 0);
	return (ret < 0) ? ret : count;
}

static ssize_t
available_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;

	if (ccw_device_is_orphan(cdev))
		return sprintf(buf, "no device\n");
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		return sprintf(buf, "boxed\n");
	case DEV_STATE_DISCONNECTED:
	case DEV_STATE_DISCONNECTED_SENSE_ID:
	case DEV_STATE_NOT_OPER:
		sch = to_subchannel(dev->parent);
		if (!sch->lpm)
			return sprintf(buf, "no path\n");
		else
			return sprintf(buf, "no device\n");
	default:
		/* All other states considered fine. */
		return sprintf(buf, "good\n");
	}
}
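
/*
 * Illustrative note (not part of the original driver): the availability
 * attribute therefore reports one of "good", "boxed", "no path" or
 * "no device", e.g. for a hypothetical device 0.0.1234:
 *
 *	$ cat /sys/bus/ccw/devices/0.0.1234/availability
 *	good
 */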

static ssize_t
initiate_logging(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int rc;

	rc = chsc_siosl(sch->schid);
	if (rc < 0) {
		pr_warn("Logging for subchannel 0.%x.%04x failed with errno=%d\n",
			sch->schid.ssid, sch->schid.sch_no, rc);
		return rc;
	}
	pr_notice("Logging for subchannel 0.%x.%04x was triggered\n",
		  sch->schid.ssid, sch->schid.sch_no);
	return count;
}

static ssize_t vpm_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%02x\n", sch->vpm);
}

static DEVICE_ATTR_RO(devtype);
static DEVICE_ATTR_RO(cutype);
static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_RW(online);
static DEVICE_ATTR(availability, 0444, available_show, NULL);
static DEVICE_ATTR(logging, 0200, NULL, initiate_logging);
static DEVICE_ATTR_RO(vpm);

static struct attribute *io_subchannel_attrs[] = {
	&dev_attr_logging.attr,
	&dev_attr_vpm.attr,
	NULL,
};

static const struct attribute_group io_subchannel_attr_group = {
	.attrs = io_subchannel_attrs,
};

static struct attribute *ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_modalias.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static const struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

static const struct attribute_group *ccwdev_attr_groups[] = {
	&ccwdev_attr_group,
	NULL,
};

static int match_dev_id(struct device *dev, const void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *dev_id = (void *)data;

	return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
}

/**
 * get_ccwdev_by_dev_id() - obtain device from a ccw device id
 * @dev_id: id of the device to be searched
 *
 * This function searches all devices attached to the ccw bus for a device
 * matching @dev_id.
 * Returns:
 *	If a device is found its reference count is increased and returned;
 *	else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_dev_id(struct ccw_dev_id *dev_id)
{
	struct device *dev;

	dev = bus_find_device(&ccw_bus_type, NULL, dev_id, match_dev_id);

	return dev ? to_ccwdev(dev) : NULL;
}
EXPORT_SYMBOL_GPL(get_ccwdev_by_dev_id);

static void ccw_device_do_unbind_bind(struct ccw_device *cdev)
{
	int ret;

	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		device_release_driver(&cdev->dev);
		ret = device_attach(&cdev->dev);
		WARN_ON(ret == -ENODEV);
	}
	mutex_unlock(&cdev->reg_mutex);
}

static void
ccw_device_release(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	cio_gp_dma_free(cdev->private->dma_pool, cdev->private->dma_area,
			sizeof(*cdev->private->dma_area));
	cio_gp_dma_destroy(cdev->private->dma_pool, &cdev->dev);
	/* Release reference of parent subchannel. */
	put_device(cdev->dev.parent);
	kfree(cdev->private);
	kfree(cdev);
}

static struct ccw_device *io_subchannel_allocate_dev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	struct gen_pool *dma_pool;
	int ret;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto err_cdev;
	}
	cdev->private = kzalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		ret = -ENOMEM;
		goto err_priv;
	}

	cdev->dev.dma_mask = sch->dev.dma_mask;
	ret = dma_set_coherent_mask(&cdev->dev, sch->dev.coherent_dma_mask);
	if (ret)
		goto err_coherent_mask;

	dma_pool = cio_gp_dma_create(&cdev->dev, 1);
	if (!dma_pool) {
		ret = -ENOMEM;
		goto err_dma_pool;
	}
	cdev->private->dma_pool = dma_pool;
	cdev->private->dma_area = cio_gp_dma_zalloc(dma_pool, &cdev->dev,
					sizeof(*cdev->private->dma_area));
	if (!cdev->private->dma_area) {
		ret = -ENOMEM;
		goto err_dma_area;
	}
	return cdev;
err_dma_area:
	cio_gp_dma_destroy(dma_pool, &cdev->dev);
err_dma_pool:
err_coherent_mask:
	kfree(cdev->private);
err_priv:
	kfree(cdev);
err_cdev:
	return ERR_PTR(ret);
}

static void ccw_device_todo(struct work_struct *work);

static int io_subchannel_initialize_dev(struct subchannel *sch,
					struct ccw_device *cdev)
{
	struct ccw_device_private *priv = cdev->private;
	int ret;

	priv->cdev = cdev;
	priv->int_class = IRQIO_CIO;
	priv->state = DEV_STATE_NOT_OPER;
	priv->dev_id.devno = sch->schib.pmcw.dev;
	priv->dev_id.ssid = sch->schid.ssid;

	INIT_WORK(&priv->todo_work, ccw_device_todo);
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	timer_setup(&priv->timer, ccw_device_timeout, 0);
	mutex_init(&cdev->reg_mutex);

	atomic_set(&priv->onoff, 0);
	cdev->ccwlock = sch->lock;
	cdev->dev.parent = &sch->dev;
	cdev->dev.release = ccw_device_release;
	cdev->dev.bus = &ccw_bus_type;
	cdev->dev.groups = ccwdev_attr_groups;
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);
	ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
			   cdev->private->dev_id.devno);
	if (ret)
		goto out_put;
	if (!get_device(&sch->dev)) {
		ret = -ENODEV;
		goto out_put;
	}
	priv->flags.initialized = 1;
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	return 0;

out_put:
	/* Release reference from device_initialize(). */
	put_device(&cdev->dev);
	return ret;
}

static struct ccw_device *io_subchannel_create_ccwdev(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	cdev = io_subchannel_allocate_dev(sch);
	if (!IS_ERR(cdev)) {
		ret = io_subchannel_initialize_dev(sch, cdev);
		if (ret)
			cdev = ERR_PTR(ret);
	}
	return cdev;
}

static void io_subchannel_recog(struct ccw_device *, struct subchannel *);

static void sch_create_and_recog_new_device(struct subchannel *sch)
{
	struct ccw_device *cdev;

	/* Need to allocate a new ccw device. */
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		/* OK, we did everything we could... */
		css_sch_device_unregister(sch);
		return;
	}
	/* Start recognition for the new ccw device. */
	io_subchannel_recog(cdev, sch);
}

/*
 * Register recognized device.
 */
static void io_subchannel_register(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret, adjust_init_count = 1;
	unsigned long flags;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Check if subchannel is still registered. It may have become
	 * unregistered if a machine check hit us after finishing
	 * device recognition but before the register work could be
	 * queued.
	 */
	if (!device_is_registered(&sch->dev))
		goto out_err;
	css_update_ssd_info(sch);
	/*
	 * io_subchannel_register() will also be called after device
	 * recognition has been done for a boxed device (which will already
	 * be registered). We need to reprobe since we may now have sense id
	 * information.
	 */
	mutex_lock(&cdev->reg_mutex);
	if (device_is_registered(&cdev->dev)) {
		if (!cdev->drv) {
			ret = device_reprobe(&cdev->dev);
			if (ret)
				/* We can't do much here. */
				CIO_MSG_EVENT(0, "device_reprobe() returned"
					      " %d for 0.%x.%04x\n", ret,
					      cdev->private->dev_id.ssid,
					      cdev->private->dev_id.devno);
		}
		adjust_init_count = 0;
		goto out;
	}
	/* make it known to the system */
	ret = device_add(&cdev->dev);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, ret);
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		mutex_unlock(&cdev->reg_mutex);
		/* Release initial device reference. */
		put_device(&cdev->dev);
		goto out_err;
	}
out:
	cdev->private->flags.recog_done = 1;
	mutex_unlock(&cdev->reg_mutex);
	wake_up(&cdev->private->wait_q);
out_err:
	if (adjust_init_count && atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}

/*
 * subchannel recognition done. Called from the state machine.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	if (css_init_done == 0) {
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_OFFLINE:
		/*
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		ccw_device_sched_todo(cdev, CDEV_TODO_REGISTER);
		break;
	}
}

static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(sch->lock);
	ccw_device_recognition(cdev);
	spin_unlock_irq(sch->lock);
}

static int ccw_device_move_to_sch(struct ccw_device *cdev,
				  struct subchannel *sch)
{
	struct subchannel *old_sch;
	int rc, old_enabled = 0;

	old_sch = to_subchannel(cdev->dev.parent);
	/* Obtain child reference for new parent. */
	if (!get_device(&sch->dev))
		return -ENODEV;

	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		old_enabled = old_sch->schib.pmcw.ena;
		rc = 0;
		if (old_enabled)
			rc = cio_disable_subchannel(old_sch);
		spin_unlock_irq(old_sch->lock);
		if (rc == -EBUSY) {
			/* Release child reference for new parent. */
			put_device(&sch->dev);
			return rc;
		}
	}

	mutex_lock(&sch->reg_mutex);
	rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
	mutex_unlock(&sch->reg_mutex);
	if (rc) {
		CIO_MSG_EVENT(0, "device_move(0.%x.%04x,0.%x.%04x)=%d\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno, sch->schid.ssid,
			      sch->schib.pmcw.dev, rc);
		if (old_enabled) {
			/* Try to reenable the old subchannel. */
			spin_lock_irq(old_sch->lock);
			cio_enable_subchannel(old_sch, (u32)virt_to_phys(old_sch));
			spin_unlock_irq(old_sch->lock);
		}
		/* Release child reference for new parent. */
		put_device(&sch->dev);
		return rc;
	}
	/* Clean up old subchannel. */
	if (!sch_is_pseudo_sch(old_sch)) {
		spin_lock_irq(old_sch->lock);
		sch_set_cdev(old_sch, NULL);
		spin_unlock_irq(old_sch->lock);
		css_schedule_eval(old_sch->schid);
	}
	/* Release child reference for old parent. */
	put_device(&old_sch->dev);
	/* Initialize new subchannel. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
	return 0;
}

static int ccw_device_move_to_orph(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct channel_subsystem *css = to_css(sch->dev.parent);

	return ccw_device_move_to_sch(cdev, css->pseudo_subchannel);
}

static void io_subchannel_irq(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);

	CIO_TRACE_EVENT(6, "IRQ");
	CIO_TRACE_EVENT(6, dev_name(&sch->dev));
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
	else
		inc_irq_stat(IRQIO_CIO);
}

void io_subchannel_init_config(struct subchannel *sch)
{
	memset(&sch->config, 0, sizeof(sch->config));
	sch->config.csense = 1;
}

static void io_subchannel_init_fields(struct subchannel *sch)
{
	if (cio_is_console(sch->schid))
		sch->opm = 0xff;
	else
		sch->opm = chp_get_sch_opm(sch);
	sch->lpm = sch->schib.pmcw.pam & sch->opm;
	sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

	CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X"
		      " - PIM = %02X, PAM = %02X, POM = %02X\n",
		      sch->schib.pmcw.dev, sch->schid.ssid,
		      sch->schid.sch_no, sch->schib.pmcw.pim,
		      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	io_subchannel_init_config(sch);
}
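
/*
 * Note on the path masks above (background, added for orientation): PIM,
 * PAM, POM, opm and lpm are 8-bit masks with one bit per channel path,
 * bit 0x80 standing for path 0. sch->lpm = pam & opm thus restricts the
 * usable paths to those that are both available on the subchannel and
 * operational from the channel-path point of view.
 */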

/*
 * Note: We always return 0 so that we bind to the device even on error.
 * This is needed so that our remove function is called on unregister.
 */
static int io_subchannel_probe(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	int rc;

	if (cio_is_console(sch->schid)) {
		rc = sysfs_create_group(&sch->dev.kobj,
					&io_subchannel_attr_group);
		if (rc)
			CIO_MSG_EVENT(0, "Failed to create io subchannel "
				      "attributes for subchannel "
				      "0.%x.%04x (rc=%d)\n",
				      sch->schid.ssid, sch->schid.sch_no, rc);
		/*
		 * The console subchannel already has an associated ccw_device.
		 * Register it and exit.
		 */
		cdev = sch_get_cdev(sch);
		rc = device_add(&cdev->dev);
		if (rc) {
			/* Release online reference. */
			put_device(&cdev->dev);
			goto out_schedule;
		}
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return 0;
	}
	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		goto out_schedule;
	rc = sysfs_create_group(&sch->dev.kobj,
				&io_subchannel_attr_group);
	if (rc)
		goto out_schedule;
	/* Allocate I/O subchannel private data. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
}

static void io_subchannel_remove(struct subchannel *sch)
{
	struct io_subchannel_private *io_priv = to_io_private(sch);
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		goto out_free;

	ccw_device_unregister(cdev);
	spin_lock_irq(sch->lock);
	sch_set_cdev(sch, NULL);
	set_io_private(sch, NULL);
	spin_unlock_irq(sch->lock);
out_free:
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	kfree(io_priv);
	sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
}

static void io_subchannel_verify(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	else
		css_schedule_eval(sch->schid);
}

static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (!cdev)
		return;
	if (cio_update_schib(sch))
		goto err;
	/* Check for I/O on path. */
	if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
		goto out;
	if (cdev->private->state == DEV_STATE_ONLINE) {
		ccw_device_kill_io(cdev);
		goto out;
	}
	if (cio_clear(sch))
		goto err;
out:
	/* Trigger path verification. */
	dev_fsm_event(cdev, DEV_EVENT_VERIFY);
	return;

err:
	dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
}

static int io_subchannel_chp_event(struct subchannel *sch,
				   struct chp_link *link, int event)
{
	struct ccw_device *cdev = sch_get_cdev(sch);
	int mask, chpid, valid_bit;
	int path_event[8];

	mask = chp_ssd_get_mask(&sch->ssd_info, link);
	if (!mask)
		return 0;
	switch (event) {
	case CHP_VARY_OFF:
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_VARY_ON:
		sch->opm |= mask;
		sch->lpm |= mask;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_OFFLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		if (cdev)
			cdev->private->path_gone_mask |= mask;
		io_subchannel_terminate_path(sch, mask);
		break;
	case CHP_ONLINE:
		if (cio_update_schib(sch))
			return -ENODEV;
		sch->lpm |= mask & sch->opm;
		if (cdev)
			cdev->private->path_new_mask |= mask;
		io_subchannel_verify(sch);
		break;
	case CHP_FCES_EVENT:
		/* Forward Endpoint Security event */
		for (chpid = 0, valid_bit = 0x80; chpid < 8; chpid++,
				valid_bit >>= 1) {
			if (mask & valid_bit)
				path_event[chpid] = PE_PATH_FCES_EVENT;
			else
				path_event[chpid] = PE_NONE;
		}
		if (cdev && cdev->drv && cdev->drv->path_event)
			cdev->drv->path_event(cdev, path_event);
		break;
	}
	return 0;
}

static void io_subchannel_quiesce(struct subchannel *sch)
{
	struct ccw_device *cdev;
	int ret;

	spin_lock_irq(sch->lock);
	cdev = sch_get_cdev(sch);
	if (cio_is_console(sch->schid))
		goto out_unlock;
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
	while (ret == -EBUSY) {
		cdev->private->state = DEV_STATE_QUIESCE;
		cdev->private->iretry = 255;
		ret = ccw_device_cancel_halt_clear(cdev);
		if (ret == -EBUSY) {
			ccw_device_set_timeout(cdev, HZ/10);
			spin_unlock_irq(sch->lock);
			wait_event(cdev->private->wait_q,
				   cdev->private->state != DEV_STATE_QUIESCE);
			spin_lock_irq(sch->lock);
		}
		ret = cio_disable_subchannel(sch);
	}
out_unlock:
	spin_unlock_irq(sch->lock);
}

static void io_subchannel_shutdown(struct subchannel *sch)
{
	io_subchannel_quiesce(sch);
}

static int device_is_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return 0;
	return (cdev->private->state == DEV_STATE_DISCONNECTED ||
		cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID);
}
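
/*
 * Orientation note (derived from recovery_delay[] above, not in the
 * original file): device recovery is retried on an escalating schedule.
 * Each pass over the bus that still finds devices to recover re-arms
 * recovery_timer with the next delay, i.e. roughly 3s, then 30s, then
 * every 300s until nothing is left to do.
 */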

static int recovery_check(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct subchannel *sch;
	int *redo = data;

	spin_lock_irq(cdev->ccwlock);
	switch (cdev->private->state) {
	case DEV_STATE_ONLINE:
		sch = to_subchannel(cdev->dev.parent);
		if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
			break;
		fallthrough;
	case DEV_STATE_DISCONNECTED:
		CIO_MSG_EVENT(3, "recovery: trigger 0.%x.%04x\n",
			      cdev->private->dev_id.ssid,
			      cdev->private->dev_id.devno);
		dev_fsm_event(cdev, DEV_EVENT_VERIFY);
		*redo = 1;
		break;
	case DEV_STATE_DISCONNECTED_SENSE_ID:
		*redo = 1;
		break;
	}
	spin_unlock_irq(cdev->ccwlock);

	return 0;
}

static void recovery_work_func(struct work_struct *unused)
{
	int redo = 0;

	bus_for_each_dev(&ccw_bus_type, NULL, &redo, recovery_check);
	if (redo) {
		spin_lock_irq(&recovery_lock);
		if (!timer_pending(&recovery_timer)) {
			if (recovery_phase < ARRAY_SIZE(recovery_delay) - 1)
				recovery_phase++;
			mod_timer(&recovery_timer, jiffies +
				  recovery_delay[recovery_phase] * HZ);
		}
		spin_unlock_irq(&recovery_lock);
	} else
		CIO_MSG_EVENT(3, "recovery: end\n");
}

static DECLARE_WORK(recovery_work, recovery_work_func);

static void recovery_func(struct timer_list *unused)
{
	/*
	 * We can't do our recovery in softirq context and it's not
	 * performance critical, so we schedule it.
	 */
	schedule_work(&recovery_work);
}

void ccw_device_schedule_recovery(void)
{
	unsigned long flags;

	CIO_MSG_EVENT(3, "recovery: schedule\n");
	spin_lock_irqsave(&recovery_lock, flags);
	if (!timer_pending(&recovery_timer) || (recovery_phase != 0)) {
		recovery_phase = 0;
		mod_timer(&recovery_timer, jiffies + recovery_delay[0] * HZ);
	}
	spin_unlock_irqrestore(&recovery_lock, flags);
}

static int purge_fn(struct device *dev, void *data)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_dev_id *id = &cdev->private->dev_id;
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	spin_lock_irq(cdev->ccwlock);
	if (is_blacklisted(id->ssid, id->devno) &&
	    (cdev->private->state == DEV_STATE_OFFLINE) &&
	    (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
		CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
			      id->devno);
		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
		css_sched_sch_todo(sch, SCH_TODO_UNREG);
		atomic_set(&cdev->private->onoff, 0);
	}
	spin_unlock_irq(cdev->ccwlock);
	/* Abort loop in case of pending signal. */
	if (signal_pending(current))
		return -EINTR;

	return 0;
}

/**
 * ccw_purge_blacklisted - purge unused, blacklisted devices
 *
 * Unregister all ccw devices that are offline and on the blacklist.
 */
int ccw_purge_blacklisted(void)
{
	CIO_MSG_EVENT(2, "ccw: purging blacklisted devices\n");
	bus_for_each_dev(&ccw_bus_type, NULL, NULL, purge_fn);
	return 0;
}
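
/*
 * Background note (assumption about the wider cio code, not stated in
 * this file): the blacklist consulted by is_blacklisted() is the set of
 * devices excluded via the cio_ignore= kernel parameter and the
 * /proc/cio_ignore interface, maintained in blacklist.c.
 */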

void ccw_device_set_disconnected(struct ccw_device *cdev)
{
	if (!cdev)
		return;
	ccw_device_set_timeout(cdev, 0);
	cdev->private->flags.fake_irb = 0;
	cdev->private->state = DEV_STATE_DISCONNECTED;
	if (cdev->online)
		ccw_device_schedule_recovery();
}

void ccw_device_set_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "notoper");
	CIO_TRACE_EVENT(2, dev_name(&sch->dev));
	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	cdev->private->state = DEV_STATE_NOT_OPER;
}

enum io_sch_action {
	IO_SCH_UNREG,
	IO_SCH_ORPH_UNREG,
	IO_SCH_ATTACH,
	IO_SCH_UNREG_ATTACH,
	IO_SCH_ORPH_ATTACH,
	IO_SCH_REPROBE,
	IO_SCH_VERIFY,
	IO_SCH_DISC,
	IO_SCH_NOP,
};

static enum io_sch_action sch_get_action(struct subchannel *sch)
{
	struct ccw_device *cdev;

	cdev = sch_get_cdev(sch);
	if (cio_update_schib(sch)) {
		/* Not operational. */
		if (!cdev)
			return IO_SCH_UNREG;
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_ORPH_UNREG;
	}
	/* Operational. */
	if (!cdev)
		return IO_SCH_ATTACH;
	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
			return IO_SCH_UNREG_ATTACH;
		return IO_SCH_ORPH_ATTACH;
	}
	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
			return IO_SCH_UNREG;
		return IO_SCH_DISC;
	}
	if (device_is_disconnected(cdev))
		return IO_SCH_REPROBE;
	if (cdev->online)
		return IO_SCH_VERIFY;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return IO_SCH_UNREG_ATTACH;
	return IO_SCH_NOP;
}
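
/*
 * Decision summary for sch_get_action() (restating the code above as a
 * table, added for orientation):
 *
 *	subchannel	ccw device		-> action
 *	not oper	none			-> UNREG
 *	not oper	driver keeps it		-> ORPH_UNREG
 *	operational	none			-> ATTACH
 *	operational	devno changed		-> ORPH_ATTACH / UNREG_ATTACH
 *	operational	no usable path		-> DISC / UNREG
 *	operational	disconnected		-> REPROBE
 *	operational	online			-> VERIFY
 *	operational	not oper		-> UNREG_ATTACH
 *	otherwise				-> NOP
 */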

/**
 * io_subchannel_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel and device. Return
 * zero when the event has been handled sufficiently or -EAGAIN when this
 * function should be called again in process context.
 */
static int io_subchannel_sch_event(struct subchannel *sch, int process)
{
	unsigned long flags;
	struct ccw_device *cdev;
	struct ccw_dev_id dev_id;
	enum io_sch_action action;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;
	if (work_pending(&sch->todo_work))
		goto out_unlock;
	cdev = sch_get_cdev(sch);
	if (cdev && work_pending(&cdev->private->todo_work))
		goto out_unlock;
	action = sch_get_action(sch);
	CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, process,
		      action);
	/* Perform immediate actions while holding the lock. */
	switch (action) {
	case IO_SCH_REPROBE:
		/* Trigger device recognition. */
		ccw_device_trigger_reprobe(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_VERIFY:
		/* Trigger path verification. */
		io_subchannel_verify(sch);
		rc = 0;
		goto out_unlock;
	case IO_SCH_DISC:
		ccw_device_set_disconnected(cdev);
		rc = 0;
		goto out_unlock;
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		ccw_device_set_disconnected(cdev);
		break;
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_UNREG:
		if (!cdev)
			break;
		if (cdev->private->state == DEV_STATE_SENSE_ID) {
			/*
			 * Note: delayed work triggered by this event
			 * and repeated calls to sch_event are synchronized
			 * by the above check for work_pending(cdev).
			 */
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		} else
			ccw_device_set_notoper(cdev);
		break;
	case IO_SCH_NOP:
		rc = 0;
		goto out_unlock;
	default:
		break;
	}
	spin_unlock_irqrestore(sch->lock, flags);
	/* All other actions require process context. */
	if (!process)
		goto out;
	/* Handle attached ccw device. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_ORPH_ATTACH:
		/* Move ccw device to orphanage. */
		rc = ccw_device_move_to_orph(cdev);
		if (rc)
			goto out;
		break;
	case IO_SCH_UNREG_ATTACH:
		spin_lock_irqsave(sch->lock, flags);
		sch_set_cdev(sch, NULL);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Unregister ccw device. */
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Handle subchannel. */
	switch (action) {
	case IO_SCH_ORPH_UNREG:
	case IO_SCH_UNREG:
		css_sch_device_unregister(sch);
		break;
	case IO_SCH_ORPH_ATTACH:
	case IO_SCH_UNREG_ATTACH:
	case IO_SCH_ATTACH:
		dev_id.ssid = sch->schid.ssid;
		dev_id.devno = sch->schib.pmcw.dev;
		cdev = get_ccwdev_by_dev_id(&dev_id);
		if (!cdev) {
			sch_create_and_recog_new_device(sch);
			break;
		}
		rc = ccw_device_move_to_sch(cdev, sch);
		if (rc) {
			/* Release reference from get_ccwdev_by_dev_id() */
			put_device(&cdev->dev);
			goto out;
		}
		spin_lock_irqsave(sch->lock, flags);
		ccw_device_trigger_reprobe(cdev);
		spin_unlock_irqrestore(sch->lock, flags);
		/* Release reference from get_ccwdev_by_dev_id() */
		put_device(&cdev->dev);
		break;
	default:
		break;
	}
	return 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);
out:
	return rc;
}

static void ccw_device_set_int_class(struct ccw_device *cdev)
{
	struct ccw_driver *cdrv = cdev->drv;

	/* Note: we interpret class 0 in this context as an uninitialized
	 * field since it translates to a non-I/O interrupt class. */
	if (cdrv->int_class != 0)
		cdev->private->int_class = cdrv->int_class;
	else
		cdev->private->int_class = IRQIO_CIO;
}

#ifdef CONFIG_CCW_CONSOLE
int __init ccw_device_enable_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	int rc;

	if (!cdev->drv || !cdev->handler)
		return -EINVAL;

	io_subchannel_init_fields(sch);
	rc = cio_commit_config(sch);
	if (rc)
		return rc;
	sch->driver = &io_subchannel_driver;
	io_subchannel_recog(cdev, sch);
	/* Now wait for the async. recognition to come to an end. */
	spin_lock_irq(cdev->ccwlock);
	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	/* Hold on to an extra reference while device is online. */
	get_device(&cdev->dev);
	rc = ccw_device_online(cdev);
	if (rc)
		goto out_unlock;

	while (!dev_fsm_final_state(cdev))
		ccw_device_wait_idle(cdev);

	if (cdev->private->state == DEV_STATE_ONLINE)
		cdev->online = 1;
	else
		rc = -EIO;
out_unlock:
	spin_unlock_irq(cdev->ccwlock);
	if (rc) /* Give up online reference since onlining failed. */
		put_device(&cdev->dev);
	return rc;
}

struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
{
	struct io_subchannel_private *io_priv;
	struct ccw_device *cdev;
	struct subchannel *sch;

	sch = cio_probe_console();
	if (IS_ERR(sch))
		return ERR_CAST(sch);

	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto err_priv;
	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area)
		goto err_dma_area;
	set_io_private(sch, io_priv);
	cdev = io_subchannel_create_ccwdev(sch);
	if (IS_ERR(cdev)) {
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		set_io_private(sch, NULL);
		put_device(&sch->dev);
		kfree(io_priv);
		return cdev;
	}
	cdev->drv = drv;
	ccw_device_set_int_class(cdev);
	return cdev;

err_dma_area:
	kfree(io_priv);
err_priv:
	put_device(&sch->dev);
	return ERR_PTR(-ENOMEM);
}

void __init ccw_device_destroy_console(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);
	struct io_subchannel_private *io_priv = to_io_private(sch);

	set_io_private(sch, NULL);
	dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
			  io_priv->dma_area, io_priv->dma_area_dma);
	put_device(&sch->dev);
	put_device(&cdev->dev);
	kfree(io_priv);
}

/**
 * ccw_device_wait_idle() - busy wait for device to become idle
 * @cdev: ccw device
 *
 * Poll until activity control is zero, that is, no function or data
 * transfer is pending/active.
 * Called with device lock being held.
 */
void ccw_device_wait_idle(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	while (1) {
		cio_tsch(sch);
		if (sch->schib.scsw.cmd.actl == 0)
			break;
		udelay(100);
	}
}
#endif

/**
 * get_ccwdev_by_busid() - obtain device from a bus id
 * @cdrv: driver the device is owned by
 * @bus_id: bus id of the device to be searched
 *
 * This function searches all devices owned by @cdrv for a device with a bus
 * id matching @bus_id.
 * Returns:
 *	If a match is found, the reference count of the found device is
 *	increased and it is returned; else %NULL is returned.
 */
struct ccw_device *get_ccwdev_by_busid(struct ccw_driver *cdrv,
				       const char *bus_id)
{
	struct device *dev;

	dev = driver_find_device_by_name(&cdrv->driver, bus_id);

	return dev ? to_ccwdev(dev) : NULL;
}
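
/*
 * Illustrative sketch (hypothetical names, not from this file): callers
 * own the reference returned by get_ccwdev_by_busid() and must drop it
 * when done:
 *
 *	struct ccw_device *cdev = get_ccwdev_by_busid(&foo_driver, "0.0.1234");
 *
 *	if (cdev) {
 *		... use cdev ...
 *		put_device(&cdev->dev);
 *	}
 */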

/************************** device driver handling ************************/

/* This is the implementation of the ccw_driver class. The probe, remove
 * and release methods are initially very similar to the device_driver
 * implementations, with the difference that they have ccw_device
 * arguments.
 *
 * A ccw driver also contains the information that is needed for
 * device matching.
 */
static int
ccw_device_probe(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = to_ccwdrv(dev->driver);
	int ret;

	cdev->drv = cdrv; /* to let the driver call _set_online */
	ccw_device_set_int_class(cdev);
	ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV;
	if (ret) {
		cdev->drv = NULL;
		cdev->private->int_class = IRQIO_CIO;
		return ret;
	}

	return 0;
}

static void ccw_device_remove(struct device *dev)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	struct ccw_driver *cdrv = cdev->drv;
	struct subchannel *sch;
	int ret;

	if (cdrv->remove)
		cdrv->remove(cdev);

	spin_lock_irq(cdev->ccwlock);
	if (cdev->online) {
		cdev->online = 0;
		ret = ccw_device_offline(cdev);
		spin_unlock_irq(cdev->ccwlock);
		if (ret == 0)
			wait_event(cdev->private->wait_q,
				   dev_fsm_final_state(cdev));
		else
			CIO_MSG_EVENT(0, "ccw_device_offline returned %d, "
				      "device 0.%x.%04x\n",
				      ret, cdev->private->dev_id.ssid,
				      cdev->private->dev_id.devno);
		/* Give up reference obtained in ccw_device_set_online(). */
		put_device(&cdev->dev);
		spin_lock_irq(cdev->ccwlock);
	}
	ccw_device_set_timeout(cdev, 0);
	cdev->drv = NULL;
	cdev->private->int_class = IRQIO_CIO;
	sch = to_subchannel(cdev->dev.parent);
	spin_unlock_irq(cdev->ccwlock);
	io_subchannel_quiesce(sch);
	__disable_cmf(cdev);
}

static void ccw_device_shutdown(struct device *dev)
{
	struct ccw_device *cdev;

	cdev = to_ccwdev(dev);
	if (cdev->drv && cdev->drv->shutdown)
		cdev->drv->shutdown(cdev);
	__disable_cmf(cdev);
}

static struct bus_type ccw_bus_type = {
	.name   = "ccw",
	.match  = ccw_bus_match,
	.uevent = ccw_uevent,
	.probe  = ccw_device_probe,
	.remove = ccw_device_remove,
	.shutdown = ccw_device_shutdown,
};

/**
 * ccw_driver_register() - register a ccw driver
 * @cdriver: driver to be registered
 *
 * This function is mainly a wrapper around driver_register().
 * Returns:
 *	%0 on success and a negative error value on failure.
 */
int ccw_driver_register(struct ccw_driver *cdriver)
{
	struct device_driver *drv = &cdriver->driver;

	drv->bus = &ccw_bus_type;

	return driver_register(drv);
}

/**
 * ccw_driver_unregister() - deregister a ccw driver
 * @cdriver: driver to be deregistered
 *
 * This function is mainly a wrapper around driver_unregister().
 */
void ccw_driver_unregister(struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
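
/*
 * Minimal usage sketch for the two wrappers above (illustrative only,
 * with made-up names; real drivers also wire set_online/set_offline and
 * an interrupt handler):
 *
 *	static struct ccw_device_id foo_ids[] = {
 *		{ CCW_DEVICE(0x3990, 0) },
 *		{ },
 *	};
 *	MODULE_DEVICE_TABLE(ccw, foo_ids);
 *
 *	static struct ccw_driver foo_driver = {
 *		.driver = {
 *			.name  = "foo",
 *			.owner = THIS_MODULE,
 *		},
 *		.ids   = foo_ids,
 *		.probe = foo_probe,
 *	};
 *
 * The module init function then calls ccw_driver_register(&foo_driver)
 * and the exit function calls ccw_driver_unregister(&foo_driver).
 */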

static void ccw_device_todo(struct work_struct *work)
{
	struct ccw_device_private *priv;
	struct ccw_device *cdev;
	struct subchannel *sch;
	enum cdev_todo todo;

	priv = container_of(work, struct ccw_device_private, todo_work);
	cdev = priv->cdev;
	sch = to_subchannel(cdev->dev.parent);
	/* Find out todo. */
	spin_lock_irq(cdev->ccwlock);
	todo = priv->todo;
	priv->todo = CDEV_TODO_NOTHING;
	CIO_MSG_EVENT(4, "cdev_todo: cdev=0.%x.%04x todo=%d\n",
		      priv->dev_id.ssid, priv->dev_id.devno, todo);
	spin_unlock_irq(cdev->ccwlock);
	/* Perform todo. */
	switch (todo) {
	case CDEV_TODO_ENABLE_CMF:
		cmf_reenable(cdev);
		break;
	case CDEV_TODO_REBIND:
		ccw_device_do_unbind_bind(cdev);
		break;
	case CDEV_TODO_REGISTER:
		io_subchannel_register(cdev);
		break;
	case CDEV_TODO_UNREG_EVAL:
		if (!sch_is_pseudo_sch(sch))
			css_schedule_eval(sch->schid);
		fallthrough;
	case CDEV_TODO_UNREG:
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		spin_unlock_irq(sch->lock);
		ccw_device_unregister(cdev);
		break;
	default:
		break;
	}
	/* Release workqueue ref. */
	put_device(&cdev->dev);
}

/**
 * ccw_device_sched_todo - schedule ccw device operation
 * @cdev: ccw device
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with ccwdev lock held.
 */
void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
{
	CIO_MSG_EVENT(4, "cdev_todo: sched cdev=0.%x.%04x todo=%d\n",
		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
		      todo);
	if (cdev->private->todo >= todo)
		return;
	cdev->private->todo = todo;
	/* Get workqueue ref. */
	if (!get_device(&cdev->dev))
		return;
	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&cdev->dev);
	}
}

/**
 * ccw_device_siosl() - initiate logging
 * @cdev: ccw device
 *
 * This function is used to invoke model-dependent logging within the channel
 * subsystem.
 */
int ccw_device_siosl(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	return chsc_siosl(sch->schid);
}
EXPORT_SYMBOL_GPL(ccw_device_siosl);

EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);