// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

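/* Allocate the subchannel's lock and initialize it and the registration mutex. */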
static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	device_initialize(&sch->dev);
	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

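/*
 * Refresh the subchannel's path information: query it via CHSC, fall back
 * to the data in the PMCW if the CHSC call fails, then register any
 * channel paths found.
 */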
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

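/*
 * Allocate a new subchannel for @schid and register it with the css bus;
 * drop the device reference if registration fails.
 */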
static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not-operational (ccode==3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

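/*
 * Work function: read and clear the pending todo under the subchannel lock,
 * perform it, and drop the workqueue reference taken in css_sched_sch_todo().
 */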
static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/* Allow scheduling here since the containing loop might
		 * take a while. */
		cond_resched();
	}
	return rc;
}

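/*
 * Slow path work function: evaluate every subchannel currently queued in
 * slow_subchannel_set and wake up waiters once the set is empty.
 */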
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(unsigned long delay)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_all_unreg(1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			(css->cssid < 0) ? 0 : css->cssid;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (css->cssid < 0)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

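/*
 * sysfs attribute groups for the channel subsystem device; the cm_enable
 * attribute is only made visible when the SECM facility is available.
 */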
static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;

	mutex_init(&css->mutex);
	css->cssid = chsc_get_cssid(nr);
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 0);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for_each_css(css) {
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			ret = __chsc_do_secm(css, 1);
			ret = notifier_from_errno(ret);
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;

}
static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	if ((ret = bus_register(&css_bus_type)))
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];

		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

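/* Undo the setup performed by css_bus_init(). */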
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (chp)
			chp_update_desc(chp);
	}
	cmf_reactivate();
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

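/*
 * Writing anything to /proc/cio_settle blocks the caller until all pending
 * channel report words have been handled and subchannel evaluation has
 * completed.
 */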
static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /*CONFIG_PROC_FS*/

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

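/* On restore, refresh the path information before calling the driver's restore callback. */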
static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

static struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);