/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2009
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}
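/*
 * Illustrative sketch (not part of the driver): a hypothetical caller of
 * for_each_subchannel_staged(). Registered subchannels arrive via the
 * "known" callback; everything still left in the idset is passed to the
 * "unknown" one. A non-zero return value aborts the iteration. All names
 * below are examples only.
 */
static int example_count_known(struct subchannel *sch, void *data)
{
	int *count = data;

	(*count)++;	/* registered: a struct subchannel already exists */
	return 0;
}

static int example_note_unknown(struct subchannel_id schid, void *data)
{
	/* present on the machine but not (yet) registered with the bus */
	CIO_MSG_EVENT(4, "unregistered sch 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	return 0;
}

static void __maybe_unused example_scan_subchannels(void)
{
	int count = 0;

	for_each_subchannel_staged(example_count_known,
				   example_note_unknown, &count);
}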
static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(slow_path_wq, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
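/*
 * Illustrative sketch (hypothetical caller): css_sched_sch_todo() must be
 * called with the subchannel lock held, so a request to take a subchannel
 * down from process context would look like this.
 */
static void __maybe_unused example_request_unregister(struct subchannel *sch)
{
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
}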
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
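/*
 * Note on the uevent generated above: it carries MODALIAS=css:t<type>
 * (see css_uevent() below), so a subchannel driver built as a module can
 * be autoloaded by declaring a matching alias. A hypothetical module for
 * I/O subchannels (type 0) would state, for example:
 *
 *	MODULE_ALIAS("css:t0");
 */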
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}
static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = S390_lowcore.cpu_id.ident;
	css->global_pgid.cpu_model = S390_lowcore.cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
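/*
 * The cm_enable attribute defined below toggles channel-path measurements
 * from userspace. Assuming the usual sysfs placement of the "css0" device
 * named in setup_css(), this would look like:
 *
 *	echo 1 > /sys/devices/css0/cm_enable	(start measurements)
 *	echo 0 > /sys/devices/css0/cm_enable	(stop measurements)
 */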
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	void *secm_area;
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 0, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			secm_area = (void *)get_zeroed_page(GFP_KERNEL |
							    GFP_DMA);
			if (secm_area) {
				if (__chsc_do_secm(css, 1, secm_area))
					ret = NOTIFY_BAD;
				free_page((unsigned long)secm_area);
			} else
				ret = NOTIFY_BAD;

			mutex_unlock(&css->mutex);
		}
		/* Look for subchannels that appeared during hibernation. */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel instances are created during probing (except for
 * the static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out;

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out;
	default:
		max_ssid = 0;
	}

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}
static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	idset_free(slow_subchannel_set);
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;

	ret = io_subchannel_init();
	if (ret)
		css_bus_cleanup();

	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		cssdrv->settle();
	return 0;
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	/* Wait for the evaluation of subchannels to finish. */
	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
	/* Wait for the subchannel type specific initialization to finish. */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}
subsys_initcall_sync(channel_subsystem_init_sync);

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}
static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
	.pm = &css_pm_ops,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);
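/*
 * Illustrative sketch (hypothetical, not part of the driver): a minimal
 * subchannel driver that binds to I/O subchannels via css_driver_register().
 * The id table is walked by css_bus_match() above; all names below are
 * examples only.
 */
static struct css_device_id example_subchannel_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};

static int example_sch_probe(struct subchannel *sch)
{
	/* set up per-subchannel state here */
	return 0;
}

static struct css_driver example_css_driver = {
	.owner = THIS_MODULE,
	.name = "example",
	.subchannel_type = example_subchannel_ids,
	.probe = example_sch_probe,
};

static int __init __maybe_unused example_css_driver_init(void)
{
	return css_driver_register(&example_css_driver);
}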