/*
 * drivers/s390/cio/css.c
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002,2008
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
static int need_reprobe = 0;
static int max_ssid = 0;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
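/*
 * for_each_subchannel_staged() walks all subchannels in two phases: the
 * idset starts out full, every registered subchannel removes its id in
 * call_fn_known_sch(), and the ids that remain are handed to the "unknown"
 * callback.  Usage sketch (hypothetical callbacks, illustration only):
 *
 *	static int count_sch(struct subchannel *sch, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	static int count_schid(struct subchannel_id schid, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;
 *	}
 *
 *	int total = 0;
 *	for_each_subchannel_staged(count_sch, count_schid, &total);
 */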
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}

static void
css_free_subchannel(struct subchannel *sch)
{
	if (sch) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}
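/*
 * The path mask is a big-endian bit field: bit 0 (mask 0x80) corresponds to
 * chpid[0].  A caller walking the mask would look like this (sketch only,
 * use_chpid() is a hypothetical helper):
 *
 *	for (i = 0; i < 8; i++)
 *		if (ssd->path_mask & (0x80 >> i))
 *			use_chpid(ssd->chpid[i]);
 */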
void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}

int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	sch = css_alloc_subchannel(schid);
	if (IS_ERR(sch))
		return PTR_ERR(sch);
	ret = css_register_subchannel(sch);
	if (ret)
		css_free_subchannel(sch);
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}
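/*
 * get_subchannel_by_schid() returns the subchannel with a reference held
 * (taken by bus_find_device()); callers must drop it when done.  Typical
 * pattern, cf. css_evaluate_subchannel() below:
 *
 *	sch = get_subchannel_by_schid(schid);
 *	if (sch) {
 *		...
 *		put_device(&sch->dev);
 *	}
 */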
/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *slow_path_wq;
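/*
 * Evaluation requests are collected in slow_subchannel_set and drained by
 * css_slow_path_func().  Adding the id and queueing the work happen under
 * the same lock, so a concurrently running worker cannot miss a freshly
 * added id.  Typical call site (sketch, cf. css_evaluate_subchannel()):
 *
 *	if (ret == -EAGAIN)
 *		css_schedule_eval(schid);
 */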
void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	queue_work(slow_path_wq, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(slow_path_wq);
}

/* Reprobe subchannel if unregistered. */
static int reprobe_subchannel(struct subchannel_id schid, void *data)
{
	int ret;

	CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
		      schid.ssid, schid.sch_no);
	if (need_reprobe)
		return -EAGAIN;

	ret = css_probe_device(schid);
	switch (ret) {
	case 0:
		break;
	case -ENXIO:
	case -ENOMEM:
	case -EIO:
		/* These should abort looping */
		break;
	default:
		ret = 0;
	}

	return ret;
}

static void reprobe_after_idle(struct work_struct *unused)
{
	/* Make sure initial subchannel scan is done. */
	wait_event(ccw_device_init_wq,
		   atomic_read(&ccw_device_init_count) == 0);
	if (need_reprobe)
		css_schedule_reprobe();
}

static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);

/* Work function used to reprobe all unregistered subchannels. */
static void reprobe_all(struct work_struct *unused)
{
	int ret;

	CIO_MSG_EVENT(4, "reprobe start\n");

	/* Make sure initial subchannel scan is done. */
	if (atomic_read(&ccw_device_init_count) != 0) {
		queue_work(ccw_device_work, &reprobe_idle_work);
		return;
	}
	need_reprobe = 0;
	ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);

	CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
		      need_reprobe);
}

static DECLARE_WORK(css_reprobe_work, reprobe_all);

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	need_reprobe = 1;
	queue_work(slow_path_wq, &css_reprobe_work);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);
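/*
 * need_reprobe implements a simple restart protocol: css_schedule_reprobe()
 * sets the flag before queueing the work, reprobe_all() clears it before
 * scanning, and reprobe_subchannel() aborts with -EAGAIN as soon as it sees
 * the flag set again.  A reprobe requested while a scan is running thus
 * results in one complete fresh scan instead of a partial one.
 */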
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 8) & 3;

	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static int __init
__init_channel_subsystem(struct subchannel_id schid, void *data)
{
	struct subchannel *sch;
	int ret;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			ret = PTR_ERR(sch);
		else
			ret = 0;
		switch (ret) {
		case 0:
			break;
		case -ENOMEM:
			panic("Out of memory in init_channel_subsystem\n");
		/* -ENXIO: no more subchannels. */
		case -ENXIO:
			return ret;
		/* -EIO: this subchannel set not supported. */
		case -EIO:
			return ret;
		default:
			return 0;
		}
	}
	/*
	 * We register ALL valid subchannels in ioinfo, even those
	 * that have been present before init_channel_subsystem.
	 * These subchannels can't have been registered yet (kmalloc
	 * not working) so we do it now. This is true e.g. for the
	 * console subchannel.
	 */
	css_register_subchannel(sch);
	return 0;
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
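/*
 * The cm_enable attribute toggles the channel measurement facility via
 * chsc_secm().  From user space (sketch; assumes the default channel
 * subsystem css0):
 *
 *	echo 1 > /sys/devices/css0/cm_enable	(enable measurements)
 *	echo 0 > /sys/devices/css0/cm_enable	(disable measurements)
 */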
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);

static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem(void)
{
	int ret, i;

	ret = chsc_determine_css_characteristics();
	if (ret == -ENOMEM)
		goto out; /* No need to continue. */

	ret = chsc_alloc_sei_area();
	if (ret)
		goto out;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	switch (ret) {
	case 0: /* Success. */
		max_ssid = __MAX_SSID;
		break;
	case -ENOMEM:
		goto out_bus;
	default:
		max_ssid = 0;
	}
	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret)
			goto out_file;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	css_init_done = 1;
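	/*
	 * With the css instances registered and css_init_done set, the
	 * bus is ready for subchannels: enable the I/O interruption
	 * subclass and scan all subchannel ids once.
	 */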
	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	for_each_subchannel(__init_channel_subsystem, NULL);
	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
out_bus:
	bus_unregister(&css_bus_type);
out:
	/* Unregister the handler that was registered for CRW_RSC_SCH. */
	crw_unregister_handler(CRW_RSC_SCH);
	chsc_free_sei_area();
	kfree(slow_subchannel_set);
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

struct bus_type css_bus_type = {
	.name     = "css",
	.match    = css_bus_match,
	.probe    = css_probe,
	.remove   = css_remove,
	.shutdown = css_shutdown,
	.uevent   = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);
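/*
 * Sketch of a minimal css driver registration (hypothetical driver, shown
 * for illustration; the fields match struct css_driver as used by
 * css_bus_match(), css_probe() and css_remove() above):
 *
 *	static struct css_device_id foo_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO },
 *		{ },
 *	};
 *
 *	static struct css_driver foo_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "foo",
 *		.subchannel_type = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	ret = css_driver_register(&foo_driver);
 */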
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

subsys_initcall(init_channel_subsystem);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);