/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/suspend.h>
#include <linux/proc_fs.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];

int
for_each_subchannel(int (*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	ret = -ENODEV;
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}
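/*
 * Iterate over all subchannels in two stages: first over the subchannels
 * already registered on the css bus, then over the remaining subchannel
 * ids. The idset starts out full and call_fn_known_sch() strikes every
 * registered subchannel from it, so the second stage only visits ids for
 * which no registered device exists. Either callback may be NULL if a
 * caller is only interested in one of the two stages.
 */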
int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
						 void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

static void css_sch_todo(struct work_struct *work);

static struct subchannel *
css_alloc_subchannel(struct subchannel_id schid)
{
	struct subchannel *sch;
	int ret;

	sch = kmalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel(sch, schid);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	INIT_WORK(&sch->todo_work, css_sch_todo);
	return sch;
}

static void
css_subchannel_release(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (!cio_is_console(sch->schid)) {
		/* Reset intparm to zeroes. */
		sch->config.intparm = 0;
		cio_commit_config(sch);
		kfree(sch->lock);
		kfree(sch);
	}
}

static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_register(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	if (todo == SCH_TODO_UNREG)
		css_sch_device_unregister(sch);
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
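/*
 * Derive subchannel description data from the path management control word.
 * This is the fallback when CHSC information is not available: pmcw->pim is
 * a bit mask of installed paths, with bit 0x80 >> i corresponding to
 * pmcw->chpid[i], so every set bit marks chpid[i] as valid.
 */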
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			if (!chp_is_registered(ssd->chpid[i]))
				chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	if (cio_is_console(sch->schid)) {
		/* Console is initialized too early for functions requiring
		 * memory allocation. */
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
	} else {
		ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
		if (ret)
			ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);
		ssd_register_chpids(&sch->ssd_info);
	}
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR(type, 0444, type_show, NULL);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR(modalias, 0444, modalias_show, NULL);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.release = &css_subchannel_release;
	sch->dev.groups = default_subch_attr_groups;
	/*
	 * We don't want to generate uevents for I/O subchannels that don't
	 * have a working ccw device behind them since they will be
	 * unregistered before they can be used anyway, so we delay the add
	 * uevent until after device recognition was successful.
	 * Note that we suppress the uevent for all subchannel types;
	 * the subchannel driver can decide itself when it wants to inform
	 * userspace of its existence.
	 */
	dev_set_uevent_suppress(&sch->dev, 1);
	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	if (!sch->driver) {
		/*
		 * No driver matched. Generate the uevent now so that
		 * a fitting driver module may be loaded based on the
		 * modalias.
		 */
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}
	return ret;
}
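/*
 * Make the subchannel with the given id known to the css bus. The console
 * subchannel is set up statically before the memory allocators are
 * available, so it is looked up rather than allocated here and must not
 * be freed in the error path.
 */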
int css_probe_device(struct subchannel_id schid)
{
	int ret;
	struct subchannel *sch;

	if (cio_is_console(schid))
		sch = cio_get_console_subchannel();
	else {
		sch = css_alloc_subchannel(schid);
		if (IS_ERR(sch))
			return PTR_ERR(sch);
	}
	ret = css_register_subchannel(sch);
	if (ret) {
		if (!cio_is_console(schid))
			put_device(&sch->dev);
	}
	return ret;
}

static int
check_subchannel(struct device *dev, void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	if (stsch_err(schid, &schib) || !css_sch_is_valid(&schib)) {
		/* Unusable - ignore. */
		return 0;
	}
	CIO_MSG_EVENT(4, "event: sch 0.%x.%04x, new\n", schid.ssid,
		      schid.sch_no);

	return css_probe_device(schid);
}

static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	spin_lock_init(&slow_subchannel_lock);
	atomic_set(&css_eval_scheduled, 0);
	init_waitqueue_head(&css_eval_wq);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}
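/*
 * Slow path evaluation helpers. A subchannel is checked against
 * slow_subchannel_set and removed from the set under slow_subchannel_lock
 * before it is evaluated, so a concurrent css_schedule_eval() can re-add
 * it and thereby trigger another evaluation pass.
 */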
static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			break;
		default:
			rc = 0;
		}
	}
	return rc;
}

static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}
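/*
 * To evaluate only subchannels without a registered device, start from a
 * full idset, strike every subchannel that is registered on the css bus
 * and merge the remainder into slow_subchannel_set.
 */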
static int __unset_registered(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	idset_sch_del(set, sch->schid);
	return 0;
}

static void css_schedule_eval_all_unreg(void)
{
	unsigned long flags;
	struct idset *unreg_set;

	/* Find unregistered subchannels. */
	unreg_set = idset_sch_new();
	if (!unreg_set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(unreg_set);
	bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, unreg_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_work(cio_work_q, &slow_path_work);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(unreg_set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all unregistered subchannels. */
void css_schedule_reprobe(void)
{
	css_schedule_eval_all_unreg();
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

/*
 * Called from the machine check handler for subchannel report words.
 * crw0 carries the subchannel number in its rsid field; if a second,
 * chained report word is present, bits of crw1->rsid identify the ssid.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
	} else {
#ifdef CONFIG_SMP
		css->global_pgid.pgid_high.cpu_addr = stap();
#else
		css->global_pgid.pgid_high.cpu_addr = 0;
#endif
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void
channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css;

	css = to_css(dev);
	mutex_destroy(&css->mutex);
	if (css->pseudo_subchannel) {
		/* Implies that it has been generated but never registered. */
		css_subchannel_release(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
	}
	kfree(css);
}

static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
		   char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	if (!css)
		return 0;
	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}

static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
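/*
 * Set up one channel subsystem instance: allocate the "defunct" pseudo
 * subchannel (used as parent for ccw devices that have lost their real
 * subchannel), name the css device and generate the global path group id.
 * The channel_subsystem structure itself is allocated by the caller.
 */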
static int __init setup_css(int nr)
{
	u32 tod_high;
	int ret;
	struct channel_subsystem *css;

	css = channel_subsystems[nr];
	memset(css, 0, sizeof(struct channel_subsystem));
	css->pseudo_subchannel =
		kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
	if (!css->pseudo_subchannel)
		return -ENOMEM;
	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = cio_create_sch_lock(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		return ret;
	}
	mutex_init(&css->mutex);
	css->valid = 1;
	css->cssid = nr;
	dev_set_name(&css->device, "css%x", nr);
	css->device.release = channel_subsystem_release;
	tod_high = (u32) (get_clock() >> 32);
	css_generate_pgid(css, tod_high);
	return 0;
}
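/*
 * On reboot, disable channel measurement on all channel subsystems so
 * that the hardware stops updating measurement blocks in memory the next
 * kernel is going to reuse.
 */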
static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	int ret, i;

	ret = NOTIFY_DONE;
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = channel_subsystems[i];
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

/*
 * Since the css devices are neither on a bus nor have a class
 * nor have a special device type, we cannot stop/restart channel
 * path measurements via the normal suspend/resume callbacks, but have
 * to use notifiers.
 */
static int css_power_event(struct notifier_block *this, unsigned long event,
			   void *ptr)
{
	int ret, i;

	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 0))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		ret = NOTIFY_DONE;
		for (i = 0; i <= __MAX_CSSID; i++) {
			struct channel_subsystem *css;

			css = channel_subsystems[i];
			mutex_lock(&css->mutex);
			if (!css->cm_enabled) {
				mutex_unlock(&css->mutex);
				continue;
			}
			if (__chsc_do_secm(css, 1))
				ret = NOTIFY_BAD;
			mutex_unlock(&css->mutex);
		}
		/* search for subchannels, which appeared during hibernation */
		css_schedule_reprobe();
		break;
	default:
		ret = NOTIFY_DONE;
	}
	return ret;
}

static struct notifier_block css_power_notifier = {
	.notifier_call = css_power_event,
};

/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= __MAX_CSSID; i++) {
		struct channel_subsystem *css;

		css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
		if (!css) {
			ret = -ENOMEM;
			goto out_unregister;
		}
		channel_subsystems[i] = css;
		ret = setup_css(i);
		if (ret) {
			kfree(channel_subsystems[i]);
			goto out_unregister;
		}
		ret = device_register(&css->device);
		if (ret) {
			put_device(&css->device);
			goto out_unregister;
		}
		if (css_chsc_characteristics.secm) {
			ret = device_create_file(&css->device,
						 &dev_attr_cm_enable);
			if (ret)
				goto out_device;
		}
		ret = device_register(&css->pseudo_subchannel->dev);
		if (ret) {
			put_device(&css->pseudo_subchannel->dev);
			goto out_file;
		}
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = register_pm_notifier(&css_power_notifier);
	if (ret) {
		unregister_reboot_notifier(&css_reboot_notifier);
		goto out_unregister;
	}
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_file:
	if (css_chsc_characteristics.secm)
		device_remove_file(&channel_subsystems[i]->device,
				   &dev_attr_cm_enable);
out_device:
	device_unregister(&channel_subsystems[i]->device);
out_unregister:
	while (i > 0) {
		struct channel_subsystem *css;

		i--;
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device,
					   &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;
	int i;

	for (i = 0; i <= __MAX_CSSID; i++) {
		css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		css->pseudo_subchannel = NULL;
		if (css_chsc_characteristics.secm)
			device_remove_file(&css->device, &dev_attr_cm_enable);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);
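/*
 * The settle interface: css_complete_work() waits until all scheduled
 * subchannel evaluations have run and then asks every css driver to
 * finish its type specific initialization (e.g. ccw device recognition).
 * It is used at boot and by the /proc/cio_settle interface below.
 */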
static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

void channel_subsystem_reinit(void)
{
	struct channel_path *chp;
	struct chp_id chpid;

	chsc_enable_facility(CHSC_SDA_OC_MSS);
	chp_id_for_each(&chpid) {
		chp = chpid_to_chp(chpid);
		if (!chp)
			continue;
		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
	}
}

#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRW's. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct file_operations cio_settle_proc_fops = {
	.open = nonseekable_open,
	.write = cio_settle_write,
	.llseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL,
			    &cio_settle_proc_fops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}
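/*
 * A css driver matches a subchannel by its type: the driver's
 * subchannel_type table is scanned for an entry whose type equals the
 * subchannel's st value. This mirrors the "css:tX" modalias that is
 * reported to user space for module loading.
 */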
static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static int css_remove(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	ret = sch->driver->remove ? sch->driver->remove(sch) : 0;
	sch->driver = NULL;
	return ret;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static int css_pm_prepare(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (mutex_is_locked(&sch->reg_mutex))
		return -EAGAIN;
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	/* Notify drivers that they may not register children. */
	return drv->prepare ? drv->prepare(sch) : 0;
}

static void css_pm_complete(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return;
	drv = to_cssdriver(sch->dev.driver);
	if (drv->complete)
		drv->complete(sch);
}

static int css_pm_freeze(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->freeze ? drv->freeze(sch) : 0;
}

static int css_pm_thaw(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->thaw ? drv->thaw(sch) : 0;
}

static int css_pm_restore(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *drv;

	css_update_ssd_info(sch);
	if (!sch->dev.driver)
		return 0;
	drv = to_cssdriver(sch->dev.driver);
	return drv->restore ? drv->restore(sch) : 0;
}

static const struct dev_pm_ops css_pm_ops = {
	.prepare = css_pm_prepare,
	.complete = css_pm_complete,
	.freeze = css_pm_freeze,
	.thaw = css_pm_thaw,
	.restore = css_pm_restore,
};

struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
	.uevent = css_uevent,
	.pm = &css_pm_ops,
};
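/*
 * Illustrative sketch (not part of this file): a minimal css driver
 * provides a subchannel type table and registers itself. The names below
 * are placeholders; see the I/O subchannel driver in device.c for a real
 * user.
 *
 *	static struct css_device_id foo_ids[] = {
 *		{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
 *		{ },
 *	};
 *
 *	static struct css_driver foo_driver = {
 *		.owner = THIS_MODULE,
 *		.name = "foo",
 *		.subchannel_type = foo_ids,
 *		.probe = foo_probe,
 *	};
 *
 *	ret = css_driver_register(&foo_driver);
 */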
/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.name = cdrv->name;
	cdrv->drv.bus = &css_bus_type;
	cdrv->drv.owner = cdrv->owner;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(css_bus_type);