// SPDX-License-Identifier: GPL-2.0
/*
 * driver for channel subsystem
 *
 * Copyright IBM Corp. 2002, 2010
 *
 * Author(s): Arnd Bergmann (arndb@de.ibm.com)
 *	      Cornelia Huck (cornelia.huck@de.ibm.com)
 */

#define KMSG_COMPONENT "cio"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/reboot.h>
#include <linux/proc_fs.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <asm/isc.h>
#include <asm/crw.h>

#include "css.h"
#include "cio.h"
#include "blacklist.h"
#include "cio_debug.h"
#include "ioasm.h"
#include "chsc.h"
#include "device.h"
#include "idset.h"
#include "chp.h"

int css_init_done = 0;
int max_ssid;

#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;

int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
{
	struct subchannel_id schid;
	int ret;

	init_subchannel_id(&schid);
	do {
		do {
			ret = fn(schid, data);
			if (ret)
				break;
		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
		schid.sch_no = 0;
	} while (schid.ssid++ < max_ssid);
	return ret;
}

struct cb_data {
	void *data;
	struct idset *set;
	int (*fn_known_sch)(struct subchannel *, void *);
	int (*fn_unknown_sch)(struct subchannel_id, void *);
};

static int call_fn_known_sch(struct device *dev, void *data)
{
	struct subchannel *sch = to_subchannel(dev);
	struct cb_data *cb = data;
	int rc = 0;

	if (cb->set)
		idset_sch_del(cb->set, sch->schid);
	if (cb->fn_known_sch)
		rc = cb->fn_known_sch(sch, cb->data);
	return rc;
}

static int call_fn_unknown_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	int rc = 0;

	if (idset_sch_contains(cb->set, schid))
		rc = cb->fn_unknown_sch(schid, cb->data);
	return rc;
}

static int call_fn_all_sch(struct subchannel_id schid, void *data)
{
	struct cb_data *cb = data;
	struct subchannel *sch;
	int rc = 0;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		if (cb->fn_known_sch)
			rc = cb->fn_known_sch(sch, cb->data);
		put_device(&sch->dev);
	} else {
		if (cb->fn_unknown_sch)
			rc = cb->fn_unknown_sch(schid, cb->data);
	}

	return rc;
}

int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
			       int (*fn_unknown)(struct subchannel_id,
			       void *), void *data)
{
	struct cb_data cb;
	int rc;

	cb.data = data;
	cb.fn_known_sch = fn_known;
	cb.fn_unknown_sch = fn_unknown;

	if (fn_known && !fn_unknown) {
		/* Skip idset allocation in case of known-only loop. */
		cb.set = NULL;
		return bus_for_each_dev(&css_bus_type, NULL, &cb,
					call_fn_known_sch);
	}

	cb.set = idset_sch_new();
	if (!cb.set)
		/* fall back to brute force scanning in case of oom */
		return for_each_subchannel(call_fn_all_sch, &cb);

	idset_fill(cb.set);

	/* Process registered subchannels. */
	rc = bus_for_each_dev(&css_bus_type, NULL, &cb, call_fn_known_sch);
	if (rc)
		goto out;
	/* Process unregistered subchannels. */
	if (fn_unknown)
		rc = for_each_subchannel(call_fn_unknown_sch, &cb);
out:
	idset_free(cb.set);

	return rc;
}

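/*
 * Illustrative usage sketch (not part of the driver): a caller that only
 * needs the registered subchannels can pass a "known" callback and NULL
 * for the unknown one, which skips the idset allocation above. The
 * count_known_sch() helper below is hypothetical.
 *
 *	static int count_known_sch(struct subchannel *sch, void *data)
 *	{
 *		(*(int *)data)++;
 *		return 0;	// a non-zero return would abort the loop
 *	}
 *
 *	int n = 0;
 *	for_each_subchannel_staged(count_known_sch, NULL, &n);
 */
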
static void css_sch_todo(struct work_struct *work);

static int css_sch_create_locks(struct subchannel *sch)
{
	sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;

	spin_lock_init(sch->lock);
	mutex_init(&sch->reg_mutex);

	return 0;
}

static void css_subchannel_release(struct device *dev)
{
	struct subchannel *sch = to_subchannel(dev);

	sch->config.intparm = 0;
	cio_commit_config(sch);
	kfree(sch->driver_override);
	kfree(sch->lock);
	kfree(sch);
}

static int css_validate_subchannel(struct subchannel_id schid,
				   struct schib *schib)
{
	int err;

	switch (schib->pmcw.st) {
	case SUBCHANNEL_TYPE_IO:
	case SUBCHANNEL_TYPE_MSG:
		if (!css_sch_is_valid(schib))
			err = -ENODEV;
		else if (is_blacklisted(schid.ssid, schib->pmcw.dev)) {
			CIO_MSG_EVENT(6, "Blacklisted device detected "
				      "at devno %04X, subchannel set %x\n",
				      schib->pmcw.dev, schid.ssid);
			err = -ENODEV;
		} else
			err = 0;
		break;
	default:
		err = 0;
	}
	if (err)
		goto out;

	CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n",
		      schid.ssid, schid.sch_no, schib->pmcw.st);
out:
	return err;
}

struct subchannel *css_alloc_subchannel(struct subchannel_id schid,
					struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	ret = css_validate_subchannel(schid, schib);
	if (ret < 0)
		return ERR_PTR(ret);

	sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA);
	if (!sch)
		return ERR_PTR(-ENOMEM);

	sch->schid = schid;
	sch->schib = *schib;
	sch->st = schib->pmcw.st;

	ret = css_sch_create_locks(sch);
	if (ret)
		goto err;

	INIT_WORK(&sch->todo_work, css_sch_todo);
	sch->dev.release = &css_subchannel_release;
	sch->dev.dma_mask = &sch->dma_mask;
	device_initialize(&sch->dev);
	/*
	 * The physical addresses for some of the dma structures that can
	 * belong to a subchannel need to fit 31 bit width (e.g. ccw).
	 */
	ret = dma_set_coherent_mask(&sch->dev, DMA_BIT_MASK(31));
	if (ret)
		goto err;
	/*
	 * No such restriction applies to memory that is handled via the
	 * streaming DMA API, so allow the full 64 bit range there.
	 */
	ret = dma_set_mask(&sch->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err;

	return sch;

err:
	kfree(sch);
	return ERR_PTR(ret);
}

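/*
 * Illustrative sketch (not part of the driver): with the masks set up as
 * above, coherent allocations against the subchannel device come from
 * 31-bit addressable memory, which is what channel programs require,
 * while streaming mappings may use the full 64-bit range. The ccw
 * variable below is hypothetical.
 *
 *	struct ccw1 *ccw;
 *	dma_addr_t dma;
 *
 *	ccw = dma_alloc_coherent(&sch->dev, sizeof(*ccw), &dma, GFP_KERNEL);
 *	// dma is guaranteed to fit within 31 bits here
 *	dma_free_coherent(&sch->dev, sizeof(*ccw), ccw, dma);
 */
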
static int css_sch_device_register(struct subchannel *sch)
{
	int ret;

	mutex_lock(&sch->reg_mutex);
	dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid,
		     sch->schid.sch_no);
	ret = device_add(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
	return ret;
}

/**
 * css_sch_device_unregister - unregister a subchannel
 * @sch: subchannel to be unregistered
 */
void css_sch_device_unregister(struct subchannel *sch)
{
	mutex_lock(&sch->reg_mutex);
	if (device_is_registered(&sch->dev))
		device_unregister(&sch->dev);
	mutex_unlock(&sch->reg_mutex);
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);

static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
	int i;
	int mask;

	memset(ssd, 0, sizeof(struct chsc_ssd_info));
	ssd->path_mask = pmcw->pim;
	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (pmcw->pim & mask) {
			chp_id_init(&ssd->chpid[i]);
			ssd->chpid[i].id = pmcw->chpid[i];
		}
	}
}

static void ssd_register_chpids(struct chsc_ssd_info *ssd)
{
	int i;
	int mask;

	for (i = 0; i < 8; i++) {
		mask = 0x80 >> i;
		if (ssd->path_mask & mask)
			chp_new(ssd->chpid[i]);
	}
}

void css_update_ssd_info(struct subchannel *sch)
{
	int ret;

	ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info);
	if (ret)
		ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw);

	ssd_register_chpids(&sch->ssd_info);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "%01x\n", sch->st);
}

static DEVICE_ATTR_RO(type);

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct subchannel *sch = to_subchannel(dev);

	return sprintf(buf, "css:t%01X\n", sch->st);
}

static DEVICE_ATTR_RO(modalias);

static ssize_t driver_override_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = driver_set_override(dev, &sch->driver_override, buf, count);
	if (ret)
		return ret;

	return count;
}

static ssize_t driver_override_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	ssize_t len;

	device_lock(dev);
	len = snprintf(buf, PAGE_SIZE, "%s\n", sch->driver_override);
	device_unlock(dev);
	return len;
}
static DEVICE_ATTR_RW(driver_override);

static struct attribute *subch_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_modalias.attr,
	&dev_attr_driver_override.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

static const struct attribute_group *default_subch_attr_groups[] = {
	&subch_attr_group,
	NULL,
};

static ssize_t chpids_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct chsc_ssd_info *ssd = &sch->ssd_info;
	ssize_t ret = 0;
	int mask;
	int chp;

	for (chp = 0; chp < 8; chp++) {
		mask = 0x80 >> chp;
		if (ssd->path_mask & mask)
			ret += sprintf(buf + ret, "%02x ", ssd->chpid[chp].id);
		else
			ret += sprintf(buf + ret, "00 ");
	}
	ret += sprintf(buf + ret, "\n");
	return ret;
}
static DEVICE_ATTR_RO(chpids);

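/*
 * Worked example (hypothetical values): with pim = 0xc0 only bits 0 and 1
 * of the path mask are set, so ssd_from_pmcw() records chpid[0] and
 * chpid[1], and chpids_show() would emit something like
 *
 *	50 51 00 00 00 00 00 00
 *
 * for a subchannel whose first two channel paths are 0x50 and 0x51.
 */
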
static ssize_t pimpampom_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	return sprintf(buf, "%02x %02x %02x\n",
		       pmcw->pim, pmcw->pam, pmcw->pom);
}
static DEVICE_ATTR_RO(pimpampom);

static ssize_t dev_busid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	if ((pmcw->st == SUBCHANNEL_TYPE_IO && pmcw->dnv) ||
	    (pmcw->st == SUBCHANNEL_TYPE_MSG && pmcw->w))
		return sysfs_emit(buf, "0.%x.%04x\n", sch->schid.ssid,
				  pmcw->dev);
	else
		return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(dev_busid);

static struct attribute *io_subchannel_type_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	&dev_attr_dev_busid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(io_subchannel_type);

static const struct device_type io_subchannel_type = {
	.groups = io_subchannel_type_groups,
};

int css_register_subchannel(struct subchannel *sch)
{
	int ret;

	/* Initialize the subchannel structure */
	sch->dev.parent = &channel_subsystems[0]->device;
	sch->dev.bus = &css_bus_type;
	sch->dev.groups = default_subch_attr_groups;

	if (sch->st == SUBCHANNEL_TYPE_IO)
		sch->dev.type = &io_subchannel_type;

	css_update_ssd_info(sch);
	/* make it known to the system */
	ret = css_sch_device_register(sch);
	if (ret) {
		CIO_MSG_EVENT(0, "Could not register sch 0.%x.%04x: %d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
		return ret;
	}
	return ret;
}

static int css_probe_device(struct subchannel_id schid, struct schib *schib)
{
	struct subchannel *sch;
	int ret;

	sch = css_alloc_subchannel(schid, schib);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	ret = css_register_subchannel(sch);
	if (ret)
		put_device(&sch->dev);

	return ret;
}

static int
check_subchannel(struct device *dev, const void *data)
{
	struct subchannel *sch;
	struct subchannel_id *schid = (void *)data;

	sch = to_subchannel(dev);
	return schid_equal(&sch->schid, schid);
}

struct subchannel *
get_subchannel_by_schid(struct subchannel_id schid)
{
	struct device *dev;

	dev = bus_find_device(&css_bus_type, NULL,
			      &schid, check_subchannel);

	return dev ? to_subchannel(dev) : NULL;
}

/**
 * css_sch_is_valid() - check if a subchannel is valid
 * @schib: subchannel information block for the subchannel
 */
int css_sch_is_valid(struct schib *schib)
{
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_IO) && !schib->pmcw.dnv)
		return 0;
	if ((schib->pmcw.st == SUBCHANNEL_TYPE_MSG) && !schib->pmcw.w)
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(css_sch_is_valid);

static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
{
	struct schib schib;
	int ccode;

	if (!slow) {
		/* Will be done on the slow path. */
		return -EAGAIN;
	}
	/*
	 * The first subchannel that is not operational (ccode == 3)
	 * indicates that there aren't any more devices available.
	 * If stsch gets an exception, it means the current subchannel set
	 * is not valid.
	 */
	ccode = stsch(schid, &schib);
	if (ccode)
		return (ccode == 3) ? -ENXIO : ccode;

	return css_probe_device(schid, &schib);
}

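/*
 * Note on the slow/fast split (summary, no extra driver logic): when a
 * machine check reports a so far unknown subchannel, the fast path above
 * returns -EAGAIN, which makes css_evaluate_subchannel() queue the schid
 * on the slow-path idset; the slow-path worker then re-runs the
 * evaluation with slow == 1 and actually calls stsch()/css_probe_device().
 */
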
static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
{
	int ret = 0;

	if (sch->driver) {
		if (sch->driver->sch_event)
			ret = sch->driver->sch_event(sch, slow);
		else
			dev_dbg(&sch->dev,
				"Got subchannel machine check but "
				"no sch_event handler provided.\n");
	}
	if (ret != 0 && ret != -EAGAIN) {
		CIO_MSG_EVENT(2, "eval: sch 0.%x.%04x, rc=%d\n",
			      sch->schid.ssid, sch->schid.sch_no, ret);
	}
	return ret;
}

static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
{
	struct subchannel *sch;
	int ret;

	sch = get_subchannel_by_schid(schid);
	if (sch) {
		ret = css_evaluate_known_subchannel(sch, slow);
		put_device(&sch->dev);
	} else
		ret = css_evaluate_new_subchannel(schid, slow);
	if (ret == -EAGAIN)
		css_schedule_eval(schid);
}

/**
 * css_sched_sch_todo - schedule a subchannel operation
 * @sch: subchannel
 * @todo: todo
 *
 * Schedule the operation identified by @todo to be performed on the slow path
 * workqueue. Do nothing if another operation with higher priority is already
 * scheduled. Needs to be called with subchannel lock held.
 */
void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
{
	CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
		      sch->schid.ssid, sch->schid.sch_no, todo);
	if (sch->todo >= todo)
		return;
	/* Get workqueue ref. */
	if (!get_device(&sch->dev))
		return;
	sch->todo = todo;
	if (!queue_work(cio_work_q, &sch->todo_work)) {
		/* Already queued, release workqueue ref. */
		put_device(&sch->dev);
	}
}
EXPORT_SYMBOL_GPL(css_sched_sch_todo);

static void css_sch_todo(struct work_struct *work)
{
	struct subchannel *sch;
	enum sch_todo todo;
	int ret;

	sch = container_of(work, struct subchannel, todo_work);
	/* Find out todo. */
	spin_lock_irq(sch->lock);
	todo = sch->todo;
	CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
		      sch->schid.sch_no, todo);
	sch->todo = SCH_TODO_NOTHING;
	spin_unlock_irq(sch->lock);
	/* Perform todo. */
	switch (todo) {
	case SCH_TODO_NOTHING:
		break;
	case SCH_TODO_EVAL:
		ret = css_evaluate_known_subchannel(sch, 1);
		if (ret == -EAGAIN) {
			spin_lock_irq(sch->lock);
			css_sched_sch_todo(sch, todo);
			spin_unlock_irq(sch->lock);
		}
		break;
	case SCH_TODO_UNREG:
		css_sch_device_unregister(sch);
		break;
	}
	/* Release workqueue ref. */
	put_device(&sch->dev);
}

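/*
 * Illustrative sketch (not part of the driver): because css_sched_sch_todo()
 * only ever upgrades sch->todo, a pending eval is superseded by an
 * unregister request but not the other way around. Assuming the usual enum
 * ordering SCH_TODO_NOTHING < SCH_TODO_EVAL < SCH_TODO_UNREG:
 *
 *	spin_lock_irq(sch->lock);
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);
 *	css_sched_sch_todo(sch, SCH_TODO_UNREG);	// wins
 *	css_sched_sch_todo(sch, SCH_TODO_EVAL);		// ignored
 *	spin_unlock_irq(sch->lock);
 */
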
static struct idset *slow_subchannel_set;
static DEFINE_SPINLOCK(slow_subchannel_lock);
static DECLARE_WAIT_QUEUE_HEAD(css_eval_wq);
static atomic_t css_eval_scheduled;

static int __init slow_subchannel_init(void)
{
	atomic_set(&css_eval_scheduled, 0);
	slow_subchannel_set = idset_sch_new();
	if (!slow_subchannel_set) {
		CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
		return -ENOMEM;
	}
	return 0;
}

static int slow_eval_known_fn(struct subchannel *sch, void *data)
{
	int eval;
	int rc;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, sch->schid);
	idset_sch_del(slow_subchannel_set, sch->schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_known_subchannel(sch, 1);
		if (rc == -EAGAIN)
			css_schedule_eval(sch->schid);
		/*
		 * The loop might take a long time on platforms with lots
		 * of known devices. Allow scheduling here.
		 */
		cond_resched();
	}
	return 0;
}

static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
{
	int eval;
	int rc = 0;

	spin_lock_irq(&slow_subchannel_lock);
	eval = idset_sch_contains(slow_subchannel_set, schid);
	idset_sch_del(slow_subchannel_set, schid);
	spin_unlock_irq(&slow_subchannel_lock);
	if (eval) {
		rc = css_evaluate_new_subchannel(schid, 1);
		switch (rc) {
		case -EAGAIN:
			css_schedule_eval(schid);
			rc = 0;
			break;
		case -ENXIO:
		case -ENOMEM:
		case -EIO:
			/* These should abort looping */
			spin_lock_irq(&slow_subchannel_lock);
			idset_sch_del_subseq(slow_subchannel_set, schid);
			spin_unlock_irq(&slow_subchannel_lock);
			break;
		default:
			rc = 0;
		}
		/*
		 * Allow scheduling here since the containing loop might
		 * take a while.
		 */
		cond_resched();
	}
	return rc;
}

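/*
 * Note on the abort cases above (summary, no extra driver logic): a cc 3
 * from stsch() surfaces here as -ENXIO and means there are no further
 * subchannels in this set, so idset_sch_del_subseq() drops the schid and
 * everything after it from slow_subchannel_set, turning the remaining
 * iterations of the scan into no-ops.
 */
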
static void css_slow_path_func(struct work_struct *unused)
{
	unsigned long flags;

	CIO_TRACE_EVENT(4, "slowpath");
	for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
				   NULL);
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	if (idset_is_empty(slow_subchannel_set)) {
		atomic_set(&css_eval_scheduled, 0);
		wake_up(&css_eval_wq);
	}
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static DECLARE_DELAYED_WORK(slow_path_work, css_slow_path_func);
struct workqueue_struct *cio_work_q;

void css_schedule_eval(struct subchannel_id schid)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_sch_add(slow_subchannel_set, schid);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

void css_schedule_eval_all(void)
{
	unsigned long flags;

	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_fill(slow_subchannel_set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, 0);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
}

static int __unset_validpath(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);
	struct pmcw *pmcw = &sch->schib.pmcw;

	/*
	 * Keep only those subchannels that do not have an operational
	 * device attached to them. Operability is derived from the PAM
	 * and POM values of the pmcw; OPM additionally masks out any
	 * path that is currently varied off and must not be considered.
	 */
	if (sch->st == SUBCHANNEL_TYPE_IO &&
	    (sch->opm & pmcw->pam & pmcw->pom))
		idset_sch_del(set, sch->schid);

	return 0;
}

static int __unset_online(struct device *dev, void *data)
{
	struct idset *set = data;
	struct subchannel *sch = to_subchannel(dev);

	if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena)
		idset_sch_del(set, sch->schid);

	return 0;
}

void css_schedule_eval_cond(enum css_eval_cond cond, unsigned long delay)
{
	unsigned long flags;
	struct idset *set;

	/* Find unregistered subchannels. */
	set = idset_sch_new();
	if (!set) {
		/* Fallback. */
		css_schedule_eval_all();
		return;
	}
	idset_fill(set);
	switch (cond) {
	case CSS_EVAL_NO_PATH:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_validpath);
		break;
	case CSS_EVAL_NOT_ONLINE:
		bus_for_each_dev(&css_bus_type, NULL, set, __unset_online);
		break;
	default:
		break;
	}

	/* Apply to slow_subchannel_set. */
	spin_lock_irqsave(&slow_subchannel_lock, flags);
	idset_add_set(slow_subchannel_set, set);
	atomic_set(&css_eval_scheduled, 1);
	queue_delayed_work(cio_work_q, &slow_path_work, delay);
	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
	idset_free(set);
}

void css_wait_for_slow_path(void)
{
	flush_workqueue(cio_work_q);
}

/* Schedule reprobing of all subchannels with no valid operational path. */
void css_schedule_reprobe(void)
{
	/* Schedule with a delay to allow merging of subsequent calls. */
	css_schedule_eval_cond(CSS_EVAL_NO_PATH, 1 * HZ);
}
EXPORT_SYMBOL_GPL(css_schedule_reprobe);

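/*
 * Worked example (hypothetical values) for CSS_EVAL_NO_PATH: an I/O
 * subchannel with pam = 0x80, pom = 0x80 and opm = 0x80 has a usable
 * path (0x80 & 0x80 & 0x80 != 0) and is removed from the candidate set
 * by __unset_validpath(); one whose only installed path is varied off
 * (opm = 0x00) stays in the set and gets re-evaluated after the delay.
 */
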
/*
 * Called from the machine check handler for subchannel report words.
 */
static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
{
	struct subchannel_id mchk_schid;
	struct subchannel *sch;

	if (overflow) {
		css_schedule_eval_all();
		return;
	}
	CIO_CRW_EVENT(2, "CRW0 reports slct=%d, oflw=%d, "
		      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
		      crw0->slct, crw0->oflw, crw0->chn, crw0->rsc, crw0->anc,
		      crw0->erc, crw0->rsid);
	if (crw1)
		CIO_CRW_EVENT(2, "CRW1 reports slct=%d, oflw=%d, "
			      "chn=%d, rsc=%X, anc=%d, erc=%X, rsid=%X\n",
			      crw1->slct, crw1->oflw, crw1->chn, crw1->rsc,
			      crw1->anc, crw1->erc, crw1->rsid);
	init_subchannel_id(&mchk_schid);
	mchk_schid.sch_no = crw0->rsid;
	if (crw1)
		mchk_schid.ssid = (crw1->rsid >> 4) & 3;

	if (crw0->erc == CRW_ERC_PMOD) {
		sch = get_subchannel_by_schid(mchk_schid);
		if (sch) {
			css_update_ssd_info(sch);
			put_device(&sch->dev);
		}
	}
	/*
	 * Since we are always presented with IPI in the CRW, we have to
	 * use stsch() to find out if the subchannel in question has come
	 * or gone.
	 */
	css_evaluate_subchannel(mchk_schid, 0);
}

static void __init
css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
{
	struct cpuid cpu_id;

	if (css_general_characteristics.mcss) {
		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
		css->global_pgid.pgid_high.ext_cssid.cssid =
			css->id_valid ? css->cssid : 0;
	} else {
		css->global_pgid.pgid_high.cpu_addr = stap();
	}
	get_cpu_id(&cpu_id);
	css->global_pgid.cpu_id = cpu_id.ident;
	css->global_pgid.cpu_model = cpu_id.machine;
	css->global_pgid.tod_high = tod_high;
}

static void channel_subsystem_release(struct device *dev)
{
	struct channel_subsystem *css = to_css(dev);

	mutex_destroy(&css->mutex);
	kfree(css);
}

static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
			       char *buf)
{
	struct channel_subsystem *css = to_css(dev);

	if (!css->id_valid)
		return -EINVAL;

	return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);

static ssize_t rescan_store(struct device *dev, struct device_attribute *a,
			    const char *buf, size_t count)
{
	CIO_TRACE_EVENT(4, "usr-rescan");

	css_schedule_eval_all();
	css_complete_work();

	return count;
}
static DEVICE_ATTR_WO(rescan);

static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
			      char *buf)
{
	struct channel_subsystem *css = to_css(dev);
	int ret;

	mutex_lock(&css->mutex);
	ret = sprintf(buf, "%x\n", css->cm_enabled);
	mutex_unlock(&css->mutex);
	return ret;
}

static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
			       const char *buf, size_t count)
{
	struct channel_subsystem *css = to_css(dev);
	unsigned long val;
	int ret;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;
	mutex_lock(&css->mutex);
	switch (val) {
	case 0:
		ret = css->cm_enabled ? chsc_secm(css, 0) : 0;
		break;
	case 1:
		ret = css->cm_enabled ? 0 : chsc_secm(css, 1);
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&css->mutex);
	return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);

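/*
 * Illustrative usage from user space (the paths assume the single css
 * device is named css0, as set up in setup_css() below):
 *
 *	# trigger a full rescan and wait for it to settle
 *	echo 1 > /sys/devices/css0/rescan
 *	# enable channel-path measurements, if the secm facility exists
 *	echo 1 > /sys/devices/css0/cm_enable
 */
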
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
			      int index)
{
	return css_chsc_characteristics.secm ? attr->mode : 0;
}

static struct attribute *cssdev_attrs[] = {
	&dev_attr_real_cssid.attr,
	&dev_attr_rescan.attr,
	NULL,
};

static struct attribute_group cssdev_attr_group = {
	.attrs = cssdev_attrs,
};

static struct attribute *cssdev_cm_attrs[] = {
	&dev_attr_cm_enable.attr,
	NULL,
};

static struct attribute_group cssdev_cm_attr_group = {
	.attrs = cssdev_cm_attrs,
	.is_visible = cm_enable_mode,
};

static const struct attribute_group *cssdev_attr_groups[] = {
	&cssdev_attr_group,
	&cssdev_cm_attr_group,
	NULL,
};

static int __init setup_css(int nr)
{
	struct channel_subsystem *css;
	int ret;

	css = kzalloc(sizeof(*css), GFP_KERNEL);
	if (!css)
		return -ENOMEM;

	channel_subsystems[nr] = css;
	dev_set_name(&css->device, "css%x", nr);
	css->device.groups = cssdev_attr_groups;
	css->device.release = channel_subsystem_release;
	/*
	 * We currently allocate notifier bits with this (using
	 * css->device as the device argument with the DMA API)
	 * and are fine with 64 bit addresses.
	 */
	ret = dma_coerce_mask_and_coherent(&css->device, DMA_BIT_MASK(64));
	if (ret) {
		kfree(css);
		goto out_err;
	}

	mutex_init(&css->mutex);
	ret = chsc_get_cssid_iid(nr, &css->cssid, &css->iid);
	if (!ret) {
		css->id_valid = true;
		pr_info("Partition identifier %01x.%01x\n", css->cssid,
			css->iid);
	}
	css_generate_pgid(css, (u32) (get_tod_clock() >> 32));

	ret = device_register(&css->device);
	if (ret) {
		put_device(&css->device);
		goto out_err;
	}

	css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
					 GFP_KERNEL);
	if (!css->pseudo_subchannel) {
		device_unregister(&css->device);
		ret = -ENOMEM;
		goto out_err;
	}

	css->pseudo_subchannel->dev.parent = &css->device;
	css->pseudo_subchannel->dev.release = css_subchannel_release;
	mutex_init(&css->pseudo_subchannel->reg_mutex);
	ret = css_sch_create_locks(css->pseudo_subchannel);
	if (ret) {
		kfree(css->pseudo_subchannel);
		device_unregister(&css->device);
		goto out_err;
	}

	dev_set_name(&css->pseudo_subchannel->dev, "defunct");
	ret = device_register(&css->pseudo_subchannel->dev);
	if (ret) {
		put_device(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
		goto out_err;
	}

	return ret;
out_err:
	channel_subsystems[nr] = NULL;
	return ret;
}

static int css_reboot_event(struct notifier_block *this,
			    unsigned long event,
			    void *ptr)
{
	struct channel_subsystem *css;
	int ret;

	ret = NOTIFY_DONE;
	for_each_css(css) {
		mutex_lock(&css->mutex);
		if (css->cm_enabled)
			if (chsc_secm(css, 0))
				ret = NOTIFY_BAD;
		mutex_unlock(&css->mutex);
	}

	return ret;
}

static struct notifier_block css_reboot_notifier = {
	.notifier_call = css_reboot_event,
};

#define CIO_DMA_GFP (GFP_KERNEL | __GFP_ZERO)
static struct gen_pool *cio_dma_pool;

/* Currently cio supports only a single css */
struct device *cio_get_dma_css_dev(void)
{
	return &channel_subsystems[0]->device;
}

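/*
 * Note on the pool geometry below (summary, no extra driver logic):
 * gen_pool_create(3, -1) creates a pool with a minimum allocation order
 * of 3, i.e. allocations are rounded up to multiples of 8 bytes, which
 * matches the doubleword alignment that channel-program structures need.
 */
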
struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
{
	struct gen_pool *gp_dma;
	void *cpu_addr;
	dma_addr_t dma_addr;
	int i;

	gp_dma = gen_pool_create(3, -1);
	if (!gp_dma)
		return NULL;
	for (i = 0; i < nr_pages; ++i) {
		cpu_addr = dma_alloc_coherent(dma_dev, PAGE_SIZE, &dma_addr,
					      CIO_DMA_GFP);
		if (!cpu_addr)
			return gp_dma;
		gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
				  dma_addr, PAGE_SIZE, -1);
	}
	return gp_dma;
}

static void __gp_dma_free_dma(struct gen_pool *pool,
			      struct gen_pool_chunk *chunk, void *data)
{
	size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

	dma_free_coherent((struct device *) data, chunk_size,
			  (void *) chunk->start_addr,
			  (dma_addr_t) chunk->phys_addr);
}

void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
{
	if (!gp_dma)
		return;
	/* this is quite ugly but no better idea */
	gen_pool_for_each_chunk(gp_dma, __gp_dma_free_dma, dma_dev);
	gen_pool_destroy(gp_dma);
}

static int cio_dma_pool_init(void)
{
	/* No need to free up the resources: compiled in */
	cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
	if (!cio_dma_pool)
		return -ENOMEM;
	return 0;
}

void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
			size_t size)
{
	dma_addr_t dma_addr;
	unsigned long addr;
	size_t chunk_size;

	if (!gp_dma)
		return NULL;
	addr = gen_pool_alloc(gp_dma, size);
	while (!addr) {
		chunk_size = round_up(size, PAGE_SIZE);
		addr = (unsigned long) dma_alloc_coherent(dma_dev,
					chunk_size, &dma_addr, CIO_DMA_GFP);
		if (!addr)
			return NULL;
		gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
		addr = gen_pool_alloc(gp_dma, size);
	}
	return (void *) addr;
}

void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
{
	if (!cpu_addr)
		return;
	memset(cpu_addr, 0, size);
	gen_pool_free(gp_dma, (unsigned long) cpu_addr, size);
}

/*
 * Allocate dma memory from the css global pool. Intended for memory not
 * specific to any single device within the css. The allocated memory
 * is not guaranteed to be 31-bit addressable.
 *
 * Caution: Not suitable for early stuff like console.
 */
void *cio_dma_zalloc(size_t size)
{
	return cio_gp_dma_zalloc(cio_dma_pool, cio_get_dma_css_dev(), size);
}

void cio_dma_free(void *cpu_addr, size_t size)
{
	cio_gp_dma_free(cio_dma_pool, cpu_addr, size);
}

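/*
 * Illustrative usage sketch (not part of the driver): a css-wide buffer,
 * say for an adapter-interruption indicator, could be managed like this.
 * The names buf and BUF_SIZE are hypothetical.
 *
 *	void *buf = cio_dma_zalloc(BUF_SIZE);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... use buf; it is zeroed and DMA-mapped for the css device ...
 *	cio_dma_free(buf, BUF_SIZE);
 */
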
/*
 * Now that the driver core is running, we can set up our channel subsystem.
 * The struct subchannel instances are created during probing.
 */
static int __init css_bus_init(void)
{
	int ret, i;

	ret = chsc_init();
	if (ret)
		return ret;

	chsc_determine_css_characteristics();
	/* Try to enable MSS. */
	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
	if (ret)
		max_ssid = 0;
	else /* Success. */
		max_ssid = __MAX_SSID;

	ret = slow_subchannel_init();
	if (ret)
		goto out;

	ret = crw_register_handler(CRW_RSC_SCH, css_process_crw);
	if (ret)
		goto out;

	ret = bus_register(&css_bus_type);
	if (ret)
		goto out;

	/* Setup css structure. */
	for (i = 0; i <= MAX_CSS_IDX; i++) {
		ret = setup_css(i);
		if (ret)
			goto out_unregister;
	}
	ret = register_reboot_notifier(&css_reboot_notifier);
	if (ret)
		goto out_unregister;
	ret = cio_dma_pool_init();
	if (ret)
		goto out_unregister_rn;
	airq_init();
	css_init_done = 1;

	/* Enable default isc for I/O subchannels. */
	isc_register(IO_SCH_ISC);

	return 0;
out_unregister_rn:
	unregister_reboot_notifier(&css_reboot_notifier);
out_unregister:
	while (i-- > 0) {
		struct channel_subsystem *css = channel_subsystems[i];
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
out:
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	pr_alert("The CSS device driver initialization failed with "
		 "errno=%d\n", ret);
	return ret;
}

static void __init css_bus_cleanup(void)
{
	struct channel_subsystem *css;

	for_each_css(css) {
		device_unregister(&css->pseudo_subchannel->dev);
		device_unregister(&css->device);
	}
	bus_unregister(&css_bus_type);
	crw_unregister_handler(CRW_RSC_SCH);
	idset_free(slow_subchannel_set);
	chsc_init_cleanup();
	isc_unregister(IO_SCH_ISC);
}

static int __init channel_subsystem_init(void)
{
	int ret;

	ret = css_bus_init();
	if (ret)
		return ret;
	cio_work_q = create_singlethread_workqueue("cio");
	if (!cio_work_q) {
		ret = -ENOMEM;
		goto out_bus;
	}
	ret = io_subchannel_init();
	if (ret)
		goto out_wq;

	/* Register subchannels which are already in use. */
	cio_register_early_subchannels();
	/* Start initial subchannel evaluation. */
	css_schedule_eval_all();

	return ret;
out_wq:
	destroy_workqueue(cio_work_q);
out_bus:
	css_bus_cleanup();
	return ret;
}
subsys_initcall(channel_subsystem_init);

static int css_settle(struct device_driver *drv, void *unused)
{
	struct css_driver *cssdrv = to_cssdriver(drv);

	if (cssdrv->settle)
		return cssdrv->settle();
	return 0;
}

int css_complete_work(void)
{
	int ret;

	/* Wait for the evaluation of subchannels to finish. */
	ret = wait_event_interruptible(css_eval_wq,
				       atomic_read(&css_eval_scheduled) == 0);
	if (ret)
		return -EINTR;
	flush_workqueue(cio_work_q);
	/* Wait for the subchannel type specific initialization to finish */
	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
}

/*
 * Wait for the initialization of devices to finish, to make sure we are
 * done with our setup if the search for the root device starts.
 */
static int __init channel_subsystem_init_sync(void)
{
	css_complete_work();
	return 0;
}
subsys_initcall_sync(channel_subsystem_init_sync);

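/*
 * Illustrative usage sketch (not part of the driver): tools that need a
 * stable device view, e.g. during boot, can wait for css_complete_work()
 * via the cio_settle proc file defined below:
 *
 *	echo 1 > /proc/cio_settle
 *
 * The write blocks until pending CRWs are processed and all scheduled
 * subchannel evaluation has finished.
 */
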
#ifdef CONFIG_PROC_FS
static ssize_t cio_settle_write(struct file *file, const char __user *buf,
				size_t count, loff_t *ppos)
{
	int ret;

	/* Handle pending CRWs. */
	crw_wait_for_channel_report();
	ret = css_complete_work();

	return ret ? ret : count;
}

static const struct proc_ops cio_settle_proc_ops = {
	.proc_open = nonseekable_open,
	.proc_write = cio_settle_write,
	.proc_lseek = no_llseek,
};

static int __init cio_settle_init(void)
{
	struct proc_dir_entry *entry;

	entry = proc_create("cio_settle", S_IWUSR, NULL, &cio_settle_proc_ops);
	if (!entry)
		return -ENOMEM;
	return 0;
}
device_initcall(cio_settle_init);
#endif /* CONFIG_PROC_FS */

int sch_is_pseudo_sch(struct subchannel *sch)
{
	if (!sch->dev.parent)
		return 0;
	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
}

static int css_bus_match(struct device *dev, struct device_driver *drv)
{
	struct subchannel *sch = to_subchannel(dev);
	struct css_driver *driver = to_cssdriver(drv);
	struct css_device_id *id;

	/* When driver_override is set, only bind to the matching driver */
	if (sch->driver_override && strcmp(sch->driver_override, drv->name))
		return 0;

	for (id = driver->subchannel_type; id->match_flags; id++) {
		if (sch->st == id->type)
			return 1;
	}

	return 0;
}

static int css_probe(struct device *dev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(dev);
	sch->driver = to_cssdriver(dev->driver);
	ret = sch->driver->probe ? sch->driver->probe(sch) : 0;
	if (ret)
		sch->driver = NULL;
	return ret;
}

static void css_remove(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver->remove)
		sch->driver->remove(sch);
	sch->driver = NULL;
}

static void css_shutdown(struct device *dev)
{
	struct subchannel *sch;

	sch = to_subchannel(dev);
	if (sch->driver && sch->driver->shutdown)
		sch->driver->shutdown(sch);
}

static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
	const struct subchannel *sch = to_subchannel(dev);
	int ret;

	ret = add_uevent_var(env, "ST=%01X", sch->st);
	if (ret)
		return ret;
	ret = add_uevent_var(env, "MODALIAS=css:t%01X", sch->st);
	return ret;
}

static struct bus_type css_bus_type = {
	.name = "css",
	.match = css_bus_match,
	.probe = css_probe,
	.remove = css_remove,
	.shutdown = css_shutdown,
	.uevent = css_uevent,
};

/**
 * css_driver_register - register a css driver
 * @cdrv: css driver to register
 *
 * This is mainly a wrapper around driver_register that sets name
 * and bus_type in the embedded struct device_driver correctly.
 */
int css_driver_register(struct css_driver *cdrv)
{
	cdrv->drv.bus = &css_bus_type;
	return driver_register(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_register);

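/*
 * Illustrative registration sketch (not part of the driver): a minimal
 * css driver, assuming my_css_ids and my_probe/my_remove are defined by
 * the caller and css.h provides the usual struct css_driver layout:
 *
 *	static struct css_driver my_css_driver = {
 *		.drv = {
 *			.name = "my_css_drv",
 *			.owner = THIS_MODULE,
 *		},
 *		.subchannel_type = my_css_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	ret = css_driver_register(&my_css_driver);
 */
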
/**
 * css_driver_unregister - unregister a css driver
 * @cdrv: css driver to unregister
 *
 * This is a wrapper around driver_unregister.
 */
void css_driver_unregister(struct css_driver *cdrv)
{
	driver_unregister(&cdrv->drv);
}
EXPORT_SYMBOL_GPL(css_driver_unregister);