// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/memregion.h>
#include <linux/genalloc.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/sort.h>
#include <linux/idr.h>
#include <cxlmem.h>
#include <cxl.h>
#include "core.h"

/**
 * DOC: cxl core region
 *
 * CXL Regions represent mapped memory capacity in system physical address
 * space. Whereas the CXL Root Decoders identify the bounds of potential CXL
 * Memory ranges, Regions represent the active mapped capacity by the HDM
 * Decoder Capability structures throughout the Host Bridges, Switches, and
 * Endpoints in the topology.
 *
 * Region configuration has ordering constraints. UUID may be set at any time
 * but is only visible for persistent regions.
 * 1. Interleave granularity
 * 2. Interleave size
 * 3. Decoder targets
 */

static struct cxl_region *to_cxl_region(struct device *dev);

static ssize_t uuid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (cxlr->mode != CXL_DECODER_PMEM)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%pUb\n", &p->uuid);
	up_read(&cxl_region_rwsem);

	return rc;
}

static int is_dup(struct device *match, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	uuid_t *uuid = data;

	if (!is_cxl_region(match))
		return 0;

	lockdep_assert_held(&cxl_region_rwsem);
	cxlr = to_cxl_region(match);
	p = &cxlr->params;

	if (uuid_equal(&p->uuid, uuid)) {
		dev_dbg(match, "already has uuid: %pUb\n", uuid);
		return -EBUSY;
	}

	return 0;
}

static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	uuid_t temp;
	ssize_t rc;

	if (len != UUID_STRING_LEN + 1)
		return -EINVAL;

	rc = uuid_parse(buf, &temp);
	if (rc)
		return rc;

	if (uuid_is_null(&temp))
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (uuid_equal(&p->uuid, &temp))
		goto out;

	rc = -EBUSY;
	if (p->state >= CXL_CONFIG_ACTIVE)
		goto out;

	rc = bus_for_each_dev(&cxl_bus_type, NULL, &temp, is_dup);
	if (rc < 0)
		goto out;

	uuid_copy(&p->uuid, &temp);
out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(uuid);

static struct cxl_region_ref *cxl_rr_load(struct cxl_port *port,
					  struct cxl_region *cxlr)
{
	return xa_load(&port->regions, (unsigned long)cxlr);
}

static int cxl_region_invalidate_memregion(struct cxl_region *cxlr)
{
	if (!cpu_cache_has_invalidate_memregion()) {
		if (IS_ENABLED(CONFIG_CXL_REGION_INVALIDATION_TEST)) {
			dev_warn_once(
				&cxlr->dev,
				"Bypassing cpu_cache_invalidate_memregion() for testing!\n");
			return 0;
		} else {
			dev_err(&cxlr->dev,
				"Failed to synchronize CPU cache state\n");
			return -ENXIO;
		}
	}

	cpu_cache_invalidate_memregion(IORES_DESC_CXL);
	return 0;
}
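/*
 * Reset is the inverse of commit: cxl_region_decode_commit() below
 * programs decoders from the endpoint port upward, so teardown walks
 * each target from the topmost port under the root back down (via the
 * cxl_ep ->next links) before resetting the endpoint decoder itself.
 * RCH-attached devices (cxlds->rcd) have no switched hierarchy to
 * unwind and skip straight to the endpoint reset.
 */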
static int cxl_region_decode_reset(struct cxl_region *cxlr, int count)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	/*
	 * Before region teardown attempt to flush, and if the flush
	 * fails cancel the region teardown for data consistency
	 * concerns
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		return rc;

	for (i = count - 1; i >= 0; i--) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_port *iter = cxled_to_port(cxled);
		struct cxl_dev_state *cxlds = cxlmd->cxlds;
		struct cxl_ep *ep;

		if (cxlds->rcd)
			goto endpoint_reset;

		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			struct cxl_region_ref *cxl_rr;
			struct cxl_decoder *cxld;

			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			if (cxld->reset)
				rc = cxld->reset(cxld);
			if (rc)
				return rc;
			set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
		}

endpoint_reset:
		rc = cxled->cxld.reset(&cxled->cxld);
		if (rc)
			return rc;
		set_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);
	}

	/* all decoders associated with this region have been torn down */
	clear_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags);

	return 0;
}

static int commit_decoder(struct cxl_decoder *cxld)
{
	struct cxl_switch_decoder *cxlsd = NULL;

	if (cxld->commit)
		return cxld->commit(cxld);

	if (is_switch_decoder(&cxld->dev))
		cxlsd = to_cxl_switch_decoder(&cxld->dev);

	if (dev_WARN_ONCE(&cxld->dev, !cxlsd || cxlsd->nr_targets > 1,
			  "->commit() is required\n"))
		return -ENXIO;
	return 0;
}

static int cxl_region_decode_commit(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_region_ref *cxl_rr;
		struct cxl_decoder *cxld;
		struct cxl_port *iter;
		struct cxl_ep *ep;

		/* commit bottom up */
		for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
		     iter = to_cxl_port(iter->dev.parent)) {
			cxl_rr = cxl_rr_load(iter, cxlr);
			cxld = cxl_rr->decoder;
			rc = commit_decoder(cxld);
			if (rc)
				break;
		}

		if (rc) {
			/* programming @iter failed, teardown */
			for (ep = cxl_ep_load(iter, cxlmd); ep && iter;
			     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
				cxl_rr = cxl_rr_load(iter, cxlr);
				cxld = cxl_rr->decoder;
				if (cxld->reset)
					cxld->reset(cxld);
			}

			cxled->cxld.reset(&cxled->cxld);
			goto err;
		}
	}

	return 0;

err:
	/* undo the targets that were successfully committed */
	cxl_region_decode_reset(cxlr, i);
	return rc;
}
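/*
 * The "commit" attribute is the userspace trigger for the decode
 * programming above, e.g. (sketch, assuming a fully configured region0):
 *
 *	echo 1 > /sys/bus/cxl/devices/region0/commit	# program decoders
 *	echo 0 > /sys/bus/cxl/devices/region0/commit	# reset decoders
 *
 * Note that writing 0 detaches the region driver with the rwsem
 * dropped, hence the CXL_CONFIG_RESET_PENDING revalidation below.
 */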
static ssize_t commit_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	bool commit;
	ssize_t rc;

	rc = kstrtobool(buf, &commit);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	/* Already in the requested state? */
	if (commit && p->state >= CXL_CONFIG_COMMIT)
		goto out;
	if (!commit && p->state < CXL_CONFIG_COMMIT)
		goto out;

	/* Not ready to commit? */
	if (commit && p->state < CXL_CONFIG_ACTIVE) {
		rc = -ENXIO;
		goto out;
	}

	/*
	 * Invalidate caches before region setup to drop any speculative
	 * consumption of this address space
	 */
	rc = cxl_region_invalidate_memregion(cxlr);
	if (rc)
		goto out;

	if (commit) {
		rc = cxl_region_decode_commit(cxlr);
		if (rc == 0)
			p->state = CXL_CONFIG_COMMIT;
	} else {
		p->state = CXL_CONFIG_RESET_PENDING;
		up_write(&cxl_region_rwsem);
		device_release_driver(&cxlr->dev);
		down_write(&cxl_region_rwsem);

		/*
		 * The lock was dropped, so need to revalidate that the reset is
		 * still pending.
		 */
		if (p->state == CXL_CONFIG_RESET_PENDING) {
			rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
			/*
			 * Revert to committed since there may still be active
			 * decoders associated with this region, or move forward
			 * to active to mark the reset successful
			 */
			if (rc)
				p->state = CXL_CONFIG_COMMIT;
			else
				p->state = CXL_CONFIG_ACTIVE;
		}
	}

out:
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;
	return len;
}

static ssize_t commit_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->state >= CXL_CONFIG_COMMIT);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(commit);

static umode_t cxl_region_visible(struct kobject *kobj, struct attribute *a,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);

	/*
	 * Support tooling that expects to find a 'uuid' attribute for all
	 * regions regardless of mode.
	 */
	if (a == &dev_attr_uuid.attr && cxlr->mode != CXL_DECODER_PMEM)
		return 0444;
	return a->mode;
}

static ssize_t interleave_ways_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_ways);
	up_read(&cxl_region_rwsem);

	return rc;
}

static const struct attribute_group *get_cxl_region_target_group(void);
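/*
 * Interleave ways values follow the CXL eiw encoding: powers of 2 from
 * 1 to 16 plus 3, 6, and 12 (anything else fails ways_to_eiw()). As a
 * worked example of the check below: with a x2 host bridge interleave a
 * x6 region is rejected since 6 / 2 = 3 is not a power of 2, while a x4
 * region is accepted.
 */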
static ssize_t interleave_ways_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	unsigned int val, save;
	int rc;
	u8 iw;

	rc = kstrtouint(buf, 0, &val);
	if (rc)
		return rc;

	rc = ways_to_eiw(val, &iw);
	if (rc)
		return rc;

	/*
	 * Even for x3, x6, and x12 interleaves the region interleave must be a
	 * power of 2 multiple of the host bridge interleave.
	 */
	if (!is_power_of_2(val / cxld->interleave_ways) ||
	    (val % cxld->interleave_ways)) {
		dev_dbg(&cxlr->dev, "invalid interleave: %d\n", val);
		return -EINVAL;
	}

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	save = p->interleave_ways;
	p->interleave_ways = val;
	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		p->interleave_ways = save;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_ways);

static ssize_t interleave_granularity_show(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	rc = sysfs_emit(buf, "%d\n", p->interleave_granularity);
	up_read(&cxl_region_rwsem);

	return rc;
}
static ssize_t interleave_granularity_store(struct device *dev,
					    struct device_attribute *attr,
					    const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc, val;
	u16 ig;

	rc = kstrtoint(buf, 0, &val);
	if (rc)
		return rc;

	rc = granularity_to_eig(val, &ig);
	if (rc)
		return rc;

	/*
	 * When the host-bridge is interleaved, disallow region granularity !=
	 * root granularity. Regions with a granularity less than the root
	 * interleave result in needing multiple endpoints to support a single
	 * slot in the interleave (possible to support in the future). Regions
	 * with a granularity greater than the root interleave result in invalid
	 * DPA translations (invalid to support).
	 */
	if (cxld->interleave_ways > 1 && val != cxld->interleave_granularity)
		return -EINVAL;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		rc = -EBUSY;
		goto out;
	}

	p->interleave_granularity = val;
out:
	up_write(&cxl_region_rwsem);
	if (rc)
		return rc;
	return len;
}
static DEVICE_ATTR_RW(interleave_granularity);

static ssize_t resource_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 resource = -1ULL;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		resource = p->res->start;
	rc = sysfs_emit(buf, "%#llx\n", resource);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RO(resource);

static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);

	return sysfs_emit(buf, "%s\n", cxl_decoder_mode_name(cxlr->mode));
}
static DEVICE_ATTR_RO(mode);

static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_region_params *p = &cxlr->params;
	struct resource *res;
	u32 remainder = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	/* Nothing to do... */
	if (p->res && resource_size(p->res) == size)
		return 0;

	/* To change size the old size must be freed first */
	if (p->res)
		return -EBUSY;

	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE)
		return -EBUSY;

	/* ways, granularity and uuid (if PMEM) need to be set before HPA */
	if (!p->interleave_ways || !p->interleave_granularity ||
	    (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
		return -ENXIO;

	div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
	if (remainder)
		return -EINVAL;

	res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
				    dev_name(&cxlr->dev));
	if (IS_ERR(res)) {
		dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
			PTR_ERR(res));
		return PTR_ERR(res);
	}

	p->res = res;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	return 0;
}
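/*
 * Example sizing (sketch): per the div_u64_rem() check in alloc_hpa()
 * above, a x4 region must be sized in 4 * 256M = 1G increments, e.g.:
 *
 *	echo $((1 << 30)) > /sys/bus/cxl/devices/region0/size
 *
 * Writing 0 to "size" releases the allocation via free_hpa() below,
 * which is only permitted before the region goes active.
 */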
static void cxl_region_iomem_release(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	if (device_is_registered(&cxlr->dev))
		lockdep_assert_held_write(&cxl_region_rwsem);
	if (p->res) {
		/*
		 * Autodiscovered regions may not have been able to insert their
		 * resource.
		 */
		if (p->res->parent)
			remove_resource(p->res);
		kfree(p->res);
		p->res = NULL;
	}
}

static int free_hpa(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!p->res)
		return 0;

	if (p->state >= CXL_CONFIG_ACTIVE)
		return -EBUSY;

	cxl_region_iomem_release(cxlr);
	p->state = CXL_CONFIG_IDLE;
	return 0;
}

static ssize_t size_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t len)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	u64 val;
	int rc;

	rc = kstrtou64(buf, 0, &val);
	if (rc)
		return rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (val)
		rc = alloc_hpa(cxlr, val);
	else
		rc = free_hpa(cxlr);
	up_write(&cxl_region_rwsem);

	if (rc)
		return rc;

	return len;
}

static ssize_t size_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	u64 size = 0;
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;
	if (p->res)
		size = resource_size(p->res);
	rc = sysfs_emit(buf, "%#llx\n", size);
	up_read(&cxl_region_rwsem);

	return rc;
}
static DEVICE_ATTR_RW(size);

static struct attribute *cxl_region_attrs[] = {
	&dev_attr_uuid.attr,
	&dev_attr_commit.attr,
	&dev_attr_interleave_ways.attr,
	&dev_attr_interleave_granularity.attr,
	&dev_attr_resource.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	NULL,
};

static const struct attribute_group cxl_region_group = {
	.attrs = cxl_region_attrs,
	.is_visible = cxl_region_visible,
};

static size_t show_targetN(struct cxl_region *cxlr, char *buf, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	cxled = p->targets[pos];
	if (!cxled)
		rc = sysfs_emit(buf, "\n");
	else
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxled->cxld.dev));
out:
	up_read(&cxl_region_rwsem);

	return rc;
}

static int match_free_decoder(struct device *dev, void *data)
{
	struct cxl_decoder *cxld;
	int *id = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);

	/* enforce ordered allocation */
	if (cxld->id != *id)
		return 0;

	if (!cxld->region)
		return 1;

	(*id)++;

	return 0;
}

static int match_auto_decoder(struct device *dev, void *data)
{
	struct cxl_region_params *p = data;
	struct cxl_decoder *cxld;
	struct range *r;

	if (!is_switch_decoder(dev))
		return 0;

	cxld = to_cxl_decoder(dev);
	r = &cxld->hpa_range;

	if (p->res && p->res->start == r->start && p->res->end == r->end)
		return 1;

	return 0;
}
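/*
 * Decoder selection policy: user-created regions take the lowest-id
 * free switch decoder (match_free_decoder() above enforces in-order
 * allocation to match hardware decoder commit ordering), while
 * autodiscovered regions (CXL_REGION_F_AUTO) match the decoder whose
 * HPA range platform firmware already programmed.
 */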
static struct cxl_decoder *cxl_region_find_decoder(struct cxl_port *port,
						   struct cxl_region *cxlr)
{
	struct device *dev;
	int id = 0;

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		dev = device_find_child(&port->dev, &cxlr->params,
					match_auto_decoder);
	else
		dev = device_find_child(&port->dev, &id, match_free_decoder);
	if (!dev)
		return NULL;
	/*
	 * This decoder stays registered as long as the endpoint decoder is
	 * registered, and endpoint decoder unregistration holds the
	 * cxl_region_rwsem over unregister events, so no need to hold on to
	 * this extra reference.
	 */
	put_device(dev);
	return to_cxl_decoder(dev);
}

static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port,
					       struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_region_ref *cxl_rr, *iter;
	unsigned long index;
	int rc;

	xa_for_each(&port->regions, index, iter) {
		struct cxl_region_params *ip = &iter->region->params;

		if (!ip->res)
			continue;

		if (ip->res->start > p->res->start) {
			dev_dbg(&cxlr->dev,
				"%s: HPA order violation %s:%pr vs %pr\n",
				dev_name(&port->dev),
				dev_name(&iter->region->dev), ip->res, p->res);
			return ERR_PTR(-EBUSY);
		}
	}

	cxl_rr = kzalloc(sizeof(*cxl_rr), GFP_KERNEL);
	if (!cxl_rr)
		return ERR_PTR(-ENOMEM);
	cxl_rr->port = port;
	cxl_rr->region = cxlr;
	cxl_rr->nr_targets = 1;
	xa_init(&cxl_rr->endpoints);

	rc = xa_insert(&port->regions, (unsigned long)cxlr, cxl_rr, GFP_KERNEL);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track region reference: %d\n",
			dev_name(&port->dev), rc);
		kfree(cxl_rr);
		return ERR_PTR(rc);
	}

	return cxl_rr;
}

static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr)
{
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;

	if (!cxld)
		return;

	dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n");
	if (cxld->region == cxlr) {
		cxld->region = NULL;
		put_device(&cxlr->dev);
	}
}

static void free_region_ref(struct cxl_region_ref *cxl_rr)
{
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;

	cxl_rr_free_decoder(cxl_rr);
	xa_erase(&port->regions, (unsigned long)cxlr);
	xa_destroy(&cxl_rr->endpoints);
	kfree(cxl_rr);
}

static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr,
			 struct cxl_endpoint_decoder *cxled)
{
	int rc;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_ep *ep = cxl_ep_load(port, cxled_to_memdev(cxled));

	if (ep) {
		rc = xa_insert(&cxl_rr->endpoints, (unsigned long)cxled, ep,
			       GFP_KERNEL);
		if (rc)
			return rc;
	}
	cxl_rr->nr_eps++;

	if (!cxld->region) {
		cxld->region = cxlr;
		get_device(&cxlr->dev);
	}

	return 0;
}
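/*
 * A cxl_region_ref tracks one region's interest in one port: @nr_eps
 * counts the endpoints pinned below that port and @nr_targets counts
 * the distinct downstream ports its decoder must route to. The ref is
 * created on first attach and freed when the last endpoint detaches.
 */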
static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr,
				struct cxl_endpoint_decoder *cxled,
				struct cxl_region_ref *cxl_rr)
{
	struct cxl_decoder *cxld;

	if (port == cxled_to_port(cxled))
		cxld = &cxled->cxld;
	else
		cxld = cxl_region_find_decoder(port, cxlr);
	if (!cxld) {
		dev_dbg(&cxlr->dev, "%s: no decoder available\n",
			dev_name(&port->dev));
		return -EBUSY;
	}

	if (cxld->region) {
		dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n",
			dev_name(&port->dev), dev_name(&cxld->dev),
			dev_name(&cxld->region->dev));
		return -EBUSY;
	}

	/*
	 * Endpoints should already match the region type, but backstop that
	 * assumption with an assertion. Switch-decoders change mapping-type
	 * based on what is mapped when they are assigned to a region.
	 */
	dev_WARN_ONCE(&cxlr->dev,
		      port == cxled_to_port(cxled) &&
			      cxld->target_type != cxlr->type,
		      "%s:%s mismatch decoder type %d -> %d\n",
		      dev_name(&cxled_to_memdev(cxled)->dev),
		      dev_name(&cxld->dev), cxld->target_type, cxlr->type);
	cxld->target_type = cxlr->type;
	cxl_rr->decoder = cxld;
	return 0;
}
/**
 * cxl_port_attach_region() - track a region's interest in a port by endpoint
 * @port: port to add a new region reference 'struct cxl_region_ref'
 * @cxlr: region to attach to @port
 * @cxled: endpoint decoder used to create or further pin a region reference
 * @pos: interleave position of @cxled in @cxlr
 *
 * The attach event is an opportunity to validate CXL decode setup
 * constraints and record metadata needed for programming HDM decoders,
 * in particular decoder target lists.
 *
 * The steps are:
 *
 * - validate that there are no other regions with a higher HPA already
 *   associated with @port
 * - establish a region reference if one is not already present
 *
 *   - additionally allocate a decoder instance that will host @cxlr on
 *     @port
 *
 * - pin the region reference by the endpoint
 * - account for how many entries in @port's target list are needed to
 *   cover all of the added endpoints.
 */
static int cxl_port_attach_region(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_ref *cxl_rr;
	bool nr_targets_inc = false;
	struct cxl_decoder *cxld;
	unsigned long index;
	int rc = -EBUSY;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (cxl_rr) {
		struct cxl_ep *ep_iter;
		int found = 0;

		/*
		 * Walk the existing endpoints that have been attached to
		 * @cxlr at @port and see if they share the same 'next' port
		 * in the downstream direction. I.e. endpoints that share common
		 * upstream switch.
		 */
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter == ep)
				continue;
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}

		/*
		 * New target port, or @port is an endpoint port that always
		 * accounts its own local decode as a target.
		 */
		if (!found || !ep->next) {
			cxl_rr->nr_targets++;
			nr_targets_inc = true;
		}
	} else {
		cxl_rr = alloc_region_ref(port, cxlr);
		if (IS_ERR(cxl_rr)) {
			dev_dbg(&cxlr->dev,
				"%s: failed to allocate region reference\n",
				dev_name(&port->dev));
			return PTR_ERR(cxl_rr);
		}
		nr_targets_inc = true;

		rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr);
		if (rc)
			goto out_erase;
	}
	cxld = cxl_rr->decoder;

	rc = cxl_rr_ep_add(cxl_rr, cxled);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: failed to track endpoint %s:%s reference\n",
			dev_name(&port->dev), dev_name(&cxlmd->dev),
			dev_name(&cxld->dev));
		goto out_erase;
	}

	dev_dbg(&cxlr->dev,
		"%s:%s %s add: %s:%s @ %d next: %s nr_eps: %d nr_targets: %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		dev_name(&cxld->dev), dev_name(&cxlmd->dev),
		dev_name(&cxled->cxld.dev), pos,
		ep ? ep->next ? dev_name(ep->next->uport_dev) :
				dev_name(&cxlmd->dev) :
		     "none",
		cxl_rr->nr_eps, cxl_rr->nr_targets);

	return 0;
out_erase:
	if (nr_targets_inc)
		cxl_rr->nr_targets--;
	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
	return rc;
}

static void cxl_port_detach_region(struct cxl_port *port,
				   struct cxl_region *cxlr,
				   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_region_ref *cxl_rr;
	struct cxl_ep *ep = NULL;

	lockdep_assert_held_write(&cxl_region_rwsem);

	cxl_rr = cxl_rr_load(port, cxlr);
	if (!cxl_rr)
		return;

	/*
	 * Endpoint ports do not carry cxl_ep references, and they
	 * never target more than one endpoint by definition
	 */
	if (cxl_rr->decoder == &cxled->cxld)
		cxl_rr->nr_eps--;
	else
		ep = xa_erase(&cxl_rr->endpoints, (unsigned long)cxled);
	if (ep) {
		struct cxl_ep *ep_iter;
		unsigned long index;
		int found = 0;

		cxl_rr->nr_eps--;
		xa_for_each(&cxl_rr->endpoints, index, ep_iter) {
			if (ep_iter->next == ep->next) {
				found++;
				break;
			}
		}
		if (!found)
			cxl_rr->nr_targets--;
	}

	if (cxl_rr->nr_eps == 0)
		free_region_ref(cxl_rr);
}

static int check_last_peer(struct cxl_endpoint_decoder *cxled,
			   struct cxl_ep *ep, struct cxl_region_ref *cxl_rr,
			   int distance)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region *cxlr = cxl_rr->region;
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled_peer;
	struct cxl_port *port = cxl_rr->port;
	struct cxl_memdev *cxlmd_peer;
	struct cxl_ep *ep_peer;
	int pos = cxled->pos;

	/*
	 * If this position wants to share a dport with the last endpoint mapped
	 * then that endpoint, at index 'position - distance', must also be
	 * mapped by this dport.
	 */
	if (pos < distance) {
		dev_dbg(&cxlr->dev, "%s:%s: cannot host %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	cxled_peer = p->targets[pos - distance];
	cxlmd_peer = cxled_to_memdev(cxled_peer);
	ep_peer = cxl_ep_load(port, cxlmd_peer);
	if (ep->dport != ep_peer->dport) {
		dev_dbg(&cxlr->dev,
			"%s:%s: %s:%s pos %d mismatched peer %s:%s\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos,
			dev_name(&cxlmd_peer->dev),
			dev_name(&cxled_peer->cxld.dev));
		return -ENXIO;
	}

	return 0;
}
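/*
 * Worked example for the math below (hypothetical numbers): a x4
 * region at 256 byte granularity across two host bridges gives each
 * host-bridge decoder iw = nr_targets = 2 and ig = parent_ig *
 * parent_iw = 256 * 2 = 512, i.e. each decoder level routes on the
 * address bits immediately above the bits its parent consumed.
 */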
static int cxl_port_setup_targets(struct cxl_port *port,
				  struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	int parent_iw, parent_ig, ig, iw, rc, inc = 0, pos = cxled->pos;
	struct cxl_port *parent_port = to_cxl_port(port->dev.parent);
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_ep *ep = cxl_ep_load(port, cxlmd);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_decoder *cxld = cxl_rr->decoder;
	struct cxl_switch_decoder *cxlsd;
	u16 eig, peig;
	u8 eiw, peiw;

	/*
	 * While root level decoders support x3, x6, x12, switch level
	 * decoders only support powers of 2 up to x16.
	 */
	if (!is_power_of_2(cxl_rr->nr_targets)) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid target count %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			cxl_rr->nr_targets);
		return -EINVAL;
	}

	cxlsd = to_cxl_switch_decoder(&cxld->dev);
	if (cxl_rr->nr_targets_set) {
		int i, distance;

		/*
		 * Passthrough decoders impose no distance requirements between
		 * peers
		 */
		if (cxl_rr->nr_targets == 1)
			distance = 0;
		else
			distance = p->nr_targets / cxl_rr->nr_targets;
		for (i = 0; i < cxl_rr->nr_targets_set; i++)
			if (ep->dport == cxlsd->target[i]) {
				rc = check_last_peer(cxled, ep, cxl_rr,
						     distance);
				if (rc)
					return rc;
				goto out_target_set;
			}
		goto add_target;
	}

	if (is_cxl_root(parent_port)) {
		parent_ig = cxlrd->cxlsd.cxld.interleave_granularity;
		parent_iw = cxlrd->cxlsd.cxld.interleave_ways;
		/*
		 * For purposes of address bit routing, use power-of-2 math for
		 * switch ports.
		 */
		if (!is_power_of_2(parent_iw))
			parent_iw /= 3;
	} else {
		struct cxl_region_ref *parent_rr;
		struct cxl_decoder *parent_cxld;

		parent_rr = cxl_rr_load(parent_port, cxlr);
		parent_cxld = parent_rr->decoder;
		parent_ig = parent_cxld->interleave_granularity;
		parent_iw = parent_cxld->interleave_ways;
	}

	rc = granularity_to_eig(parent_ig, &peig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent granularity: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_ig);
		return rc;
	}

	rc = ways_to_eiw(parent_iw, &peiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid parent interleave: %d\n",
			dev_name(parent_port->uport_dev),
			dev_name(&parent_port->dev), parent_iw);
		return rc;
	}

	iw = cxl_rr->nr_targets;
	rc = ways_to_eiw(iw, &eiw);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid port interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev), iw);
		return rc;
	}

	/*
	 * Interleave granularity is a multiple of @parent_port granularity.
	 * Multiplier is the parent port interleave ways.
	 */
	rc = granularity_to_eig(parent_ig * parent_iw, &eig);
	if (rc) {
		dev_dbg(&cxlr->dev,
			"%s: invalid granularity calculation (%d * %d)\n",
			dev_name(&parent_port->dev), parent_ig, parent_iw);
		return rc;
	}

	rc = eig_to_granularity(eig, &ig);
	if (rc) {
		dev_dbg(&cxlr->dev, "%s:%s: invalid interleave: %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			256 << eig);
		return rc;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxld->interleave_ways != iw ||
		    cxld->interleave_granularity != ig ||
		    cxld->hpa_range.start != p->res->start ||
		    cxld->hpa_range.end != p->res->end ||
		    ((cxld->flags & CXL_DECODER_F_ENABLE) == 0)) {
			dev_err(&cxlr->dev,
				"%s:%s %s expected iw: %d ig: %d %pr\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, iw, ig, p->res);
			dev_err(&cxlr->dev,
				"%s:%s %s got iw: %d ig: %d state: %s %#llx:%#llx\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				__func__, cxld->interleave_ways,
				cxld->interleave_granularity,
				(cxld->flags & CXL_DECODER_F_ENABLE) ?
					"enabled" :
					"disabled",
				cxld->hpa_range.start, cxld->hpa_range.end);
			return -ENXIO;
		}
	} else {
		cxld->interleave_ways = iw;
		cxld->interleave_granularity = ig;
		cxld->hpa_range = (struct range) {
			.start = p->res->start,
			.end = p->res->end,
		};
	}
	dev_dbg(&cxlr->dev, "%s:%s iw: %d ig: %d\n", dev_name(port->uport_dev),
		dev_name(&port->dev), iw, ig);
add_target:
	if (cxl_rr->nr_targets_set == cxl_rr->nr_targets) {
		dev_dbg(&cxlr->dev,
			"%s:%s: targets full trying to add %s:%s at %d\n",
			dev_name(port->uport_dev), dev_name(&port->dev),
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);
		return -ENXIO;
	}
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		if (cxlsd->target[cxl_rr->nr_targets_set] != ep->dport) {
			dev_dbg(&cxlr->dev, "%s:%s: %s expected %s at %d\n",
				dev_name(port->uport_dev), dev_name(&port->dev),
				dev_name(&cxlsd->cxld.dev),
				dev_name(ep->dport->dport_dev),
				cxl_rr->nr_targets_set);
			return -ENXIO;
		}
	} else
		cxlsd->target[cxl_rr->nr_targets_set] = ep->dport;
	inc = 1;
out_target_set:
	cxl_rr->nr_targets_set += inc;
	dev_dbg(&cxlr->dev, "%s:%s target[%d] = %s for %s:%s @ %d\n",
		dev_name(port->uport_dev), dev_name(&port->dev),
		cxl_rr->nr_targets_set - 1, dev_name(ep->dport->dport_dev),
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), pos);

	return 0;
}
static void cxl_port_reset_targets(struct cxl_port *port,
				   struct cxl_region *cxlr)
{
	struct cxl_region_ref *cxl_rr = cxl_rr_load(port, cxlr);
	struct cxl_decoder *cxld;

	/*
	 * After the last endpoint has been detached the entire cxl_rr may now
	 * be gone.
	 */
	if (!cxl_rr)
		return;
	cxl_rr->nr_targets_set = 0;

	cxld = cxl_rr->decoder;
	cxld->hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};
}

static void cxl_region_teardown_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;
	int i;

	/*
	 * In the auto-discovery case skip automatic teardown since the
	 * address space is already active
	 */
	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags))
		return;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		if (cxlds->rcd)
			continue;

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd))
			cxl_port_reset_targets(iter, cxlr);
	}
}
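/*
 * A region must be uniformly RCH (restricted CXL host, cxlds->rcd) or
 * VH attached: cxl_region_setup_targets() below counts each flavor and
 * fails a mix, since RCH endpoints have no switched decode hierarchy
 * to program.
 */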
static int cxl_region_setup_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_dev_state *cxlds;
	int i, rc, rch = 0, vh = 0;
	struct cxl_memdev *cxlmd;
	struct cxl_port *iter;
	struct cxl_ep *ep;

	for (i = 0; i < p->nr_targets; i++) {
		cxled = p->targets[i];
		cxlmd = cxled_to_memdev(cxled);
		cxlds = cxlmd->cxlds;

		/* validate that all targets agree on topology */
		if (!cxlds->rcd) {
			vh++;
		} else {
			rch++;
			continue;
		}

		iter = cxled_to_port(cxled);
		while (!is_cxl_root(to_cxl_port(iter->dev.parent)))
			iter = to_cxl_port(iter->dev.parent);

		/*
		 * Descend the topology tree programming / validating
		 * targets while looking for conflicts.
		 */
		for (ep = cxl_ep_load(iter, cxlmd); iter;
		     iter = ep->next, ep = cxl_ep_load(iter, cxlmd)) {
			rc = cxl_port_setup_targets(iter, cxlr, cxled);
			if (rc) {
				cxl_region_teardown_targets(cxlr);
				return rc;
			}
		}
	}

	if (rch && vh) {
		dev_err(&cxlr->dev, "mismatched CXL topologies detected\n");
		cxl_region_teardown_targets(cxlr);
		return -ENXIO;
	}

	return 0;
}

static int cxl_region_validate_position(struct cxl_region *cxlr,
					struct cxl_endpoint_decoder *cxled,
					int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	if (pos < 0 || pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		return -ENXIO;
	}

	if (p->targets[pos] == cxled)
		return 0;

	if (p->targets[pos]) {
		struct cxl_endpoint_decoder *cxled_target = p->targets[pos];
		struct cxl_memdev *cxlmd_target = cxled_to_memdev(cxled_target);

		dev_dbg(&cxlr->dev, "position %d already assigned to %s:%s\n",
			pos, dev_name(&cxlmd_target->dev),
			dev_name(&cxled_target->cxld.dev));
		return -EBUSY;
	}

	for (i = 0; i < p->interleave_ways; i++) {
		struct cxl_endpoint_decoder *cxled_target;
		struct cxl_memdev *cxlmd_target;

		cxled_target = p->targets[i];
		if (!cxled_target)
			continue;

		cxlmd_target = cxled_to_memdev(cxled_target);
		if (cxlmd_target == cxlmd) {
			dev_dbg(&cxlr->dev,
				"%s already specified at position %d via: %s\n",
				dev_name(&cxlmd->dev), pos,
				dev_name(&cxled_target->cxld.dev));
			return -EBUSY;
		}
	}

	return 0;
}

static int cxl_region_attach_position(struct cxl_region *cxlr,
				      struct cxl_root_decoder *cxlrd,
				      struct cxl_endpoint_decoder *cxled,
				      const struct cxl_dport *dport, int pos)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *iter;
	int rc;

	if (cxlrd->calc_hb(cxlrd, pos) != dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target position for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(&cxlrd->cxlsd.cxld.dev));
		return -ENXIO;
	}

	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent)) {
		rc = cxl_port_attach_region(iter, cxlr, cxled, pos);
		if (rc)
			goto err;
	}

	return 0;

err:
	for (iter = cxled_to_port(cxled); !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);
	return rc;
}
static int cxl_region_attach_auto(struct cxl_region *cxlr,
				  struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_region_params *p = &cxlr->params;

	if (cxled->state != CXL_DECODER_STATE_AUTO) {
		dev_err(&cxlr->dev,
			"%s: unable to add decoder to autodetected region\n",
			dev_name(&cxled->cxld.dev));
		return -EINVAL;
	}

	if (pos >= 0) {
		dev_dbg(&cxlr->dev, "%s: expected auto position, not %d\n",
			dev_name(&cxled->cxld.dev), pos);
		return -EINVAL;
	}

	if (p->nr_targets >= p->interleave_ways) {
		dev_err(&cxlr->dev, "%s: no more target slots available\n",
			dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	/*
	 * Temporarily record the endpoint decoder into the target array. Yes,
	 * this means that userspace can view devices in the wrong position
	 * before the region activates, and must be careful to understand when
	 * it might be racing region autodiscovery.
	 */
	pos = p->nr_targets;
	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	return 0;
}

static struct cxl_port *next_port(struct cxl_port *port)
{
	if (!port->parent_dport)
		return NULL;
	return port->parent_dport->port;
}

static int match_switch_decoder_by_range(struct device *dev, void *data)
{
	struct cxl_switch_decoder *cxlsd;
	struct range *r1, *r2 = data;

	if (!is_switch_decoder(dev))
		return 0;

	cxlsd = to_cxl_switch_decoder(dev);
	r1 = &cxlsd->cxld.hpa_range;

	if (is_root_decoder(dev))
		return range_contains(r1, r2);
	return (r1->start == r2->start && r1->end == r2->end);
}

static int find_pos_and_ways(struct cxl_port *port, struct range *range,
			     int *pos, int *ways)
{
	struct cxl_switch_decoder *cxlsd;
	struct cxl_port *parent;
	struct device *dev;
	int rc = -ENXIO;

	parent = next_port(port);
	if (!parent)
		return rc;

	dev = device_find_child(&parent->dev, range,
				match_switch_decoder_by_range);
	if (!dev) {
		dev_err(port->uport_dev,
			"failed to find decoder mapping %#llx-%#llx\n",
			range->start, range->end);
		return rc;
	}
	cxlsd = to_cxl_switch_decoder(dev);
	*ways = cxlsd->cxld.interleave_ways;

	for (int i = 0; i < *ways; i++) {
		if (cxlsd->target[i] == port->parent_dport) {
			*pos = i;
			rc = 0;
			break;
		}
	}
	put_device(dev);

	return rc;
}
/**
 * cxl_calc_interleave_pos() - calculate an endpoint position in a region
 * @cxled: endpoint decoder member of given region
 *
 * The endpoint position is calculated by traversing the topology from
 * the endpoint to the root decoder and iteratively applying this
 * calculation:
 *
 *    position = position * parent_ways + parent_pos;
 *
 * ...where @position is inferred from switch and root decoder target lists.
 *
 * Return: position >= 0 on success
 *	   -ENXIO on failure
 */
static int cxl_calc_interleave_pos(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *port = cxled_to_port(cxled);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *range = &cxled->cxld.hpa_range;
	int parent_ways = 0, parent_pos = 0, pos = 0;
	int rc;

	/*
	 * Example: the expected interleave order of the 4-way region shown
	 * below is: mem0, mem2, mem1, mem3
	 *
	 *		 root_port
	 *		/	  \
	 *	host_bridge_0	host_bridge_1
	 *	  |	|	  |	|
	 *	 mem0  mem1	 mem2  mem3
	 *
	 * In the example the calculator will iterate twice. The first iteration
	 * uses the mem position in the host-bridge and the ways of the host-
	 * bridge to generate the first, or local, position. The second
	 * iteration uses the host-bridge position in the root_port and the ways
	 * of the root_port to refine the position.
	 *
	 * A trace of the calculation per endpoint looks like this:
	 * mem0: pos = 0 * 2 + 0    mem2: pos = 0 * 2 + 0
	 *       pos = 0 * 2 + 0          pos = 0 * 2 + 1
	 *       pos: 0                   pos: 1
	 *
	 * mem1: pos = 0 * 2 + 1    mem3: pos = 0 * 2 + 1
	 *       pos = 1 * 2 + 0          pos = 1 * 2 + 1
	 *       pos: 2                   pos: 3
	 *
	 * Note that while this example is simple, the method applies to more
	 * complex topologies, including those with switches.
	 */

	/* Iterate from endpoint to root_port refining the position */
	for (iter = port; iter; iter = next_port(iter)) {
		if (is_cxl_root(iter))
			break;

		rc = find_pos_and_ways(iter, range, &parent_pos, &parent_ways);
		if (rc)
			return rc;

		pos = pos * parent_ways + parent_pos;
	}

	dev_dbg(&cxlmd->dev,
		"decoder:%s parent:%s port:%s range:%#llx-%#llx pos:%d\n",
		dev_name(&cxled->cxld.dev), dev_name(cxlmd->dev.parent),
		dev_name(&port->dev), range->start, range->end, pos);

	return pos;
}

static void find_positions(const struct cxl_switch_decoder *cxlsd,
			   const struct cxl_port *iter_a,
			   const struct cxl_port *iter_b, int *a_pos,
			   int *b_pos)
{
	int i;

	for (i = 0, *a_pos = -1, *b_pos = -1; i < cxlsd->nr_targets; i++) {
		if (cxlsd->target[i] == iter_a->parent_dport)
			*a_pos = i;
		else if (cxlsd->target[i] == iter_b->parent_dport)
			*b_pos = i;
		if (*a_pos >= 0 && *b_pos >= 0)
			break;
	}
}
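/*
 * Sort comparator used by cxl_region_sort_targets() below: two
 * endpoints are ordered by walking up to their first shared ancestor
 * port and comparing the positions of their paths in that port's
 * switch decoder target list.
 */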
static int cmp_decode_pos(const void *a, const void *b)
{
	struct cxl_endpoint_decoder *cxled_a = *(typeof(cxled_a) *)a;
	struct cxl_endpoint_decoder *cxled_b = *(typeof(cxled_b) *)b;
	struct cxl_memdev *cxlmd_a = cxled_to_memdev(cxled_a);
	struct cxl_memdev *cxlmd_b = cxled_to_memdev(cxled_b);
	struct cxl_port *port_a = cxled_to_port(cxled_a);
	struct cxl_port *port_b = cxled_to_port(cxled_b);
	struct cxl_port *iter_a, *iter_b, *port = NULL;
	struct cxl_switch_decoder *cxlsd;
	struct device *dev;
	int a_pos, b_pos;
	unsigned int seq;

	/* Exit early if any prior sorting failed */
	if (cxled_a->pos < 0 || cxled_b->pos < 0)
		return 0;

	/*
	 * Walk up the hierarchy to find a shared port, find the decoder that
	 * maps the range, compare the relative position of those dport
	 * mappings.
	 */
	for (iter_a = port_a; iter_a; iter_a = next_port(iter_a)) {
		struct cxl_port *next_a, *next_b;

		next_a = next_port(iter_a);
		if (!next_a)
			break;

		for (iter_b = port_b; iter_b; iter_b = next_port(iter_b)) {
			next_b = next_port(iter_b);
			if (next_a != next_b)
				continue;
			port = next_a;
			break;
		}

		if (port)
			break;
	}

	if (!port) {
		dev_err(cxlmd_a->dev.parent,
			"failed to find shared port with %s\n",
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev = device_find_child(&port->dev, &cxled_a->cxld.hpa_range,
				match_switch_decoder_by_range);
	if (!dev) {
		struct range *range = &cxled_a->cxld.hpa_range;

		dev_err(port->uport_dev,
			"failed to find decoder that maps %#llx-%#llx\n",
			range->start, range->end);
		goto err;
	}

	cxlsd = to_cxl_switch_decoder(dev);
	do {
		seq = read_seqbegin(&cxlsd->target_lock);
		find_positions(cxlsd, iter_a, iter_b, &a_pos, &b_pos);
	} while (read_seqretry(&cxlsd->target_lock, seq));

	put_device(dev);

	if (a_pos < 0 || b_pos < 0) {
		dev_err(port->uport_dev,
			"failed to find shared decoder for %s and %s\n",
			dev_name(cxlmd_a->dev.parent),
			dev_name(cxlmd_b->dev.parent));
		goto err;
	}

	dev_dbg(port->uport_dev, "%s comes %s %s\n",
		dev_name(cxlmd_a->dev.parent),
		a_pos - b_pos < 0 ? "before" : "after",
		dev_name(cxlmd_b->dev.parent));

	return a_pos - b_pos;
err:
	cxled_a->pos = -1;
	return 0;
}

static int cxl_region_sort_targets(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	int i, rc = 0;

	sort(p->targets, p->nr_targets, sizeof(p->targets[0]), cmp_decode_pos,
	     NULL);

	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];

		/*
		 * Record that sorting failed, but still continue to restore
		 * cxled->pos with its ->targets[] position so that follow-on
		 * code paths can reliably do p->targets[cxled->pos] to
		 * self-reference their entry.
		 */
		if (cxled->pos < 0)
			rc = -ENXIO;
		cxled->pos = i;
	}

	dev_dbg(&cxlr->dev, "region sort %s\n", rc ? "failed" : "successful");
	return rc;
}
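/*
 * Attach flow summary: validate that the endpoint decoder matches the
 * region's mode/type and that its DPA allocation fits the interleave,
 * claim a decoder at every port up to the root for the given position,
 * then, once nr_targets == interleave_ways, program all target lists
 * via cxl_region_setup_targets(). Autodiscovered regions instead queue
 * endpoints in arrival order and sort them by decode position.
 */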
static int cxl_region_attach(struct cxl_region *cxlr,
			     struct cxl_endpoint_decoder *cxled, int pos)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_port *ep_port, *root_port;
	struct cxl_dport *dport;
	int rc = -ENXIO;

	if (cxled->mode != cxlr->mode) {
		dev_dbg(&cxlr->dev, "%s region mode: %d mismatch: %d\n",
			dev_name(&cxled->cxld.dev), cxlr->mode, cxled->mode);
		return -EINVAL;
	}

	if (cxled->mode == CXL_DECODER_DEAD) {
		dev_dbg(&cxlr->dev, "%s dead\n", dev_name(&cxled->cxld.dev));
		return -ENODEV;
	}

	/* all full of members, or interleave config not established? */
	if (p->state > CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "region already active\n");
		return -EBUSY;
	} else if (p->state < CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_dbg(&cxlr->dev, "interleave config missing\n");
		return -ENXIO;
	}

	ep_port = cxled_to_port(cxled);
	root_port = cxlrd_to_port(cxlrd);
	dport = cxl_find_dport_by_dev(root_port, ep_port->host_bridge);
	if (!dport) {
		dev_dbg(&cxlr->dev, "%s:%s invalid target for %s\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			dev_name(cxlr->dev.parent));
		return -ENXIO;
	}

	if (cxled->cxld.target_type != cxlr->type) {
		dev_dbg(&cxlr->dev, "%s:%s type mismatch: %d vs %d\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			cxled->cxld.target_type, cxlr->type);
		return -ENXIO;
	}

	if (!cxled->dpa_res) {
		dev_dbg(&cxlr->dev, "%s:%s: missing DPA allocation.\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev));
		return -ENXIO;
	}

	if (resource_size(cxled->dpa_res) * p->interleave_ways !=
	    resource_size(p->res)) {
		dev_dbg(&cxlr->dev,
			"%s:%s: decoder-size-%#llx * ways-%d != region-size-%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			(u64)resource_size(cxled->dpa_res), p->interleave_ways,
			(u64)resource_size(p->res));
		return -EINVAL;
	}

	if (test_bit(CXL_REGION_F_AUTO, &cxlr->flags)) {
		int i;

		rc = cxl_region_attach_auto(cxlr, cxled, pos);
		if (rc)
			return rc;

		/* await more targets to arrive... */
		if (p->nr_targets < p->interleave_ways)
			return 0;

		/*
		 * All targets are here, which implies all PCI enumeration that
		 * affects this region has been completed. Walk the topology to
		 * sort the devices into their relative region decode position.
		 */
		rc = cxl_region_sort_targets(cxlr);
		if (rc)
			return rc;

		for (i = 0; i < p->nr_targets; i++) {
			cxled = p->targets[i];
			ep_port = cxled_to_port(cxled);
			dport = cxl_find_dport_by_dev(root_port,
						      ep_port->host_bridge);
			rc = cxl_region_attach_position(cxlr, cxlrd, cxled,
							dport, i);
			if (rc)
				return rc;
		}

		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			return rc;

		/*
		 * If target setup succeeds in the autodiscovery case
		 * then the region is already committed.
		 */
		p->state = CXL_CONFIG_COMMIT;

		return 0;
	}

	rc = cxl_region_validate_position(cxlr, cxled, pos);
	if (rc)
		return rc;

	rc = cxl_region_attach_position(cxlr, cxlrd, cxled, dport, pos);
	if (rc)
		return rc;

	p->targets[pos] = cxled;
	cxled->pos = pos;
	p->nr_targets++;

	if (p->nr_targets == p->interleave_ways) {
		rc = cxl_region_setup_targets(cxlr);
		if (rc)
			goto err_decrement;
		p->state = CXL_CONFIG_ACTIVE;
	}

	cxled->cxld.interleave_ways = p->interleave_ways;
	cxled->cxld.interleave_granularity = p->interleave_granularity;
	cxled->cxld.hpa_range = (struct range) {
		.start = p->res->start,
		.end = p->res->end,
	};

	if (p->nr_targets != p->interleave_ways)
		return 0;

	/*
	 * Test the auto-discovery position calculator function
	 * against this successfully created user-defined region.
	 * A fail message here means that this interleave config
	 * will fail when presented as CXL_REGION_F_AUTO.
	 */
	for (int i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		int test_pos;

		test_pos = cxl_calc_interleave_pos(cxled);
		dev_dbg(&cxled->cxld.dev,
			"Test cxl_calc_interleave_pos(): %s test_pos:%d cxled->pos:%d\n",
			(test_pos == cxled->pos) ? "success" : "fail",
			test_pos, cxled->pos);
	}

	return 0;

err_decrement:
	p->nr_targets--;
	cxled->pos = -1;
	p->targets[pos] = NULL;
	return rc;
}
static int cxl_region_detach(struct cxl_endpoint_decoder *cxled)
{
	struct cxl_port *iter, *ep_port = cxled_to_port(cxled);
	struct cxl_region *cxlr = cxled->cxld.region;
	struct cxl_region_params *p;
	int rc = 0;

	lockdep_assert_held_write(&cxl_region_rwsem);

	if (!cxlr)
		return 0;

	p = &cxlr->params;
	get_device(&cxlr->dev);

	if (p->state > CXL_CONFIG_ACTIVE) {
		/*
		 * TODO: tear down all impacted regions if a device is
		 * removed out of order
		 */
		rc = cxl_region_decode_reset(cxlr, p->interleave_ways);
		if (rc)
			goto out;
		p->state = CXL_CONFIG_ACTIVE;
	}

	for (iter = ep_port; !is_cxl_root(iter);
	     iter = to_cxl_port(iter->dev.parent))
		cxl_port_detach_region(iter, cxlr, cxled);

	if (cxled->pos < 0 || cxled->pos >= p->interleave_ways ||
	    p->targets[cxled->pos] != cxled) {
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);

		dev_WARN_ONCE(&cxlr->dev, 1, "expected %s:%s at position %d\n",
			      dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			      cxled->pos);
		goto out;
	}

	if (p->state == CXL_CONFIG_ACTIVE) {
		p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;
		cxl_region_teardown_targets(cxlr);
	}
	p->targets[cxled->pos] = NULL;
	p->nr_targets--;
	cxled->cxld.hpa_range = (struct range) {
		.start = 0,
		.end = -1,
	};

	/* notify the region driver that one of its targets has departed */
	up_write(&cxl_region_rwsem);
	device_release_driver(&cxlr->dev);
	down_write(&cxl_region_rwsem);
out:
	put_device(&cxlr->dev);
	return rc;
}

void cxl_decoder_kill_region(struct cxl_endpoint_decoder *cxled)
{
	down_write(&cxl_region_rwsem);
	cxled->mode = CXL_DECODER_DEAD;
	cxl_region_detach(cxled);
	up_write(&cxl_region_rwsem);
}

static int attach_target(struct cxl_region *cxlr,
			 struct cxl_endpoint_decoder *cxled, int pos,
			 unsigned int state)
{
	int rc = 0;

	if (state == TASK_INTERRUPTIBLE)
		rc = down_write_killable(&cxl_region_rwsem);
	else
		down_write(&cxl_region_rwsem);
	if (rc)
		return rc;

	down_read(&cxl_dpa_rwsem);
	rc = cxl_region_attach(cxlr, cxled, pos);
	up_read(&cxl_dpa_rwsem);
	up_write(&cxl_region_rwsem);
	return rc;
}

static int detach_target(struct cxl_region *cxlr, int pos)
{
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_write_killable(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (pos >= p->interleave_ways) {
		dev_dbg(&cxlr->dev, "position %d out of range %d\n", pos,
			p->interleave_ways);
		rc = -ENXIO;
		goto out;
	}

	if (!p->targets[pos]) {
		rc = 0;
		goto out;
	}

	rc = cxl_region_detach(p->targets[pos]);
out:
	up_write(&cxl_region_rwsem);
	return rc;
}
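/*
 * Example (sketch, hypothetical decoder names): endpoint decoders are
 * attached to a region's targetN slots in interleave order, then the
 * whole assembly is committed:
 *
 *	echo decoder3.0 > /sys/bus/cxl/devices/region0/target0
 *	echo decoder4.0 > /sys/bus/cxl/devices/region0/target1
 *	echo 1 > /sys/bus/cxl/devices/region0/commit
 *
 * Writing an empty string to targetN detaches that position via
 * detach_target() above.
 */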
static size_t store_targetN(struct cxl_region *cxlr, const char *buf, int pos,
			    size_t len)
{
	int rc;

	if (sysfs_streq(buf, "\n"))
		rc = detach_target(cxlr, pos);
	else {
		struct device *dev;

		dev = bus_find_device_by_name(&cxl_bus_type, NULL, buf);
		if (!dev)
			return -ENODEV;

		if (!is_endpoint_decoder(dev)) {
			rc = -EINVAL;
			goto out;
		}

		rc = attach_target(cxlr, to_cxl_endpoint_decoder(dev), pos,
				   TASK_INTERRUPTIBLE);
out:
		put_device(dev);
	}

	if (rc < 0)
		return rc;
	return len;
}

#define TARGET_ATTR_RW(n)                                              \
static ssize_t target##n##_show(                                       \
	struct device *dev, struct device_attribute *attr, char *buf)  \
{                                                                      \
	return show_targetN(to_cxl_region(dev), buf, (n));             \
}                                                                      \
static ssize_t target##n##_store(struct device *dev,                   \
				 struct device_attribute *attr,        \
				 const char *buf, size_t len)          \
{                                                                      \
	return store_targetN(to_cxl_region(dev), buf, (n), len);       \
}                                                                      \
static DEVICE_ATTR_RW(target##n)

TARGET_ATTR_RW(0);
TARGET_ATTR_RW(1);
TARGET_ATTR_RW(2);
TARGET_ATTR_RW(3);
TARGET_ATTR_RW(4);
TARGET_ATTR_RW(5);
TARGET_ATTR_RW(6);
TARGET_ATTR_RW(7);
TARGET_ATTR_RW(8);
TARGET_ATTR_RW(9);
TARGET_ATTR_RW(10);
TARGET_ATTR_RW(11);
TARGET_ATTR_RW(12);
TARGET_ATTR_RW(13);
TARGET_ATTR_RW(14);
TARGET_ATTR_RW(15);

static struct attribute *target_attrs[] = {
	&dev_attr_target0.attr,
	&dev_attr_target1.attr,
	&dev_attr_target2.attr,
	&dev_attr_target3.attr,
	&dev_attr_target4.attr,
	&dev_attr_target5.attr,
	&dev_attr_target6.attr,
	&dev_attr_target7.attr,
	&dev_attr_target8.attr,
	&dev_attr_target9.attr,
	&dev_attr_target10.attr,
	&dev_attr_target11.attr,
	&dev_attr_target12.attr,
	&dev_attr_target13.attr,
	&dev_attr_target14.attr,
	&dev_attr_target15.attr,
	NULL,
};

static umode_t cxl_region_target_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;

	if (n < p->interleave_ways)
		return a->mode;
	return 0;
}

static const struct attribute_group cxl_region_target_group = {
	.attrs = target_attrs,
	.is_visible = cxl_region_target_visible,
};

static const struct attribute_group *get_cxl_region_target_group(void)
{
	return &cxl_region_target_group;
}

static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};
static const struct attribute_group *region_groups[] = {
	&cxl_base_attribute_group,
	&cxl_region_group,
	&cxl_region_target_group,
	NULL,
};

static void cxl_region_release(struct device *dev)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent);
	struct cxl_region *cxlr = to_cxl_region(dev);
	int id = atomic_read(&cxlrd->region_id);

	/*
	 * Try to reuse the recently idled id rather than the cached
	 * next id to prevent the region id space from increasing
	 * unnecessarily.
	 */
	if (cxlr->id < id)
		if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) {
			memregion_free(id);
			goto out;
		}

	memregion_free(cxlr->id);
out:
	put_device(dev->parent);
	kfree(cxlr);
}
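/*
 * Worked example of the id reuse above (hypothetical sequence): with
 * region0 and region1 instantiated, @cxlrd->region_id caches 2. If
 * region1 is released, cxl_region_release() swaps the cached next id
 * from 2 back to 1, so the next create_*_region request reuses the name
 * "region1" instead of growing the id space to "region2".
 */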
const struct device_type cxl_region_type = {
	.name = "cxl_region",
	.release = cxl_region_release,
	.groups = region_groups
};

bool is_cxl_region(struct device *dev)
{
	return dev->type == &cxl_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_region, CXL);

static struct cxl_region *to_cxl_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, dev->type != &cxl_region_type,
			  "not a cxl_region device\n"))
		return NULL;

	return container_of(dev, struct cxl_region, dev);
}

static void unregister_region(void *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int i;

	device_del(dev);

	/*
	 * Now that region sysfs is shut down, the parameter block is
	 * read-only, so there is no need to hold the region rwsem to
	 * access the region parameters.
	 */
	for (i = 0; i < p->interleave_ways; i++)
		detach_target(cxlr, i);

	cxl_region_iomem_release(cxlr);
	put_device(dev);
}

static struct lock_class_key cxl_region_key;

static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int id)
{
	struct cxl_region *cxlr;
	struct device *dev;

	cxlr = kzalloc(sizeof(*cxlr), GFP_KERNEL);
	if (!cxlr) {
		memregion_free(id);
		return ERR_PTR(-ENOMEM);
	}

	dev = &cxlr->dev;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_region_key);
	dev->parent = &cxlrd->cxlsd.cxld.dev;
	/*
	 * Keep the root decoder pinned through cxl_region_release() to
	 * fix up region id allocations
	 */
	get_device(dev->parent);
	device_set_pm_not_required(dev);
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_region_type;
	cxlr->id = id;

	return cxlr;
}

/**
 * devm_cxl_add_region - Adds a region to a decoder
 * @cxlrd: root decoder
 * @id: memregion id to create; the id is released via memregion_free() on failure
 * @mode: mode for the endpoint decoders of this region
 * @type: select whether this is an expander or accelerator (type-2 or type-3)
 *
 * This is the second step of region initialization. Regions exist within an
 * address space which is mapped by a @cxlrd.
 *
 * Return: the new region on success, else an ERR_PTR() encoded error code.
 * The region will be named "regionZ" where Z is the unique region number.
 */
static struct cxl_region *devm_cxl_add_region(struct cxl_root_decoder *cxlrd,
					      int id,
					      enum cxl_decoder_mode mode,
					      enum cxl_decoder_type type)
{
	struct cxl_port *port = to_cxl_port(cxlrd->cxlsd.cxld.dev.parent);
	struct cxl_region *cxlr;
	struct device *dev;
	int rc;

	switch (mode) {
	case CXL_DECODER_RAM:
	case CXL_DECODER_PMEM:
		break;
	default:
		dev_err(&cxlrd->cxlsd.cxld.dev, "unsupported mode %d\n", mode);
		return ERR_PTR(-EINVAL);
	}

	cxlr = cxl_region_alloc(cxlrd, id);
	if (IS_ERR(cxlr))
		return cxlr;
	cxlr->mode = mode;
	cxlr->type = type;

	dev = &cxlr->dev;
	rc = dev_set_name(dev, "region%d", id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	rc = devm_add_action_or_reset(port->uport_dev, unregister_region, cxlr);
	if (rc)
		return ERR_PTR(rc);

	dev_dbg(port->uport_dev, "%s: created %s\n",
		dev_name(&cxlrd->cxlsd.cxld.dev), dev_name(dev));
	return cxlr;

err:
	put_device(dev);
	return ERR_PTR(rc);
}

static ssize_t __create_region_show(struct cxl_root_decoder *cxlrd, char *buf)
{
	return sysfs_emit(buf, "region%u\n", atomic_read(&cxlrd->region_id));
}

static ssize_t create_pmem_region_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static ssize_t create_ram_region_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return __create_region_show(to_cxl_root_decoder(dev), buf);
}

static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
					  enum cxl_decoder_mode mode, int id)
{
	int rc;

	rc = memregion_alloc(GFP_KERNEL);
	if (rc < 0)
		return ERR_PTR(rc);

	if (atomic_cmpxchg(&cxlrd->region_id, id, rc) != id) {
		memregion_free(rc);
		return ERR_PTR(-EBUSY);
	}

	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
}

static ssize_t create_pmem_region_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_pmem_region);

static ssize_t create_ram_region_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_region *cxlr;
	int rc, id;

	rc = sscanf(buf, "region%d\n", &id);
	if (rc != 1)
		return -EINVAL;

	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	return len;
}
DEVICE_ATTR_RW(create_ram_region);
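/*
 * Example region creation flow via the root decoder attributes above.
 * A sketch only: the sysfs paths are hypothetical for a given platform.
 *
 *	# read the next available region name...
 *	cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	region0
 *	# ...and write it back to atomically claim it
 *	echo region0 > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *
 * The store path fails with -EBUSY if another thread claimed the id
 * first, see the atomic_cmpxchg() in __create_region().
 */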
static ssize_t region_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct cxl_decoder *cxld = to_cxl_decoder(dev);
	ssize_t rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	if (cxld->region)
		rc = sysfs_emit(buf, "%s\n", dev_name(&cxld->region->dev));
	else
		rc = sysfs_emit(buf, "\n");
	up_read(&cxl_region_rwsem);

	return rc;
}
DEVICE_ATTR_RO(region);

static struct cxl_region *
cxl_find_region_by_name(struct cxl_root_decoder *cxlrd, const char *name)
{
	struct cxl_decoder *cxld = &cxlrd->cxlsd.cxld;
	struct device *region_dev;

	region_dev = device_find_child_by_name(&cxld->dev, name);
	if (!region_dev)
		return ERR_PTR(-ENODEV);

	return to_cxl_region(region_dev);
}

static ssize_t delete_region_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t len)
{
	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
	struct cxl_port *port = to_cxl_port(dev->parent);
	struct cxl_region *cxlr;

	cxlr = cxl_find_region_by_name(cxlrd, buf);
	if (IS_ERR(cxlr))
		return PTR_ERR(cxlr);

	devm_release_action(port->uport_dev, unregister_region, cxlr);
	put_device(&cxlr->dev);

	return len;
}
DEVICE_ATTR_WO(delete_region);
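/*
 * Example teardown of a previously created region (hypothetical names):
 *
 *	echo region0 > /sys/bus/cxl/devices/decoder0.0/delete_region
 *
 * delete_region_store() releases the devm action registered by
 * devm_cxl_add_region(), which runs unregister_region() to detach all
 * targets and release the region's iomem capacity.
 */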
static void cxl_pmem_region_release(struct device *dev)
{
	struct cxl_pmem_region *cxlr_pmem = to_cxl_pmem_region(dev);
	int i;

	for (i = 0; i < cxlr_pmem->nr_mappings; i++) {
		struct cxl_memdev *cxlmd = cxlr_pmem->mapping[i].cxlmd;

		put_device(&cxlmd->dev);
	}

	kfree(cxlr_pmem);
}

static const struct attribute_group *cxl_pmem_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_pmem_region_type = {
	.name = "cxl_pmem_region",
	.release = cxl_pmem_region_release,
	.groups = cxl_pmem_region_attribute_groups,
};

bool is_cxl_pmem_region(struct device *dev)
{
	return dev->type == &cxl_pmem_region_type;
}
EXPORT_SYMBOL_NS_GPL(is_cxl_pmem_region, CXL);

struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_pmem_region(dev),
			  "not a cxl_pmem_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_pmem_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_pmem_region, CXL);

struct cxl_poison_context {
	struct cxl_port *port;
	enum cxl_decoder_mode mode;
	u64 offset;
};

static int cxl_get_poison_unmapped(struct cxl_memdev *cxlmd,
				   struct cxl_poison_context *ctx)
{
	struct cxl_dev_state *cxlds = cxlmd->cxlds;
	u64 offset, length;
	int rc = 0;

	/*
	 * Collect poison for the remaining unmapped resources
	 * after poison is collected by committed endpoints.
	 *
	 * Knowing that PMEM must always follow RAM, get poison
	 * for unmapped resources based on the last decoder's mode:
	 *	ram: scan the remainder of the ram range, then any pmem range
	 *	pmem: scan the remainder of the pmem range
	 */
	if (ctx->mode == CXL_DECODER_RAM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->ram_res) - offset;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT)
			rc = 0;
		if (rc)
			return rc;
	}
	if (ctx->mode == CXL_DECODER_PMEM) {
		offset = ctx->offset;
		length = resource_size(&cxlds->dpa_res) - offset;
		if (!length)
			return 0;
	} else if (resource_size(&cxlds->pmem_res)) {
		offset = cxlds->pmem_res.start;
		length = resource_size(&cxlds->pmem_res);
	} else {
		return 0;
	}

	return cxl_mem_get_poison(cxlmd, offset, length, NULL);
}

static int poison_by_decoder(struct device *dev, void *arg)
{
	struct cxl_poison_context *ctx = arg;
	struct cxl_endpoint_decoder *cxled;
	struct cxl_memdev *cxlmd;
	u64 offset, length;
	int rc = 0;

	if (!is_endpoint_decoder(dev))
		return rc;

	cxled = to_cxl_endpoint_decoder(dev);
	if (!cxled->dpa_res || !resource_size(cxled->dpa_res))
		return rc;

	/*
	 * Regions are only created with single mode decoders: pmem or ram.
	 * Linux does not support mixed mode decoders. This means that
	 * reading poison per endpoint decoder adheres to the requirement
	 * that poison reads of pmem and ram must be separated.
	 * CXL 3.0 Spec 8.2.9.8.4.1
	 */
	if (cxled->mode == CXL_DECODER_MIXED) {
		dev_dbg(dev, "poison list read unsupported in mixed mode\n");
		return rc;
	}

	cxlmd = cxled_to_memdev(cxled);
	if (cxled->skip) {
		offset = cxled->dpa_res->start - cxled->skip;
		length = cxled->skip;
		rc = cxl_mem_get_poison(cxlmd, offset, length, NULL);
		if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
			rc = 0;
		if (rc)
			return rc;
	}

	offset = cxled->dpa_res->start;
	length = cxled->dpa_res->end - offset + 1;
	rc = cxl_mem_get_poison(cxlmd, offset, length, cxled->cxld.region);
	if (rc == -EFAULT && cxled->mode == CXL_DECODER_RAM)
		rc = 0;
	if (rc)
		return rc;

	/* Iterate until commit_end is reached */
	if (cxled->cxld.id == ctx->port->commit_end) {
		ctx->offset = cxled->dpa_res->end + 1;
		ctx->mode = cxled->mode;
		return 1;
	}

	return 0;
}

int cxl_get_poison_by_endpoint(struct cxl_port *port)
{
	struct cxl_poison_context ctx;
	int rc = 0;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc)
		return rc;

	ctx = (struct cxl_poison_context) {
		.port = port
	};

	rc = device_for_each_child(&port->dev, &ctx, poison_by_decoder);
	if (rc == 1)
		rc = cxl_get_poison_unmapped(to_cxl_memdev(port->uport_dev),
					     &ctx);

	up_read(&cxl_region_rwsem);
	return rc;
}
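/*
 * Worked example for the unmapped scan above, assuming a hypothetical
 * device with ram_res spanning DPA 0x0..0x0fffffff and pmem_res spanning
 * DPA 0x10000000..0x1fffffff, where the last committed decoder is a ram
 * decoder ending at DPA 0x07ffffff:
 *
 *	poison_by_decoder() returns 1 with ctx->offset == 0x08000000 and
 *	ctx->mode == CXL_DECODER_RAM, so cxl_get_poison_unmapped() reads
 *	poison for 0x08000000..0x0fffffff (the remainder of ram) and then
 *	0x10000000..0x1fffffff (all of pmem).
 */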
static struct lock_class_key cxl_pmem_region_key;

static struct cxl_pmem_region *cxl_pmem_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct cxl_pmem_region *cxlr_pmem;
	struct device *dev;
	int i;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_pmem = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_pmem = kzalloc(struct_size(cxlr_pmem, mapping, p->nr_targets),
			    GFP_KERNEL);
	if (!cxlr_pmem) {
		cxlr_pmem = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_pmem->hpa_range.start = p->res->start;
	cxlr_pmem->hpa_range.end = p->res->end;

	/* Snapshot the region configuration underneath the cxl_region_rwsem */
	cxlr_pmem->nr_mappings = p->nr_targets;
	for (i = 0; i < p->nr_targets; i++) {
		struct cxl_endpoint_decoder *cxled = p->targets[i];
		struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
		struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i];

		/*
		 * Regions never span CXL root devices, so by definition the
		 * bridge for one device is the same for all.
		 */
		if (i == 0) {
			cxl_nvb = cxl_find_nvdimm_bridge(cxlmd);
			if (!cxl_nvb) {
				cxlr_pmem = ERR_PTR(-ENODEV);
				goto out;
			}
			cxlr->cxl_nvb = cxl_nvb;
		}
		m->cxlmd = cxlmd;
		get_device(&cxlmd->dev);
		m->start = cxled->dpa_res->start;
		m->size = resource_size(cxled->dpa_res);
		m->position = i;
	}

	dev = &cxlr_pmem->dev;
	cxlr_pmem->cxlr = cxlr;
	cxlr->cxlr_pmem = cxlr_pmem;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_pmem_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_pmem_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_pmem;
}

static void cxl_dax_region_release(struct device *dev)
{
	struct cxl_dax_region *cxlr_dax = to_cxl_dax_region(dev);

	kfree(cxlr_dax);
}

static const struct attribute_group *cxl_dax_region_attribute_groups[] = {
	&cxl_base_attribute_group,
	NULL,
};

const struct device_type cxl_dax_region_type = {
	.name = "cxl_dax_region",
	.release = cxl_dax_region_release,
	.groups = cxl_dax_region_attribute_groups,
};

static bool is_cxl_dax_region(struct device *dev)
{
	return dev->type == &cxl_dax_region_type;
}

struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
{
	if (dev_WARN_ONCE(dev, !is_cxl_dax_region(dev),
			  "not a cxl_dax_region device\n"))
		return NULL;
	return container_of(dev, struct cxl_dax_region, dev);
}
EXPORT_SYMBOL_NS_GPL(to_cxl_dax_region, CXL);

static struct lock_class_key cxl_dax_region_key;

static struct cxl_dax_region *cxl_dax_region_alloc(struct cxl_region *cxlr)
{
	struct cxl_region_params *p = &cxlr->params;
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;

	down_read(&cxl_region_rwsem);
	if (p->state != CXL_CONFIG_COMMIT) {
		cxlr_dax = ERR_PTR(-ENXIO);
		goto out;
	}

	cxlr_dax = kzalloc(sizeof(*cxlr_dax), GFP_KERNEL);
	if (!cxlr_dax) {
		cxlr_dax = ERR_PTR(-ENOMEM);
		goto out;
	}

	cxlr_dax->hpa_range.start = p->res->start;
	cxlr_dax->hpa_range.end = p->res->end;

	dev = &cxlr_dax->dev;
	cxlr_dax->cxlr = cxlr;
	device_initialize(dev);
	lockdep_set_class(&dev->mutex, &cxl_dax_region_key);
	device_set_pm_not_required(dev);
	dev->parent = &cxlr->dev;
	dev->bus = &cxl_bus_type;
	dev->type = &cxl_dax_region_type;
out:
	up_read(&cxl_region_rwsem);

	return cxlr_dax;
}
static void cxlr_pmem_unregister(void *_cxlr_pmem)
{
	struct cxl_pmem_region *cxlr_pmem = _cxlr_pmem;
	struct cxl_region *cxlr = cxlr_pmem->cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	/*
	 * Either the bridge is in ->remove() context under the device_lock(),
	 * or cxlr_release_nvdimm() is cancelling the bridge's release action
	 * for @cxlr_pmem and doing it itself (while manually holding the
	 * bridge lock).
	 */
	device_lock_assert(&cxl_nvb->dev);
	cxlr->cxlr_pmem = NULL;
	cxlr_pmem->cxlr = NULL;
	device_unregister(&cxlr_pmem->dev);
}

static void cxlr_release_nvdimm(void *_cxlr)
{
	struct cxl_region *cxlr = _cxlr;
	struct cxl_nvdimm_bridge *cxl_nvb = cxlr->cxl_nvb;

	device_lock(&cxl_nvb->dev);
	if (cxlr->cxlr_pmem)
		devm_release_action(&cxl_nvb->dev, cxlr_pmem_unregister,
				    cxlr->cxlr_pmem);
	device_unlock(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	put_device(&cxl_nvb->dev);
}

/**
 * devm_cxl_add_pmem_region() - add a cxl_region-to-nd_region bridge
 * @cxlr: parent CXL region for this pmem region bridge device
 *
 * Return: 0 on success, negative error code on failure.
 */
static int devm_cxl_add_pmem_region(struct cxl_region *cxlr)
{
	struct cxl_pmem_region *cxlr_pmem;
	struct cxl_nvdimm_bridge *cxl_nvb;
	struct device *dev;
	int rc;

	cxlr_pmem = cxl_pmem_region_alloc(cxlr);
	if (IS_ERR(cxlr_pmem))
		return PTR_ERR(cxlr_pmem);
	cxl_nvb = cxlr->cxl_nvb;

	dev = &cxlr_pmem->dev;
	rc = dev_set_name(dev, "pmem_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	device_lock(&cxl_nvb->dev);
	if (cxl_nvb->dev.driver)
		rc = devm_add_action_or_reset(&cxl_nvb->dev,
					      cxlr_pmem_unregister, cxlr_pmem);
	else
		rc = -ENXIO;
	device_unlock(&cxl_nvb->dev);

	if (rc)
		goto err_bridge;

	/* @cxlr carries a reference on @cxl_nvb until cxlr_release_nvdimm */
	return devm_add_action_or_reset(&cxlr->dev, cxlr_release_nvdimm, cxlr);

err:
	put_device(dev);
err_bridge:
	put_device(&cxl_nvb->dev);
	cxlr->cxl_nvb = NULL;
	return rc;
}

static void cxlr_dax_unregister(void *_cxlr_dax)
{
	struct cxl_dax_region *cxlr_dax = _cxlr_dax;

	device_unregister(&cxlr_dax->dev);
}

static int devm_cxl_add_dax_region(struct cxl_region *cxlr)
{
	struct cxl_dax_region *cxlr_dax;
	struct device *dev;
	int rc;

	cxlr_dax = cxl_dax_region_alloc(cxlr);
	if (IS_ERR(cxlr_dax))
		return PTR_ERR(cxlr_dax);

	dev = &cxlr_dax->dev;
	rc = dev_set_name(dev, "dax_region%d", cxlr->id);
	if (rc)
		goto err;

	rc = device_add(dev);
	if (rc)
		goto err;

	dev_dbg(&cxlr->dev, "%s: register %s\n", dev_name(dev->parent),
		dev_name(dev));

	return devm_add_action_or_reset(&cxlr->dev, cxlr_dax_unregister,
					cxlr_dax);
err:
	put_device(dev);
	return rc;
}

static int match_root_decoder_by_range(struct device *dev, void *data)
{
	struct range *r1, *r2 = data;
	struct cxl_root_decoder *cxlrd;

	if (!is_root_decoder(dev))
		return 0;

	cxlrd = to_cxl_root_decoder(dev);
	r1 = &cxlrd->cxlsd.cxld.hpa_range;
	return range_contains(r1, r2);
}
static int match_region_by_range(struct device *dev, void *data)
{
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct range *r = data;
	int rc = 0;

	if (!is_cxl_region(dev))
		return 0;

	cxlr = to_cxl_region(dev);
	p = &cxlr->params;

	down_read(&cxl_region_rwsem);
	if (p->res && p->res->start == r->start && p->res->end == r->end)
		rc = 1;
	up_read(&cxl_region_rwsem);

	return rc;
}

/* Establish an empty region covering the given HPA range */
static struct cxl_region *construct_region(struct cxl_root_decoder *cxlrd,
					   struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct cxl_port *port = cxlrd_to_port(cxlrd);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	struct resource *res;
	int rc;

	do {
		cxlr = __create_region(cxlrd, cxled->mode,
				       atomic_read(&cxlrd->region_id));
	} while (IS_ERR(cxlr) && PTR_ERR(cxlr) == -EBUSY);

	if (IS_ERR(cxlr)) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s failed assign region: %ld\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__, PTR_ERR(cxlr));
		return cxlr;
	}

	down_write(&cxl_region_rwsem);
	p = &cxlr->params;
	if (p->state >= CXL_CONFIG_INTERLEAVE_ACTIVE) {
		dev_err(cxlmd->dev.parent,
			"%s:%s: %s autodiscovery interrupted\n",
			dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			__func__);
		rc = -EBUSY;
		goto err;
	}

	set_bit(CXL_REGION_F_AUTO, &cxlr->flags);

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		rc = -ENOMEM;
		goto err;
	}

	*res = DEFINE_RES_MEM_NAMED(hpa->start, range_len(hpa),
				    dev_name(&cxlr->dev));
	rc = insert_resource(cxlrd->res, res);
	if (rc) {
		/*
		 * Platform-firmware may not have split resources like "System
		 * RAM" on CXL window boundaries; see
		 * cxl_region_iomem_release().
		 */
		dev_warn(cxlmd->dev.parent,
			 "%s:%s: %s %s cannot insert resource\n",
			 dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev),
			 __func__, dev_name(&cxlr->dev));
	}

	p->res = res;
	p->interleave_ways = cxled->cxld.interleave_ways;
	p->interleave_granularity = cxled->cxld.interleave_granularity;
	p->state = CXL_CONFIG_INTERLEAVE_ACTIVE;

	rc = sysfs_update_group(&cxlr->dev.kobj, get_cxl_region_target_group());
	if (rc)
		goto err;

	dev_dbg(cxlmd->dev.parent, "%s:%s: %s %s res: %pr iw: %d ig: %d\n",
		dev_name(&cxlmd->dev), dev_name(&cxled->cxld.dev), __func__,
		dev_name(&cxlr->dev), p->res, p->interleave_ways,
		p->interleave_granularity);

	/* ...to match put_device() in cxl_add_to_region() */
	get_device(&cxlr->dev);
	up_write(&cxl_region_rwsem);

	return cxlr;

err:
	up_write(&cxl_region_rwsem);
	devm_release_action(port->uport_dev, unregister_region, cxlr);
	return ERR_PTR(rc);
}
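/*
 * Example autodiscovery flow (a sketch): if platform firmware committed a
 * 2-way interleave before boot, the first endpoint decoder to arrive via
 * cxl_add_to_region() below finds no region matching its HPA range and
 * triggers construct_region(); the second endpoint finds the region by
 * range under cxlrd->range_lock and simply attaches to it. The
 * CXL_REGION_F_AUTO flag marks the result as firmware-defined rather
 * than user-assembled.
 */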
int cxl_add_to_region(struct cxl_port *root, struct cxl_endpoint_decoder *cxled)
{
	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
	struct range *hpa = &cxled->cxld.hpa_range;
	struct cxl_decoder *cxld = &cxled->cxld;
	struct device *cxlrd_dev, *region_dev;
	struct cxl_root_decoder *cxlrd;
	struct cxl_region_params *p;
	struct cxl_region *cxlr;
	bool attach = false;
	int rc;

	cxlrd_dev = device_find_child(&root->dev, &cxld->hpa_range,
				      match_root_decoder_by_range);
	if (!cxlrd_dev) {
		dev_err(cxlmd->dev.parent,
			"%s:%s no CXL window for range %#llx:%#llx\n",
			dev_name(&cxlmd->dev), dev_name(&cxld->dev),
			cxld->hpa_range.start, cxld->hpa_range.end);
		return -ENXIO;
	}

	cxlrd = to_cxl_root_decoder(cxlrd_dev);

	/*
	 * Ensure that if multiple threads race to construct_region() for @hpa
	 * one does the construction and the others add to that.
	 */
	mutex_lock(&cxlrd->range_lock);
	region_dev = device_find_child(&cxlrd->cxlsd.cxld.dev, hpa,
				       match_region_by_range);
	if (!region_dev) {
		cxlr = construct_region(cxlrd, cxled);
		region_dev = &cxlr->dev;
	} else
		cxlr = to_cxl_region(region_dev);
	mutex_unlock(&cxlrd->range_lock);

	rc = PTR_ERR_OR_ZERO(cxlr);
	if (rc)
		goto out;

	attach_target(cxlr, cxled, -1, TASK_UNINTERRUPTIBLE);

	down_read(&cxl_region_rwsem);
	p = &cxlr->params;
	attach = p->state == CXL_CONFIG_COMMIT;
	up_read(&cxl_region_rwsem);

	if (attach) {
		/*
		 * If device_attach() fails the range may still be active via
		 * the platform-firmware memory map, otherwise the driver for
		 * regions is local to this file, so driver matching can't fail.
		 */
		if (device_attach(&cxlr->dev) < 0)
			dev_err(&cxlr->dev, "failed to enable, range: %pr\n",
				p->res);
	}

	put_device(region_dev);
out:
	put_device(cxlrd_dev);
	return rc;
}
EXPORT_SYMBOL_NS_GPL(cxl_add_to_region, CXL);

static int is_system_ram(struct resource *res, void *arg)
{
	struct cxl_region *cxlr = arg;
	struct cxl_region_params *p = &cxlr->params;

	dev_dbg(&cxlr->dev, "%pr has System RAM: %pr\n", p->res, res);
	return 1;
}
static int cxl_region_probe(struct device *dev)
{
	struct cxl_region *cxlr = to_cxl_region(dev);
	struct cxl_region_params *p = &cxlr->params;
	int rc;

	rc = down_read_interruptible(&cxl_region_rwsem);
	if (rc) {
		dev_dbg(&cxlr->dev, "probe interrupted\n");
		return rc;
	}

	if (p->state < CXL_CONFIG_COMMIT) {
		dev_dbg(&cxlr->dev, "config state: %d\n", p->state);
		rc = -ENXIO;
		goto out;
	}

	if (test_bit(CXL_REGION_F_NEEDS_RESET, &cxlr->flags)) {
		dev_err(&cxlr->dev,
			"failed to activate, re-commit region and retry\n");
		rc = -ENXIO;
		goto out;
	}

	/*
	 * From this point on any path that changes the region's state away from
	 * CXL_CONFIG_COMMIT is also responsible for releasing the driver.
	 */
out:
	up_read(&cxl_region_rwsem);

	if (rc)
		return rc;

	switch (cxlr->mode) {
	case CXL_DECODER_PMEM:
		return devm_cxl_add_pmem_region(cxlr);
	case CXL_DECODER_RAM:
		/*
		 * The region cannot be managed by CXL if any portion of
		 * it is already online as 'System RAM'
		 */
		if (walk_iomem_res_desc(IORES_DESC_NONE,
					IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					p->res->start, p->res->end, cxlr,
					is_system_ram) > 0)
			return 0;
		return devm_cxl_add_dax_region(cxlr);
	default:
		dev_dbg(&cxlr->dev, "unsupported region mode: %d\n",
			cxlr->mode);
		return -ENXIO;
	}
}

static struct cxl_driver cxl_region_driver = {
	.name = "cxl_region",
	.probe = cxl_region_probe,
	.id = CXL_DEVICE_REGION,
};

int cxl_region_init(void)
{
	return cxl_driver_register(&cxl_region_driver);
}

void cxl_region_exit(void)
{
	cxl_driver_unregister(&cxl_region_driver);
}

MODULE_IMPORT_NS(CXL);
MODULE_IMPORT_NS(DEVMEM);
MODULE_ALIAS_CXL(CXL_DEVICE_REGION);
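/*
 * End-to-end example of assembling and activating a region from
 * userspace. This is a rough sketch: the device names, sizes, and exact
 * ordering constraints are illustrative; see
 * Documentation/ABI/testing/sysfs-bus-cxl for the authoritative
 * interface.
 *
 *	region=$(cat /sys/bus/cxl/devices/decoder0.0/create_pmem_region)
 *	echo $region > /sys/bus/cxl/devices/decoder0.0/create_pmem_region
 *	cd /sys/bus/cxl/devices/$region
 *	echo 256 > interleave_granularity
 *	echo 1 > interleave_ways
 *	echo $((256 << 20)) > size
 *	echo decoder2.0 > target0
 *	echo 1 > commit
 *
 * Once committed, cxl_region_probe() can bind the cxl_region driver and
 * register the pmem or dax bridge device for the region's mode.
 */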